/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - SCSI device hot plug
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .cache_line_size = 0x20,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .cache_line_size = 0x20,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .cache_line_size = 0x20,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};
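/*
 * Note (editorial): each ipr_chip[] entry below binds a PCI vendor/device ID
 * to one of the register layouts above -- ipr_chip_cfg[0] and [1] for the
 * 32-bit SIS (IPR_SIS32) adapters, ipr_chip_cfg[2] for the 64-bit SIS
 * (IPR_SIS64, IPR_MMIO) adapters.
 */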
static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
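/*
 * Note (editorial): ipr_max_bus_speeds[] is indexed by the max_speed module
 * parameter defined below (0 = 80 MB/s, 1 = U160, 2 = U320), selecting the
 * corresponding SCSI rate constant.
 */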
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
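/*
 * Illustrative usage note (not part of the original source): the module
 * parameters above are set at load time, e.g.
 *
 *      modprobe ipr max_speed=2 log_level=2 fastfail=1
 *
 * and the ones registered with S_IRUGO | S_IWUSR (fastfail, debug,
 * fast_reboot) can also be changed at runtime through sysfs:
 *
 *      echo 1 > /sys/module/ipr/parameters/fastfail
 */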
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
237 "Soft underlength error"},
239 "Command to be cancelled not found"},
241 "Qualified success"},
242 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL
,
243 "FFFE: Soft device bus error recovered by the IOA"},
244 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL
,
245 "4101: Soft device bus fabric error"},
246 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL
,
247 "FFFC: Logical block guard error recovered by the device"},
248 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL
,
249 "FFFC: Logical block reference tag error recovered by the device"},
250 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL
,
251 "4171: Recovered scatter list tag / sequence number error"},
252 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL
,
253 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
254 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL
,
255 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
256 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL
,
257 "FFFD: Recovered logical block reference tag error detected by the IOA"},
258 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL
,
259 "FFFD: Logical block guard error recovered by the IOA"},
260 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL
,
261 "FFF9: Device sector reassign successful"},
262 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL
,
263 "FFF7: Media error recovered by device rewrite procedures"},
264 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL
,
265 "7001: IOA sector reassignment successful"},
266 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL
,
267 "FFF9: Soft media error. Sector reassignment recommended"},
268 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL
,
269 "FFF7: Media error recovered by IOA rewrite procedures"},
270 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL
,
271 "FF3D: Soft PCI bus error recovered by the IOA"},
272 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL
,
273 "FFF6: Device hardware error recovered by the IOA"},
274 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL
,
275 "FFF6: Device hardware error recovered by the device"},
276 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL
,
277 "FF3D: Soft IOA error recovered by the IOA"},
278 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL
,
279 "FFFA: Undefined device response recovered by the IOA"},
280 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL
,
281 "FFF6: Device bus error, message or command phase"},
282 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL
,
283 "FFFE: Task Management Function failed"},
284 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL
,
285 "FFF6: Failure prediction threshold exceeded"},
286 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL
,
287 "8009: Impending cache battery pack failure"},
289 "Logical Unit in process of becoming ready"},
291 "Initializing command required"},
293 "34FF: Disk device format in progress"},
295 "Logical unit not accessible, target port in unavailable state"},
296 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL
,
297 "9070: IOA requested reset"},
299 "Synchronization required"},
301 "IOA microcode download required"},
303 "Device bus connection is prohibited by host"},
305 "No ready, IOA shutdown"},
307 "Not ready, IOA has been shutdown"},
308 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL
,
309 "3020: Storage subsystem configuration error"},
311 "FFF5: Medium error, data unreadable, recommend reassign"},
313 "7000: Medium error, data unreadable, do not reassign"},
314 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL
,
315 "FFF3: Disk media format bad"},
316 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL
,
317 "3002: Addressed device failed to respond to selection"},
318 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL
,
319 "3100: Device bus error"},
320 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL
,
321 "3109: IOA timed out a device command"},
323 "3120: SCSI bus is not operational"},
324 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL
,
325 "4100: Hard device bus fabric error"},
326 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL
,
327 "310C: Logical block guard error detected by the device"},
328 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL
,
329 "310C: Logical block reference tag error detected by the device"},
330 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL
,
331 "4170: Scatter list tag / sequence number error"},
332 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL
,
333 "8150: Logical block CRC error on IOA to Host transfer"},
334 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL
,
335 "4170: Logical block sequence number error on IOA to Host transfer"},
336 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL
,
337 "310D: Logical block reference tag error detected by the IOA"},
338 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL
,
339 "310D: Logical block guard error detected by the IOA"},
340 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL
,
341 "9000: IOA reserved area data check"},
342 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL
,
343 "9001: IOA reserved area invalid data pattern"},
344 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL
,
345 "9002: IOA reserved area LRC error"},
346 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL
,
347 "Hardware Error, IOA metadata access error"},
348 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL
,
349 "102E: Out of alternate sectors for disk storage"},
350 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL
,
351 "FFF4: Data transfer underlength error"},
352 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL
,
353 "FFF4: Data transfer overlength error"},
354 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL
,
355 "3400: Logical unit failure"},
356 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL
,
357 "FFF4: Device microcode is corrupt"},
358 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL
,
359 "8150: PCI bus error"},
361 "Unsupported device bus message received"},
362 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL
,
363 "FFF4: Disk device problem"},
364 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL
,
365 "8150: Permanent IOA failure"},
366 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL
,
367 "3010: Disk device returned wrong response to IOA"},
368 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL
,
369 "8151: IOA microcode error"},
371 "Device bus status error"},
372 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL
,
373 "8157: IOA error requiring IOA reset to recover"},
375 "ATA device status error"},
377 "Message reject received from the device"},
378 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL
,
379 "8008: A permanent cache battery pack failure occurred"},
380 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL
,
381 "9090: Disk unit has been modified after the last known status"},
382 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL
,
383 "9081: IOA detected device error"},
384 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL
,
385 "9082: IOA detected device error"},
386 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL
,
387 "3110: Device bus error, message or command phase"},
388 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL
,
389 "3110: SAS Command / Task Management Function failed"},
390 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL
,
391 "9091: Incorrect hardware configuration change has been detected"},
392 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL
,
393 "9073: Invalid multi-adapter configuration"},
394 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL
,
395 "4010: Incorrect connection between cascaded expanders"},
396 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL
,
397 "4020: Connections exceed IOA design limits"},
398 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL
,
399 "4030: Incorrect multipath connection"},
400 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL
,
401 "4110: Unsupported enclosure function"},
402 {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL
,
403 "4120: SAS cable VPD cannot be read"},
404 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL
,
405 "FFF4: Command to logical unit failed"},
407 "Illegal request, invalid request type or request packet"},
409 "Illegal request, invalid resource handle"},
411 "Illegal request, commands not allowed to this device"},
413 "Illegal request, command not allowed to a secondary adapter"},
415 "Illegal request, command not allowed to a non-optimized resource"},
417 "Illegal request, invalid field in parameter list"},
419 "Illegal request, parameter not supported"},
421 "Illegal request, parameter value invalid"},
423 "Illegal request, command sequence error"},
425 "Illegal request, dual adapter support not enabled"},
427 "Illegal request, another cable connector was physically disabled"},
429 "Illegal request, inconsistent group id/group count"},
430 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL
,
431 "9031: Array protection temporarily suspended, protection resuming"},
432 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL
,
433 "9040: Array protection temporarily suspended, protection resuming"},
434 {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL
,
435 "4080: IOA exceeded maximum operating temperature"},
436 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL
,
437 "4085: Service required"},
438 {0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL
,
439 "4086: SAS Adapter Hardware Configuration Error"},
440 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL
,
441 "3140: Device bus not ready to ready transition"},
442 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL
,
443 "FFFB: SCSI bus was reset"},
445 "FFFE: SCSI bus transition to single ended"},
447 "FFFE: SCSI bus transition to LVD"},
448 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL
,
449 "FFFB: SCSI bus was reset by another initiator"},
450 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL
,
451 "3029: A device replacement has occurred"},
452 {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL
,
453 "4102: Device bus fabric performance degradation"},
454 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL
,
455 "9051: IOA cache data exists for a missing or failed device"},
456 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL
,
457 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
458 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL
,
459 "9025: Disk unit is not supported at its physical location"},
460 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL
,
461 "3020: IOA detected a SCSI bus configuration error"},
462 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL
,
463 "3150: SCSI bus configuration error"},
464 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL
,
465 "9074: Asymmetric advanced function disk configuration"},
466 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL
,
467 "4040: Incomplete multipath connection between IOA and enclosure"},
468 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL
,
469 "4041: Incomplete multipath connection between enclosure and device"},
470 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL
,
471 "9075: Incomplete multipath connection between IOA and remote IOA"},
472 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL
,
473 "9076: Configuration error, missing remote IOA"},
474 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL
,
475 "4050: Enclosure does not support a required multipath function"},
476 {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL
,
477 "4121: Configuration error, required cable is missing"},
478 {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL
,
479 "4122: Cable is not plugged into the correct location on remote IOA"},
480 {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL
,
481 "4123: Configuration error, invalid cable vital product data"},
482 {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL
,
483 "4124: Configuration error, both cable ends are plugged into the same IOA"},
484 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL
,
485 "4070: Logically bad block written on device"},
486 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL
,
487 "9041: Array protection temporarily suspended"},
488 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL
,
489 "9042: Corrupt array parity detected on specified device"},
490 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL
,
491 "9030: Array no longer protected due to missing or failed disk unit"},
492 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL
,
493 "9071: Link operational transition"},
494 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL
,
495 "9072: Link not operational transition"},
496 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL
,
497 "9032: Array exposed but still protected"},
498 {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL
,
499 "70DD: Device forced failed by disrupt device command"},
500 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL
,
501 "4061: Multipath redundancy level got better"},
502 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL
,
503 "4060: Multipath redundancy level got worse"},
504 {0x06808100, 0, IPR_DEBUG_LOG_LEVEL
,
505 "9083: Device raw mode enabled"},
506 {0x06808200, 0, IPR_DEBUG_LOG_LEVEL
,
507 "9084: Device raw mode disabled"},
509 "Failure due to other device"},
510 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL
,
511 "9008: IOA does not support functions expected by devices"},
512 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL
,
513 "9010: Cache data associated with attached devices cannot be found"},
514 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL
,
515 "9011: Cache data belongs to devices other than those attached"},
516 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL
,
517 "9020: Array missing 2 or more devices with only 1 device present"},
518 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL
,
519 "9021: Array missing 2 or more devices with 2 or more devices present"},
520 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL
,
521 "9022: Exposed array is missing a required device"},
522 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL
,
523 "9023: Array member(s) not at required physical locations"},
524 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL
,
525 "9024: Array not functional due to present hardware configuration"},
526 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL
,
527 "9026: Array not functional due to present hardware configuration"},
528 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL
,
529 "9027: Array is missing a device and parity is out of sync"},
530 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL
,
531 "9028: Maximum number of arrays already exist"},
532 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL
,
533 "9050: Required cache data cannot be located for a disk unit"},
534 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL
,
535 "9052: Cache data exists for a device that has been modified"},
536 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL
,
537 "9054: IOA resources not available due to previous problems"},
538 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL
,
539 "9092: Disk unit requires initialization before use"},
540 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL
,
541 "9029: Incorrect hardware configuration change has been detected"},
542 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL
,
543 "9060: One or more disk pairs are missing from an array"},
544 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL
,
545 "9061: One or more disks are missing from an array"},
546 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL
,
547 "9062: One or more disks are missing from an array"},
548 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL
,
549 "9063: Maximum number of functional arrays has been exceeded"},
551 "Data protect, other volume set problem"},
553 "Aborted command, invalid descriptor"},
555 "Target operating conditions have changed, dual adapter takeover"},
557 "Aborted command, medium removal prevented"},
559 "Command terminated by host"},
561 "Aborted command, command terminated by host"}
static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:       trace type
 * @add_data:   additional data
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        unsigned int trace_index;

        trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
        trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
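/*
 * Note (editorial): with CONFIG_SCSI_IPR_TRACE unset, the stub macro above
 * makes every ipr_trc_hook() call site compile away to nothing, so tracing
 * adds no overhead in that configuration.
 */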
/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        timer_setup(&ipr_cmd->timer, NULL, 0);
}
/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}
741 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
742 * @ioa_cfg: ioa config struct
743 * @clr_ints: interrupts to clear
745 * This function masks all interrupts on the adapter, then clears the
746 * interrupts specified in the mask
751 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg
*ioa_cfg
,
754 volatile u32 int_reg
;
757 /* Stop new interrupts */
758 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
759 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
760 ioa_cfg
->hrrq
[i
].allow_interrupts
= 0;
761 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
764 /* Set interrupt mask to stop all new interrupts */
766 writeq(~0, ioa_cfg
->regs
.set_interrupt_mask_reg
);
768 writel(~0, ioa_cfg
->regs
.set_interrupt_mask_reg
);
770 /* Clear any pending interrupts */
772 writel(~0, ioa_cfg
->regs
.clr_interrupt_reg
);
773 writel(clr_ints
, ioa_cfg
->regs
.clr_interrupt_reg32
);
774 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
778 * ipr_save_pcix_cmd_reg - Save PCI-X command register
779 * @ioa_cfg: ioa config struct
782 * 0 on success / -EIO on failure
784 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg
*ioa_cfg
)
786 int pcix_cmd_reg
= pci_find_capability(ioa_cfg
->pdev
, PCI_CAP_ID_PCIX
);
788 if (pcix_cmd_reg
== 0)
791 if (pci_read_config_word(ioa_cfg
->pdev
, pcix_cmd_reg
+ PCI_X_CMD
,
792 &ioa_cfg
->saved_pcix_cmd_reg
) != PCIBIOS_SUCCESSFUL
) {
793 dev_err(&ioa_cfg
->pdev
->dev
, "Failed to save PCI-X command register\n");
797 ioa_cfg
->saved_pcix_cmd_reg
|= PCI_X_CMD_DPERR_E
| PCI_X_CMD_ERO
;
802 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
803 * @ioa_cfg: ioa config struct
806 * 0 on success / -EIO on failure
808 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg
*ioa_cfg
)
810 int pcix_cmd_reg
= pci_find_capability(ioa_cfg
->pdev
, PCI_CAP_ID_PCIX
);
813 if (pci_write_config_word(ioa_cfg
->pdev
, pcix_cmd_reg
+ PCI_X_CMD
,
814 ioa_cfg
->saved_pcix_cmd_reg
) != PCIBIOS_SUCCESSFUL
) {
815 dev_err(&ioa_cfg
->pdev
->dev
, "Failed to setup PCI-X command register\n");
824 * __ipr_sata_eh_done - done function for aborted SATA commands
825 * @ipr_cmd: ipr command struct
827 * This function is invoked for ops generated to SATA
828 * devices which are being aborted.
833 static void __ipr_sata_eh_done(struct ipr_cmnd
*ipr_cmd
)
835 struct ata_queued_cmd
*qc
= ipr_cmd
->qc
;
836 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
838 qc
->err_mask
|= AC_ERR_OTHER
;
839 sata_port
->ioasa
.status
|= ATA_BUSY
;
841 if (ipr_cmd
->eh_comp
)
842 complete(ipr_cmd
->eh_comp
);
843 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
847 * ipr_sata_eh_done - done function for aborted SATA commands
848 * @ipr_cmd: ipr command struct
850 * This function is invoked for ops generated to SATA
851 * devices which are being aborted.
856 static void ipr_sata_eh_done(struct ipr_cmnd
*ipr_cmd
)
858 struct ipr_hrr_queue
*hrrq
= ipr_cmd
->hrrq
;
859 unsigned long hrrq_flags
;
861 spin_lock_irqsave(&hrrq
->_lock
, hrrq_flags
);
862 __ipr_sata_eh_done(ipr_cmd
);
863 spin_unlock_irqrestore(&hrrq
->_lock
, hrrq_flags
);
867 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
868 * @ipr_cmd: ipr command struct
870 * This function is invoked by the interrupt handler for
871 * ops generated by the SCSI mid-layer which are being aborted.
876 static void __ipr_scsi_eh_done(struct ipr_cmnd
*ipr_cmd
)
878 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
880 scsi_cmd
->result
|= (DID_ERROR
<< 16);
882 scsi_dma_unmap(ipr_cmd
->scsi_cmd
);
883 scsi_cmd
->scsi_done(scsi_cmd
);
884 if (ipr_cmd
->eh_comp
)
885 complete(ipr_cmd
->eh_comp
);
886 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
890 * ipr_scsi_eh_done - mid-layer done function for aborted ops
891 * @ipr_cmd: ipr command struct
893 * This function is invoked by the interrupt handler for
894 * ops generated by the SCSI mid-layer which are being aborted.
899 static void ipr_scsi_eh_done(struct ipr_cmnd
*ipr_cmd
)
901 unsigned long hrrq_flags
;
902 struct ipr_hrr_queue
*hrrq
= ipr_cmd
->hrrq
;
904 spin_lock_irqsave(&hrrq
->_lock
, hrrq_flags
);
905 __ipr_scsi_eh_done(ipr_cmd
);
906 spin_unlock_irqrestore(&hrrq
->_lock
, hrrq_flags
);
910 * ipr_fail_all_ops - Fails all outstanding ops.
911 * @ioa_cfg: ioa config struct
913 * This function fails all outstanding ops.
918 static void ipr_fail_all_ops(struct ipr_ioa_cfg
*ioa_cfg
)
920 struct ipr_cmnd
*ipr_cmd
, *temp
;
921 struct ipr_hrr_queue
*hrrq
;
924 for_each_hrrq(hrrq
, ioa_cfg
) {
925 spin_lock(&hrrq
->_lock
);
926 list_for_each_entry_safe(ipr_cmd
,
927 temp
, &hrrq
->hrrq_pending_q
, queue
) {
928 list_del(&ipr_cmd
->queue
);
930 ipr_cmd
->s
.ioasa
.hdr
.ioasc
=
931 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET
);
932 ipr_cmd
->s
.ioasa
.hdr
.ilid
=
933 cpu_to_be32(IPR_DRIVER_ILID
);
935 if (ipr_cmd
->scsi_cmd
)
936 ipr_cmd
->done
= __ipr_scsi_eh_done
;
937 else if (ipr_cmd
->qc
)
938 ipr_cmd
->done
= __ipr_sata_eh_done
;
940 ipr_trc_hook(ipr_cmd
, IPR_TRACE_FINISH
,
941 IPR_IOASC_IOA_WAS_RESET
);
942 del_timer(&ipr_cmd
->timer
);
943 ipr_cmd
->done(ipr_cmd
);
945 spin_unlock(&hrrq
->_lock
);
951 * ipr_send_command - Send driver initiated requests.
952 * @ipr_cmd: ipr command struct
954 * This function sends a command to the adapter using the correct write call.
955 * In the case of sis64, calculate the ioarcb size required. Then or in the
961 static void ipr_send_command(struct ipr_cmnd
*ipr_cmd
)
963 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
964 dma_addr_t send_dma_addr
= ipr_cmd
->dma_addr
;
966 if (ioa_cfg
->sis64
) {
967 /* The default size is 256 bytes */
968 send_dma_addr
|= 0x1;
970 /* If the number of ioadls * size of ioadl > 128 bytes,
971 then use a 512 byte ioarcb */
972 if (ipr_cmd
->dma_use_sg
* sizeof(struct ipr_ioadl64_desc
) > 128 )
973 send_dma_addr
|= 0x4;
974 writeq(send_dma_addr
, ioa_cfg
->regs
.ioarrin_reg
);
976 writel(send_dma_addr
, ioa_cfg
->regs
.ioarrin_reg
);
980 * ipr_do_req - Send driver initiated requests.
981 * @ipr_cmd: ipr command struct
982 * @done: done function
983 * @timeout_func: timeout function
984 * @timeout: timeout value
986 * This function sends the specified command to the adapter with the
987 * timeout given. The done function is invoked on command completion.
992 static void ipr_do_req(struct ipr_cmnd
*ipr_cmd
,
993 void (*done
) (struct ipr_cmnd
*),
994 void (*timeout_func
) (struct timer_list
*), u32 timeout
)
996 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
998 ipr_cmd
->done
= done
;
1000 ipr_cmd
->timer
.expires
= jiffies
+ timeout
;
1001 ipr_cmd
->timer
.function
= timeout_func
;
1003 add_timer(&ipr_cmd
->timer
);
1005 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, 0);
1007 ipr_send_command(ipr_cmd
);
1011 * ipr_internal_cmd_done - Op done function for an internally generated op.
1012 * @ipr_cmd: ipr command struct
1014 * This function is the op done function for an internally generated,
1015 * blocking op. It simply wakes the sleeping thread.
1020 static void ipr_internal_cmd_done(struct ipr_cmnd
*ipr_cmd
)
1022 if (ipr_cmd
->sibling
)
1023 ipr_cmd
->sibling
= NULL
;
1025 complete(&ipr_cmd
->completion
);
1029 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1030 * @ipr_cmd: ipr command struct
1031 * @dma_addr: dma address
1032 * @len: transfer length
1033 * @flags: ioadl flag value
1035 * This function initializes an ioadl in the case where there is only a single
1041 static void ipr_init_ioadl(struct ipr_cmnd
*ipr_cmd
, dma_addr_t dma_addr
,
1044 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
1045 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ioadl64
;
1047 ipr_cmd
->dma_use_sg
= 1;
1049 if (ipr_cmd
->ioa_cfg
->sis64
) {
1050 ioadl64
->flags
= cpu_to_be32(flags
);
1051 ioadl64
->data_len
= cpu_to_be32(len
);
1052 ioadl64
->address
= cpu_to_be64(dma_addr
);
1054 ipr_cmd
->ioarcb
.ioadl_len
=
1055 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
));
1056 ipr_cmd
->ioarcb
.data_transfer_length
= cpu_to_be32(len
);
1058 ioadl
->flags_and_data_len
= cpu_to_be32(flags
| len
);
1059 ioadl
->address
= cpu_to_be32(dma_addr
);
1061 if (flags
== IPR_IOADL_FLAGS_READ_LAST
) {
1062 ipr_cmd
->ioarcb
.read_ioadl_len
=
1063 cpu_to_be32(sizeof(struct ipr_ioadl_desc
));
1064 ipr_cmd
->ioarcb
.read_data_transfer_length
= cpu_to_be32(len
);
1066 ipr_cmd
->ioarcb
.ioadl_len
=
1067 cpu_to_be32(sizeof(struct ipr_ioadl_desc
));
1068 ipr_cmd
->ioarcb
.data_transfer_length
= cpu_to_be32(len
);
1074 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1075 * @ipr_cmd: ipr command struct
1076 * @timeout_func: function to invoke if command times out
1082 static void ipr_send_blocking_cmd(struct ipr_cmnd
*ipr_cmd
,
1083 void (*timeout_func
) (struct timer_list
*),
1086 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
1088 init_completion(&ipr_cmd
->completion
);
1089 ipr_do_req(ipr_cmd
, ipr_internal_cmd_done
, timeout_func
, timeout
);
1091 spin_unlock_irq(ioa_cfg
->host
->host_lock
);
1092 wait_for_completion(&ipr_cmd
->completion
);
1093 spin_lock_irq(ioa_cfg
->host
->host_lock
);
1096 static int ipr_get_hrrq_index(struct ipr_ioa_cfg
*ioa_cfg
)
1100 if (ioa_cfg
->hrrq_num
== 1)
1103 hrrq
= atomic_add_return(1, &ioa_cfg
->hrrq_index
);
1104 hrrq
= (hrrq
% (ioa_cfg
->hrrq_num
- 1)) + 1;
1110 * ipr_send_hcam - Send an HCAM to the adapter.
1111 * @ioa_cfg: ioa config struct
1113 * @hostrcb: hostrcb struct
1115 * This function will send a Host Controlled Async command to the adapter.
1116 * If HCAMs are currently not allowed to be issued to the adapter, it will
1117 * place the hostrcb on the free queue.
1122 static void ipr_send_hcam(struct ipr_ioa_cfg
*ioa_cfg
, u8 type
,
1123 struct ipr_hostrcb
*hostrcb
)
1125 struct ipr_cmnd
*ipr_cmd
;
1126 struct ipr_ioarcb
*ioarcb
;
1128 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
) {
1129 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
1130 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
1131 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_pending_q
);
1133 ipr_cmd
->u
.hostrcb
= hostrcb
;
1134 ioarcb
= &ipr_cmd
->ioarcb
;
1136 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
1137 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_HCAM
;
1138 ioarcb
->cmd_pkt
.cdb
[0] = IPR_HOST_CONTROLLED_ASYNC
;
1139 ioarcb
->cmd_pkt
.cdb
[1] = type
;
1140 ioarcb
->cmd_pkt
.cdb
[7] = (sizeof(hostrcb
->hcam
) >> 8) & 0xff;
1141 ioarcb
->cmd_pkt
.cdb
[8] = sizeof(hostrcb
->hcam
) & 0xff;
1143 ipr_init_ioadl(ipr_cmd
, hostrcb
->hostrcb_dma
,
1144 sizeof(hostrcb
->hcam
), IPR_IOADL_FLAGS_READ_LAST
);
1146 if (type
== IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
)
1147 ipr_cmd
->done
= ipr_process_ccn
;
1149 ipr_cmd
->done
= ipr_process_error
;
1151 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, IPR_IOA_RES_ADDR
);
1153 ipr_send_command(ipr_cmd
);
1155 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_free_q
);
1160 * ipr_update_ata_class - Update the ata class in the resource entry
1161 * @res: resource entry struct
1162 * @proto: cfgte device bus protocol value
1167 static void ipr_update_ata_class(struct ipr_resource_entry
*res
, unsigned int proto
)
1170 case IPR_PROTO_SATA
:
1171 case IPR_PROTO_SAS_STP
:
1172 res
->ata_class
= ATA_DEV_ATA
;
1174 case IPR_PROTO_SATA_ATAPI
:
1175 case IPR_PROTO_SAS_STP_ATAPI
:
1176 res
->ata_class
= ATA_DEV_ATAPI
;
1179 res
->ata_class
= ATA_DEV_UNKNOWN
;
1185 * ipr_init_res_entry - Initialize a resource entry struct.
1186 * @res: resource entry struct
1187 * @cfgtew: config table entry wrapper struct
1192 static void ipr_init_res_entry(struct ipr_resource_entry
*res
,
1193 struct ipr_config_table_entry_wrapper
*cfgtew
)
1197 struct ipr_ioa_cfg
*ioa_cfg
= res
->ioa_cfg
;
1198 struct ipr_resource_entry
*gscsi_res
= NULL
;
1200 res
->needs_sync_complete
= 0;
1203 res
->del_from_ml
= 0;
1204 res
->resetting_device
= 0;
1205 res
->reset_occurred
= 0;
1207 res
->sata_port
= NULL
;
1209 if (ioa_cfg
->sis64
) {
1210 proto
= cfgtew
->u
.cfgte64
->proto
;
1211 res
->flags
= be16_to_cpu(cfgtew
->u
.cfgte64
->flags
);
1212 res
->res_flags
= be16_to_cpu(cfgtew
->u
.cfgte64
->res_flags
);
1213 res
->qmodel
= IPR_QUEUEING_MODEL64(res
);
1214 res
->type
= cfgtew
->u
.cfgte64
->res_type
;
1216 memcpy(res
->res_path
, &cfgtew
->u
.cfgte64
->res_path
,
1217 sizeof(res
->res_path
));
1220 memcpy(&res
->dev_lun
.scsi_lun
, &cfgtew
->u
.cfgte64
->lun
,
1221 sizeof(res
->dev_lun
.scsi_lun
));
1222 res
->lun
= scsilun_to_int(&res
->dev_lun
);
1224 if (res
->type
== IPR_RES_TYPE_GENERIC_SCSI
) {
1225 list_for_each_entry(gscsi_res
, &ioa_cfg
->used_res_q
, queue
) {
1226 if (gscsi_res
->dev_id
== cfgtew
->u
.cfgte64
->dev_id
) {
1228 res
->target
= gscsi_res
->target
;
1233 res
->target
= find_first_zero_bit(ioa_cfg
->target_ids
,
1234 ioa_cfg
->max_devs_supported
);
1235 set_bit(res
->target
, ioa_cfg
->target_ids
);
1237 } else if (res
->type
== IPR_RES_TYPE_IOAFP
) {
1238 res
->bus
= IPR_IOAFP_VIRTUAL_BUS
;
1240 } else if (res
->type
== IPR_RES_TYPE_ARRAY
) {
1241 res
->bus
= IPR_ARRAY_VIRTUAL_BUS
;
1242 res
->target
= find_first_zero_bit(ioa_cfg
->array_ids
,
1243 ioa_cfg
->max_devs_supported
);
1244 set_bit(res
->target
, ioa_cfg
->array_ids
);
1245 } else if (res
->type
== IPR_RES_TYPE_VOLUME_SET
) {
1246 res
->bus
= IPR_VSET_VIRTUAL_BUS
;
1247 res
->target
= find_first_zero_bit(ioa_cfg
->vset_ids
,
1248 ioa_cfg
->max_devs_supported
);
1249 set_bit(res
->target
, ioa_cfg
->vset_ids
);
1251 res
->target
= find_first_zero_bit(ioa_cfg
->target_ids
,
1252 ioa_cfg
->max_devs_supported
);
1253 set_bit(res
->target
, ioa_cfg
->target_ids
);
1256 proto
= cfgtew
->u
.cfgte
->proto
;
1257 res
->qmodel
= IPR_QUEUEING_MODEL(res
);
1258 res
->flags
= cfgtew
->u
.cfgte
->flags
;
1259 if (res
->flags
& IPR_IS_IOA_RESOURCE
)
1260 res
->type
= IPR_RES_TYPE_IOAFP
;
1262 res
->type
= cfgtew
->u
.cfgte
->rsvd_subtype
& 0x0f;
1264 res
->bus
= cfgtew
->u
.cfgte
->res_addr
.bus
;
1265 res
->target
= cfgtew
->u
.cfgte
->res_addr
.target
;
1266 res
->lun
= cfgtew
->u
.cfgte
->res_addr
.lun
;
1267 res
->lun_wwn
= get_unaligned_be64(cfgtew
->u
.cfgte
->lun_wwn
);
1270 ipr_update_ata_class(res
, proto
);
1274 * ipr_is_same_device - Determine if two devices are the same.
1275 * @res: resource entry struct
1276 * @cfgtew: config table entry wrapper struct
1279 * 1 if the devices are the same / 0 otherwise
1281 static int ipr_is_same_device(struct ipr_resource_entry
*res
,
1282 struct ipr_config_table_entry_wrapper
*cfgtew
)
1284 if (res
->ioa_cfg
->sis64
) {
1285 if (!memcmp(&res
->dev_id
, &cfgtew
->u
.cfgte64
->dev_id
,
1286 sizeof(cfgtew
->u
.cfgte64
->dev_id
)) &&
1287 !memcmp(&res
->dev_lun
.scsi_lun
, &cfgtew
->u
.cfgte64
->lun
,
1288 sizeof(cfgtew
->u
.cfgte64
->lun
))) {
1292 if (res
->bus
== cfgtew
->u
.cfgte
->res_addr
.bus
&&
1293 res
->target
== cfgtew
->u
.cfgte
->res_addr
.target
&&
1294 res
->lun
== cfgtew
->u
.cfgte
->res_addr
.lun
)
1302 * __ipr_format_res_path - Format the resource path for printing.
1303 * @res_path: resource path
1305 * @len: length of buffer provided
1310 static char *__ipr_format_res_path(u8
*res_path
, char *buffer
, int len
)
1316 p
+= snprintf(p
, buffer
+ len
- p
, "%02X", res_path
[0]);
1317 for (i
= 1; res_path
[i
] != 0xff && ((i
* 3) < len
); i
++)
1318 p
+= snprintf(p
, buffer
+ len
- p
, "-%02X", res_path
[i
]);
1324 * ipr_format_res_path - Format the resource path for printing.
1325 * @ioa_cfg: ioa config struct
1326 * @res_path: resource path
1328 * @len: length of buffer provided
1333 static char *ipr_format_res_path(struct ipr_ioa_cfg
*ioa_cfg
,
1334 u8
*res_path
, char *buffer
, int len
)
1339 p
+= snprintf(p
, buffer
+ len
- p
, "%d/", ioa_cfg
->host
->host_no
);
1340 __ipr_format_res_path(res_path
, p
, len
- (buffer
- p
));
1345 * ipr_update_res_entry - Update the resource entry.
1346 * @res: resource entry struct
1347 * @cfgtew: config table entry wrapper struct
1352 static void ipr_update_res_entry(struct ipr_resource_entry
*res
,
1353 struct ipr_config_table_entry_wrapper
*cfgtew
)
1355 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
1359 if (res
->ioa_cfg
->sis64
) {
1360 res
->flags
= be16_to_cpu(cfgtew
->u
.cfgte64
->flags
);
1361 res
->res_flags
= be16_to_cpu(cfgtew
->u
.cfgte64
->res_flags
);
1362 res
->type
= cfgtew
->u
.cfgte64
->res_type
;
1364 memcpy(&res
->std_inq_data
, &cfgtew
->u
.cfgte64
->std_inq_data
,
1365 sizeof(struct ipr_std_inq_data
));
1367 res
->qmodel
= IPR_QUEUEING_MODEL64(res
);
1368 proto
= cfgtew
->u
.cfgte64
->proto
;
1369 res
->res_handle
= cfgtew
->u
.cfgte64
->res_handle
;
1370 res
->dev_id
= cfgtew
->u
.cfgte64
->dev_id
;
1372 memcpy(&res
->dev_lun
.scsi_lun
, &cfgtew
->u
.cfgte64
->lun
,
1373 sizeof(res
->dev_lun
.scsi_lun
));
1375 if (memcmp(res
->res_path
, &cfgtew
->u
.cfgte64
->res_path
,
1376 sizeof(res
->res_path
))) {
1377 memcpy(res
->res_path
, &cfgtew
->u
.cfgte64
->res_path
,
1378 sizeof(res
->res_path
));
1382 if (res
->sdev
&& new_path
)
1383 sdev_printk(KERN_INFO
, res
->sdev
, "Resource path: %s\n",
1384 ipr_format_res_path(res
->ioa_cfg
,
1385 res
->res_path
, buffer
, sizeof(buffer
)));
1387 res
->flags
= cfgtew
->u
.cfgte
->flags
;
1388 if (res
->flags
& IPR_IS_IOA_RESOURCE
)
1389 res
->type
= IPR_RES_TYPE_IOAFP
;
1391 res
->type
= cfgtew
->u
.cfgte
->rsvd_subtype
& 0x0f;
1393 memcpy(&res
->std_inq_data
, &cfgtew
->u
.cfgte
->std_inq_data
,
1394 sizeof(struct ipr_std_inq_data
));
1396 res
->qmodel
= IPR_QUEUEING_MODEL(res
);
1397 proto
= cfgtew
->u
.cfgte
->proto
;
1398 res
->res_handle
= cfgtew
->u
.cfgte
->res_handle
;
1401 ipr_update_ata_class(res
, proto
);
1405 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1407 * @res: resource entry struct
1408 * @cfgtew: config table entry wrapper struct
1413 static void ipr_clear_res_target(struct ipr_resource_entry
*res
)
1415 struct ipr_resource_entry
*gscsi_res
= NULL
;
1416 struct ipr_ioa_cfg
*ioa_cfg
= res
->ioa_cfg
;
1418 if (!ioa_cfg
->sis64
)
1421 if (res
->bus
== IPR_ARRAY_VIRTUAL_BUS
)
1422 clear_bit(res
->target
, ioa_cfg
->array_ids
);
1423 else if (res
->bus
== IPR_VSET_VIRTUAL_BUS
)
1424 clear_bit(res
->target
, ioa_cfg
->vset_ids
);
1425 else if (res
->bus
== 0 && res
->type
== IPR_RES_TYPE_GENERIC_SCSI
) {
1426 list_for_each_entry(gscsi_res
, &ioa_cfg
->used_res_q
, queue
)
1427 if (gscsi_res
->dev_id
== res
->dev_id
&& gscsi_res
!= res
)
1429 clear_bit(res
->target
, ioa_cfg
->target_ids
);
1431 } else if (res
->bus
== 0)
1432 clear_bit(res
->target
, ioa_cfg
->target_ids
);
1436 * ipr_handle_config_change - Handle a config change from the adapter
1437 * @ioa_cfg: ioa config struct
1443 static void ipr_handle_config_change(struct ipr_ioa_cfg
*ioa_cfg
,
1444 struct ipr_hostrcb
*hostrcb
)
1446 struct ipr_resource_entry
*res
= NULL
;
1447 struct ipr_config_table_entry_wrapper cfgtew
;
1448 __be32 cc_res_handle
;
1452 if (ioa_cfg
->sis64
) {
1453 cfgtew
.u
.cfgte64
= &hostrcb
->hcam
.u
.ccn
.u
.cfgte64
;
1454 cc_res_handle
= cfgtew
.u
.cfgte64
->res_handle
;
1456 cfgtew
.u
.cfgte
= &hostrcb
->hcam
.u
.ccn
.u
.cfgte
;
1457 cc_res_handle
= cfgtew
.u
.cfgte
->res_handle
;
1460 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
1461 if (res
->res_handle
== cc_res_handle
) {
1468 if (list_empty(&ioa_cfg
->free_res_q
)) {
1469 ipr_send_hcam(ioa_cfg
,
1470 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
,
1475 res
= list_entry(ioa_cfg
->free_res_q
.next
,
1476 struct ipr_resource_entry
, queue
);
1478 list_del(&res
->queue
);
1479 ipr_init_res_entry(res
, &cfgtew
);
1480 list_add_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
1483 ipr_update_res_entry(res
, &cfgtew
);
1485 if (hostrcb
->hcam
.notify_type
== IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY
) {
1487 res
->del_from_ml
= 1;
1488 res
->res_handle
= IPR_INVALID_RES_HANDLE
;
1489 schedule_work(&ioa_cfg
->work_q
);
1491 ipr_clear_res_target(res
);
1492 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
1494 } else if (!res
->sdev
|| res
->del_from_ml
) {
1496 schedule_work(&ioa_cfg
->work_q
);
1499 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
, hostrcb
);
1503 * ipr_process_ccn - Op done function for a CCN.
1504 * @ipr_cmd: ipr command struct
1506 * This function is the op done function for a configuration
1507 * change notification host controlled async from the adapter.
1512 static void ipr_process_ccn(struct ipr_cmnd
*ipr_cmd
)
1514 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
1515 struct ipr_hostrcb
*hostrcb
= ipr_cmd
->u
.hostrcb
;
1516 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
1518 list_del_init(&hostrcb
->queue
);
1519 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
1522 if (ioasc
!= IPR_IOASC_IOA_WAS_RESET
&&
1523 ioasc
!= IPR_IOASC_ABORTED_CMD_TERM_BY_HOST
)
1524 dev_err(&ioa_cfg
->pdev
->dev
,
1525 "Host RCB failed with IOASC: 0x%08X\n", ioasc
);
1527 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
, hostrcb
);
1529 ipr_handle_config_change(ioa_cfg
, hostrcb
);
1534 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1535 * @i: index into buffer
1536 * @buf: string to modify
1538 * This function will strip all trailing whitespace, pad the end
1539 * of the string with a single space, and NULL terminate the string.
1542 * new length of string
1544 static int strip_and_pad_whitespace(int i
, char *buf
)
1546 while (i
&& buf
[i
] == ' ')
1554 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1555 * @prefix: string to print at start of printk
1556 * @hostrcb: hostrcb pointer
1557 * @vpd: vendor/product id/sn struct
1562 static void ipr_log_vpd_compact(char *prefix
, struct ipr_hostrcb
*hostrcb
,
1563 struct ipr_vpd
*vpd
)
1565 char buffer
[IPR_VENDOR_ID_LEN
+ IPR_PROD_ID_LEN
+ IPR_SERIAL_NUM_LEN
+ 3];
1568 memcpy(buffer
, vpd
->vpids
.vendor_id
, IPR_VENDOR_ID_LEN
);
1569 i
= strip_and_pad_whitespace(IPR_VENDOR_ID_LEN
- 1, buffer
);
1571 memcpy(&buffer
[i
], vpd
->vpids
.product_id
, IPR_PROD_ID_LEN
);
1572 i
= strip_and_pad_whitespace(i
+ IPR_PROD_ID_LEN
- 1, buffer
);
1574 memcpy(&buffer
[i
], vpd
->sn
, IPR_SERIAL_NUM_LEN
);
1575 buffer
[IPR_SERIAL_NUM_LEN
+ i
] = '\0';
1577 ipr_hcam_err(hostrcb
, "%s VPID/SN: %s\n", prefix
, buffer
);
1581 * ipr_log_vpd - Log the passed VPD to the error log.
1582 * @vpd: vendor/product id/sn struct
1587 static void ipr_log_vpd(struct ipr_vpd
*vpd
)
1589 char buffer
[IPR_VENDOR_ID_LEN
+ IPR_PROD_ID_LEN
1590 + IPR_SERIAL_NUM_LEN
];
1592 memcpy(buffer
, vpd
->vpids
.vendor_id
, IPR_VENDOR_ID_LEN
);
1593 memcpy(buffer
+ IPR_VENDOR_ID_LEN
, vpd
->vpids
.product_id
,
1595 buffer
[IPR_VENDOR_ID_LEN
+ IPR_PROD_ID_LEN
] = '\0';
1596 ipr_err("Vendor/Product ID: %s\n", buffer
);
1598 memcpy(buffer
, vpd
->sn
, IPR_SERIAL_NUM_LEN
);
1599 buffer
[IPR_SERIAL_NUM_LEN
] = '\0';
1600 ipr_err(" Serial Number: %s\n", buffer
);
1604 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1605 * @prefix: string to print at start of printk
1606 * @hostrcb: hostrcb pointer
1607 * @vpd: vendor/product id/sn/wwn struct
1612 static void ipr_log_ext_vpd_compact(char *prefix
, struct ipr_hostrcb
*hostrcb
,
1613 struct ipr_ext_vpd
*vpd
)
1615 ipr_log_vpd_compact(prefix
, hostrcb
, &vpd
->vpd
);
1616 ipr_hcam_err(hostrcb
, "%s WWN: %08X%08X\n", prefix
,
1617 be32_to_cpu(vpd
->wwid
[0]), be32_to_cpu(vpd
->wwid
[1]));
1621 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1622 * @vpd: vendor/product id/sn/wwn struct
1627 static void ipr_log_ext_vpd(struct ipr_ext_vpd
*vpd
)
1629 ipr_log_vpd(&vpd
->vpd
);
1630 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd
->wwid
[0]),
1631 be32_to_cpu(vpd
->wwid
[1]));
1635 * ipr_log_enhanced_cache_error - Log a cache error.
1636 * @ioa_cfg: ioa config struct
1637 * @hostrcb: hostrcb struct
1642 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg
*ioa_cfg
,
1643 struct ipr_hostrcb
*hostrcb
)
1645 struct ipr_hostrcb_type_12_error
*error
;
1648 error
= &hostrcb
->hcam
.u
.error64
.u
.type_12_error
;
1650 error
= &hostrcb
->hcam
.u
.error
.u
.type_12_error
;
1652 ipr_err("-----Current Configuration-----\n");
1653 ipr_err("Cache Directory Card Information:\n");
1654 ipr_log_ext_vpd(&error
->ioa_vpd
);
1655 ipr_err("Adapter Card Information:\n");
1656 ipr_log_ext_vpd(&error
->cfc_vpd
);
1658 ipr_err("-----Expected Configuration-----\n");
1659 ipr_err("Cache Directory Card Information:\n");
1660 ipr_log_ext_vpd(&error
->ioa_last_attached_to_cfc_vpd
);
1661 ipr_err("Adapter Card Information:\n");
1662 ipr_log_ext_vpd(&error
->cfc_last_attached_to_ioa_vpd
);
1664 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1665 be32_to_cpu(error
->ioa_data
[0]),
1666 be32_to_cpu(error
->ioa_data
[1]),
1667 be32_to_cpu(error
->ioa_data
[2]));
1671 * ipr_log_cache_error - Log a cache error.
1672 * @ioa_cfg: ioa config struct
1673 * @hostrcb: hostrcb struct
1678 static void ipr_log_cache_error(struct ipr_ioa_cfg
*ioa_cfg
,
1679 struct ipr_hostrcb
*hostrcb
)
1681 struct ipr_hostrcb_type_02_error
*error
=
1682 &hostrcb
->hcam
.u
.error
.u
.type_02_error
;
1684 ipr_err("-----Current Configuration-----\n");
1685 ipr_err("Cache Directory Card Information:\n");
1686 ipr_log_vpd(&error
->ioa_vpd
);
1687 ipr_err("Adapter Card Information:\n");
1688 ipr_log_vpd(&error
->cfc_vpd
);
1690 ipr_err("-----Expected Configuration-----\n");
1691 ipr_err("Cache Directory Card Information:\n");
1692 ipr_log_vpd(&error
->ioa_last_attached_to_cfc_vpd
);
1693 ipr_err("Adapter Card Information:\n");
1694 ipr_log_vpd(&error
->cfc_last_attached_to_ioa_vpd
);
1696 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1697 be32_to_cpu(error
->ioa_data
[0]),
1698 be32_to_cpu(error
->ioa_data
[1]),
1699 be32_to_cpu(error
->ioa_data
[2]));
1703 * ipr_log_enhanced_config_error - Log a configuration error.
1704 * @ioa_cfg: ioa config struct
1705 * @hostrcb: hostrcb struct
1710 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg
*ioa_cfg
,
1711 struct ipr_hostrcb
*hostrcb
)
1713 int errors_logged
, i
;
1714 struct ipr_hostrcb_device_data_entry_enhanced
*dev_entry
;
1715 struct ipr_hostrcb_type_13_error
*error
;
1717 error
= &hostrcb
->hcam
.u
.error
.u
.type_13_error
;
1718 errors_logged
= be32_to_cpu(error
->errors_logged
);
1720 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1721 be32_to_cpu(error
->errors_detected
), errors_logged
);
1723 dev_entry
= error
->dev
;
1725 for (i
= 0; i
< errors_logged
; i
++, dev_entry
++) {
1728 ipr_phys_res_err(ioa_cfg
, dev_entry
->dev_res_addr
, "Device %d", i
+ 1);
1729 ipr_log_ext_vpd(&dev_entry
->vpd
);
1731 ipr_err("-----New Device Information-----\n");
1732 ipr_log_ext_vpd(&dev_entry
->new_vpd
);
1734 ipr_err("Cache Directory Card Information:\n");
1735 ipr_log_ext_vpd(&dev_entry
->ioa_last_with_dev_vpd
);
1737 ipr_err("Adapter Card Information:\n");
1738 ipr_log_ext_vpd(&dev_entry
->cfc_last_with_dev_vpd
);
1743 * ipr_log_sis64_config_error - Log a device error.
1744 * @ioa_cfg: ioa config struct
1745 * @hostrcb: hostrcb struct
1750 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg
*ioa_cfg
,
1751 struct ipr_hostrcb
*hostrcb
)
1753 int errors_logged
, i
;
1754 struct ipr_hostrcb64_device_data_entry_enhanced
*dev_entry
;
1755 struct ipr_hostrcb_type_23_error
*error
;
1756 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
1758 error
= &hostrcb
->hcam
.u
.error64
.u
.type_23_error
;
1759 errors_logged
= be32_to_cpu(error
->errors_logged
);
1761 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1762 be32_to_cpu(error
->errors_detected
), errors_logged
);
1764 dev_entry
= error
->dev
;
1766 for (i
= 0; i
< errors_logged
; i
++, dev_entry
++) {
1769 ipr_err("Device %d : %s", i
+ 1,
1770 __ipr_format_res_path(dev_entry
->res_path
,
1771 buffer
, sizeof(buffer
)));
1772 ipr_log_ext_vpd(&dev_entry
->vpd
);
1774 ipr_err("-----New Device Information-----\n");
1775 ipr_log_ext_vpd(&dev_entry
->new_vpd
);
1777 ipr_err("Cache Directory Card Information:\n");
1778 ipr_log_ext_vpd(&dev_entry
->ioa_last_with_dev_vpd
);
1780 ipr_err("Adapter Card Information:\n");
1781 ipr_log_ext_vpd(&dev_entry
->cfc_last_with_dev_vpd
);
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");
	}
}
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		/* The first half of the members live in array_member, the rest
		 * in array_member2, so switch arrays after the tenth entry. */
		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
{
	int i;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
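
/*
 * Illustrative note (values are made up, only the layout is meaningful):
 * ipr_log_hex_data() above prints one line per four big-endian 32-bit words,
 * prefixed with the byte offset of the first word, e.g.
 *
 *	00000010: 00000001 0000ABCD 00000000 12345678
 */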
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}
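
/*
 * Fabric path decoding: a fabric descriptor's path_state byte carries both an
 * "active" indication and a health state. ipr_log_fabric_path() below splits
 * it with IPR_PATH_ACTIVE_MASK and IPR_PATH_STATE_MASK and uses the lookup
 * tables that follow to turn each half into a human readable string.
 */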
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
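
/*
 * Descriptive note: in the function above a value of 0xff in cascaded_expander
 * or phy means that piece of addressing information is not available, which is
 * why the message format is chosen based on which fields are valid.
 */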
2091 * ipr_log64_fabric_path - Log a fabric path error
2092 * @hostrcb: hostrcb struct
2093 * @fabric: fabric descriptor
2098 static void ipr_log64_fabric_path(struct ipr_hostrcb
*hostrcb
,
2099 struct ipr_hostrcb64_fabric_desc
*fabric
)
2102 u8 path_state
= fabric
->path_state
;
2103 u8 active
= path_state
& IPR_PATH_ACTIVE_MASK
;
2104 u8 state
= path_state
& IPR_PATH_STATE_MASK
;
2105 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2107 for (i
= 0; i
< ARRAY_SIZE(path_active_desc
); i
++) {
2108 if (path_active_desc
[i
].active
!= active
)
2111 for (j
= 0; j
< ARRAY_SIZE(path_state_desc
); j
++) {
2112 if (path_state_desc
[j
].state
!= state
)
2115 ipr_hcam_err(hostrcb
, "%s %s: Resource Path=%s\n",
2116 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2117 ipr_format_res_path(hostrcb
->ioa_cfg
,
2119 buffer
, sizeof(buffer
)));
2124 ipr_err("Path state=%02X Resource Path=%s\n", path_state
,
2125 ipr_format_res_path(hostrcb
->ioa_cfg
, fabric
->res_path
,
2126 buffer
, sizeof(buffer
)));
2129 static const struct {
2132 } path_type_desc
[] = {
2133 { IPR_PATH_CFG_IOA_PORT
, "IOA port" },
2134 { IPR_PATH_CFG_EXP_PORT
, "Expander port" },
2135 { IPR_PATH_CFG_DEVICE_PORT
, "Device port" },
2136 { IPR_PATH_CFG_DEVICE_LUN
, "Device LUN" }
2139 static const struct {
2142 } path_status_desc
[] = {
2143 { IPR_PATH_CFG_NO_PROB
, "Functional" },
2144 { IPR_PATH_CFG_DEGRADED
, "Degraded" },
2145 { IPR_PATH_CFG_FAILED
, "Failed" },
2146 { IPR_PATH_CFG_SUSPECT
, "Suspect" },
2147 { IPR_PATH_NOT_DETECTED
, "Missing" },
2148 { IPR_PATH_INCORRECT_CONN
, "Incorrectly connected" }
2151 static const char *link_rate
[] = {
2154 "phy reset problem",
2171 * ipr_log_path_elem - Log a fabric path element.
2172 * @hostrcb: hostrcb struct
2173 * @cfg: fabric path element struct
2178 static void ipr_log_path_elem(struct ipr_hostrcb
*hostrcb
,
2179 struct ipr_hostrcb_config_element
*cfg
)
2182 u8 type
= cfg
->type_status
& IPR_PATH_CFG_TYPE_MASK
;
2183 u8 status
= cfg
->type_status
& IPR_PATH_CFG_STATUS_MASK
;
2185 if (type
== IPR_PATH_CFG_NOT_EXIST
)
2188 for (i
= 0; i
< ARRAY_SIZE(path_type_desc
); i
++) {
2189 if (path_type_desc
[i
].type
!= type
)
2192 for (j
= 0; j
< ARRAY_SIZE(path_status_desc
); j
++) {
2193 if (path_status_desc
[j
].status
!= status
)
2196 if (type
== IPR_PATH_CFG_IOA_PORT
) {
2197 ipr_hcam_err(hostrcb
, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2198 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2199 cfg
->phy
, link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2200 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2202 if (cfg
->cascaded_expander
== 0xff && cfg
->phy
== 0xff) {
2203 ipr_hcam_err(hostrcb
, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2204 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2205 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2206 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2207 } else if (cfg
->cascaded_expander
== 0xff) {
2208 ipr_hcam_err(hostrcb
, "%s %s: Phy=%d, Link rate=%s, "
2209 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2210 path_type_desc
[i
].desc
, cfg
->phy
,
2211 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2212 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2213 } else if (cfg
->phy
== 0xff) {
2214 ipr_hcam_err(hostrcb
, "%s %s: Cascade=%d, Link rate=%s, "
2215 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2216 path_type_desc
[i
].desc
, cfg
->cascaded_expander
,
2217 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2218 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2220 ipr_hcam_err(hostrcb
, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2221 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2222 path_type_desc
[i
].desc
, cfg
->cascaded_expander
, cfg
->phy
,
2223 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2224 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2231 ipr_hcam_err(hostrcb
, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2232 "WWN=%08X%08X\n", cfg
->type_status
, cfg
->cascaded_expander
, cfg
->phy
,
2233 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2234 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2238 * ipr_log64_path_elem - Log a fabric path element.
2239 * @hostrcb: hostrcb struct
2240 * @cfg: fabric path element struct
2245 static void ipr_log64_path_elem(struct ipr_hostrcb
*hostrcb
,
2246 struct ipr_hostrcb64_config_element
*cfg
)
2249 u8 desc_id
= cfg
->descriptor_id
& IPR_DESCRIPTOR_MASK
;
2250 u8 type
= cfg
->type_status
& IPR_PATH_CFG_TYPE_MASK
;
2251 u8 status
= cfg
->type_status
& IPR_PATH_CFG_STATUS_MASK
;
2252 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2254 if (type
== IPR_PATH_CFG_NOT_EXIST
|| desc_id
!= IPR_DESCRIPTOR_SIS64
)
2257 for (i
= 0; i
< ARRAY_SIZE(path_type_desc
); i
++) {
2258 if (path_type_desc
[i
].type
!= type
)
2261 for (j
= 0; j
< ARRAY_SIZE(path_status_desc
); j
++) {
2262 if (path_status_desc
[j
].status
!= status
)
2265 ipr_hcam_err(hostrcb
, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2266 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2267 ipr_format_res_path(hostrcb
->ioa_cfg
,
2268 cfg
->res_path
, buffer
, sizeof(buffer
)),
2269 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2270 be32_to_cpu(cfg
->wwid
[0]),
2271 be32_to_cpu(cfg
->wwid
[1]));
2275 ipr_hcam_err(hostrcb
, "Path element=%02X: Resource Path=%s, Link rate=%s "
2276 "WWN=%08X%08X\n", cfg
->type_status
,
2277 ipr_format_res_path(hostrcb
->ioa_cfg
,
2278 cfg
->res_path
, buffer
, sizeof(buffer
)),
2279 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2280 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2284 * ipr_log_fabric_error - Log a fabric error.
2285 * @ioa_cfg: ioa config struct
2286 * @hostrcb: hostrcb struct
2291 static void ipr_log_fabric_error(struct ipr_ioa_cfg
*ioa_cfg
,
2292 struct ipr_hostrcb
*hostrcb
)
2294 struct ipr_hostrcb_type_20_error
*error
;
2295 struct ipr_hostrcb_fabric_desc
*fabric
;
2296 struct ipr_hostrcb_config_element
*cfg
;
2299 error
= &hostrcb
->hcam
.u
.error
.u
.type_20_error
;
2300 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
2301 ipr_hcam_err(hostrcb
, "%s\n", error
->failure_reason
);
2303 add_len
= be32_to_cpu(hostrcb
->hcam
.length
) -
2304 (offsetof(struct ipr_hostrcb_error
, u
) +
2305 offsetof(struct ipr_hostrcb_type_20_error
, desc
));
2307 for (i
= 0, fabric
= error
->desc
; i
< error
->num_entries
; i
++) {
2308 ipr_log_fabric_path(hostrcb
, fabric
);
2309 for_each_fabric_cfg(fabric
, cfg
)
2310 ipr_log_path_elem(hostrcb
, cfg
);
2312 add_len
-= be16_to_cpu(fabric
->length
);
2313 fabric
= (struct ipr_hostrcb_fabric_desc
*)
2314 ((unsigned long)fabric
+ be16_to_cpu(fabric
->length
));
2317 ipr_log_hex_data(ioa_cfg
, (__be32
*)fabric
, add_len
);
2321 * ipr_log_sis64_array_error - Log a sis64 array error.
2322 * @ioa_cfg: ioa config struct
2323 * @hostrcb: hostrcb struct
2328 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg
*ioa_cfg
,
2329 struct ipr_hostrcb
*hostrcb
)
2332 struct ipr_hostrcb_type_24_error
*error
;
2333 struct ipr_hostrcb64_array_data_entry
*array_entry
;
2334 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2335 const u8 zero_sn
[IPR_SERIAL_NUM_LEN
] = { [0 ... IPR_SERIAL_NUM_LEN
-1] = '0' };
2337 error
= &hostrcb
->hcam
.u
.error64
.u
.type_24_error
;
2341 ipr_err("RAID %s Array Configuration: %s\n",
2342 error
->protection_level
,
2343 ipr_format_res_path(ioa_cfg
, error
->last_res_path
,
2344 buffer
, sizeof(buffer
)));
2348 array_entry
= error
->array_member
;
2349 num_entries
= min_t(u32
, error
->num_entries
,
2350 ARRAY_SIZE(error
->array_member
));
2352 for (i
= 0; i
< num_entries
; i
++, array_entry
++) {
2354 if (!memcmp(array_entry
->vpd
.vpd
.sn
, zero_sn
, IPR_SERIAL_NUM_LEN
))
2357 if (error
->exposed_mode_adn
== i
)
2358 ipr_err("Exposed Array Member %d:\n", i
);
2360 ipr_err("Array Member %d:\n", i
);
2362 ipr_err("Array Member %d:\n", i
);
2363 ipr_log_ext_vpd(&array_entry
->vpd
);
2364 ipr_err("Current Location: %s\n",
2365 ipr_format_res_path(ioa_cfg
, array_entry
->res_path
,
2366 buffer
, sizeof(buffer
)));
2367 ipr_err("Expected Location: %s\n",
2368 ipr_format_res_path(ioa_cfg
,
2369 array_entry
->expected_res_path
,
2370 buffer
, sizeof(buffer
)));
2377 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2378 * @ioa_cfg: ioa config struct
2379 * @hostrcb: hostrcb struct
2384 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg
*ioa_cfg
,
2385 struct ipr_hostrcb
*hostrcb
)
2387 struct ipr_hostrcb_type_30_error
*error
;
2388 struct ipr_hostrcb64_fabric_desc
*fabric
;
2389 struct ipr_hostrcb64_config_element
*cfg
;
2392 error
= &hostrcb
->hcam
.u
.error64
.u
.type_30_error
;
2394 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
2395 ipr_hcam_err(hostrcb
, "%s\n", error
->failure_reason
);
2397 add_len
= be32_to_cpu(hostrcb
->hcam
.length
) -
2398 (offsetof(struct ipr_hostrcb64_error
, u
) +
2399 offsetof(struct ipr_hostrcb_type_30_error
, desc
));
2401 for (i
= 0, fabric
= error
->desc
; i
< error
->num_entries
; i
++) {
2402 ipr_log64_fabric_path(hostrcb
, fabric
);
2403 for_each_fabric_cfg(fabric
, cfg
)
2404 ipr_log64_path_elem(hostrcb
, cfg
);
2406 add_len
-= be16_to_cpu(fabric
->length
);
2407 fabric
= (struct ipr_hostrcb64_fabric_desc
*)
2408 ((unsigned long)fabric
+ be16_to_cpu(fabric
->length
));
2411 ipr_log_hex_data(ioa_cfg
, (__be32
*)fabric
, add_len
);
2415 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2416 * @ioa_cfg: ioa config struct
2417 * @hostrcb: hostrcb struct
2422 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg
*ioa_cfg
,
2423 struct ipr_hostrcb
*hostrcb
)
2425 struct ipr_hostrcb_type_41_error
*error
;
2427 error
= &hostrcb
->hcam
.u
.error64
.u
.type_41_error
;
2429 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
2430 ipr_err("Primary Failure Reason: %s\n", error
->failure_reason
);
2431 ipr_log_hex_data(ioa_cfg
, error
->data
,
2432 be32_to_cpu(hostrcb
->hcam
.length
) -
2433 (offsetof(struct ipr_hostrcb_error
, u
) +
2434 offsetof(struct ipr_hostrcb_type_41_error
, data
)));
2437 * ipr_log_generic_error - Log an adapter error.
2438 * @ioa_cfg: ioa config struct
2439 * @hostrcb: hostrcb struct
2444 static void ipr_log_generic_error(struct ipr_ioa_cfg
*ioa_cfg
,
2445 struct ipr_hostrcb
*hostrcb
)
2447 ipr_log_hex_data(ioa_cfg
, hostrcb
->hcam
.u
.raw
.data
,
2448 be32_to_cpu(hostrcb
->hcam
.length
));
2452 * ipr_log_sis64_device_error - Log a cache error.
2453 * @ioa_cfg: ioa config struct
2454 * @hostrcb: hostrcb struct
2459 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg
*ioa_cfg
,
2460 struct ipr_hostrcb
*hostrcb
)
2462 struct ipr_hostrcb_type_21_error
*error
;
2463 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2465 error
= &hostrcb
->hcam
.u
.error64
.u
.type_21_error
;
2467 ipr_err("-----Failing Device Information-----\n");
2468 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2469 be32_to_cpu(error
->wwn
[0]), be32_to_cpu(error
->wwn
[1]),
2470 be32_to_cpu(error
->wwn
[2]), be32_to_cpu(error
->wwn
[3]));
2471 ipr_err("Device Resource Path: %s\n",
2472 __ipr_format_res_path(error
->res_path
,
2473 buffer
, sizeof(buffer
)));
2474 error
->primary_problem_desc
[sizeof(error
->primary_problem_desc
) - 1] = '\0';
2475 error
->second_problem_desc
[sizeof(error
->second_problem_desc
) - 1] = '\0';
2476 ipr_err("Primary Problem Description: %s\n", error
->primary_problem_desc
);
2477 ipr_err("Secondary Problem Description: %s\n", error
->second_problem_desc
);
2478 ipr_err("SCSI Sense Data:\n");
2479 ipr_log_hex_data(ioa_cfg
, error
->sense_data
, sizeof(error
->sense_data
));
2480 ipr_err("SCSI Command Descriptor Block: \n");
2481 ipr_log_hex_data(ioa_cfg
, error
->cdb
, sizeof(error
->cdb
));
2483 ipr_err("Additional IOA Data:\n");
2484 ipr_log_hex_data(ioa_cfg
, error
->ioa_data
, be32_to_cpu(error
->length_of_error
));
/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 *	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}
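
/*
 * Illustrative usage: callers typically do
 *
 *	error_index = ipr_get_error(ioasc);
 *	if (!ipr_error_table[error_index].log_hcam)
 *		return;
 *
 * as ipr_handle_log_data() below does, so unknown IOASCs fall back to the
 * catch-all entry at index 0.
 */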
2510 * ipr_handle_log_data - Log an adapter error.
2511 * @ioa_cfg: ioa config struct
2512 * @hostrcb: hostrcb struct
2514 * This function logs an adapter error to the system.
2519 static void ipr_handle_log_data(struct ipr_ioa_cfg
*ioa_cfg
,
2520 struct ipr_hostrcb
*hostrcb
)
2524 struct ipr_hostrcb_type_21_error
*error
;
2526 if (hostrcb
->hcam
.notify_type
!= IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY
)
2529 if (hostrcb
->hcam
.notifications_lost
== IPR_HOST_RCB_NOTIFICATIONS_LOST
)
2530 dev_err(&ioa_cfg
->pdev
->dev
, "Error notifications lost\n");
2533 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error64
.fd_ioasc
);
2535 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
2537 if (!ioa_cfg
->sis64
&& (ioasc
== IPR_IOASC_BUS_WAS_RESET
||
2538 ioasc
== IPR_IOASC_BUS_WAS_RESET_BY_OTHER
)) {
2539 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2540 scsi_report_bus_reset(ioa_cfg
->host
,
2541 hostrcb
->hcam
.u
.error
.fd_res_addr
.bus
);
2544 error_index
= ipr_get_error(ioasc
);
2546 if (!ipr_error_table
[error_index
].log_hcam
)
2549 if (ioasc
== IPR_IOASC_HW_CMD_FAILED
&&
2550 hostrcb
->hcam
.overlay_id
== IPR_HOST_RCB_OVERLAY_ID_21
) {
2551 error
= &hostrcb
->hcam
.u
.error64
.u
.type_21_error
;
2553 if (((be32_to_cpu(error
->sense_data
[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST
&&
2554 ioa_cfg
->log_level
<= IPR_DEFAULT_LOG_LEVEL
)
2558 ipr_hcam_err(hostrcb
, "%s\n", ipr_error_table
[error_index
].error
);
2560 /* Set indication we have logged an error */
2561 ioa_cfg
->errors_logged
++;
2563 if (ioa_cfg
->log_level
< ipr_error_table
[error_index
].log_hcam
)
2565 if (be32_to_cpu(hostrcb
->hcam
.length
) > sizeof(hostrcb
->hcam
.u
.raw
))
2566 hostrcb
->hcam
.length
= cpu_to_be32(sizeof(hostrcb
->hcam
.u
.raw
));
2568 switch (hostrcb
->hcam
.overlay_id
) {
2569 case IPR_HOST_RCB_OVERLAY_ID_2
:
2570 ipr_log_cache_error(ioa_cfg
, hostrcb
);
2572 case IPR_HOST_RCB_OVERLAY_ID_3
:
2573 ipr_log_config_error(ioa_cfg
, hostrcb
);
2575 case IPR_HOST_RCB_OVERLAY_ID_4
:
2576 case IPR_HOST_RCB_OVERLAY_ID_6
:
2577 ipr_log_array_error(ioa_cfg
, hostrcb
);
2579 case IPR_HOST_RCB_OVERLAY_ID_7
:
2580 ipr_log_dual_ioa_error(ioa_cfg
, hostrcb
);
2582 case IPR_HOST_RCB_OVERLAY_ID_12
:
2583 ipr_log_enhanced_cache_error(ioa_cfg
, hostrcb
);
2585 case IPR_HOST_RCB_OVERLAY_ID_13
:
2586 ipr_log_enhanced_config_error(ioa_cfg
, hostrcb
);
2588 case IPR_HOST_RCB_OVERLAY_ID_14
:
2589 case IPR_HOST_RCB_OVERLAY_ID_16
:
2590 ipr_log_enhanced_array_error(ioa_cfg
, hostrcb
);
2592 case IPR_HOST_RCB_OVERLAY_ID_17
:
2593 ipr_log_enhanced_dual_ioa_error(ioa_cfg
, hostrcb
);
2595 case IPR_HOST_RCB_OVERLAY_ID_20
:
2596 ipr_log_fabric_error(ioa_cfg
, hostrcb
);
2598 case IPR_HOST_RCB_OVERLAY_ID_21
:
2599 ipr_log_sis64_device_error(ioa_cfg
, hostrcb
);
2601 case IPR_HOST_RCB_OVERLAY_ID_23
:
2602 ipr_log_sis64_config_error(ioa_cfg
, hostrcb
);
2604 case IPR_HOST_RCB_OVERLAY_ID_24
:
2605 case IPR_HOST_RCB_OVERLAY_ID_26
:
2606 ipr_log_sis64_array_error(ioa_cfg
, hostrcb
);
2608 case IPR_HOST_RCB_OVERLAY_ID_30
:
2609 ipr_log_sis64_fabric_error(ioa_cfg
, hostrcb
);
2611 case IPR_HOST_RCB_OVERLAY_ID_41
:
2612 ipr_log_sis64_service_required_error(ioa_cfg
, hostrcb
);
2614 case IPR_HOST_RCB_OVERLAY_ID_1
:
2615 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT
:
2617 ipr_log_generic_error(ioa_cfg
, hostrcb
);
2622 static struct ipr_hostrcb
*ipr_get_free_hostrcb(struct ipr_ioa_cfg
*ioa
)
2624 struct ipr_hostrcb
*hostrcb
;
2626 hostrcb
= list_first_entry_or_null(&ioa
->hostrcb_free_q
,
2627 struct ipr_hostrcb
, queue
);
2629 if (unlikely(!hostrcb
)) {
2630 dev_info(&ioa
->pdev
->dev
, "Reclaiming async error buffers.");
2631 hostrcb
= list_first_entry_or_null(&ioa
->hostrcb_report_q
,
2632 struct ipr_hostrcb
, queue
);
2635 list_del_init(&hostrcb
->queue
);
2640 * ipr_process_error - Op done function for an adapter error log.
2641 * @ipr_cmd: ipr command struct
2643 * This function is the op done function for an error log host
2644 * controlled async from the adapter. It will log the error and
2645 * send the HCAM back to the adapter.
2650 static void ipr_process_error(struct ipr_cmnd
*ipr_cmd
)
2652 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
2653 struct ipr_hostrcb
*hostrcb
= ipr_cmd
->u
.hostrcb
;
2654 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
2658 fd_ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error64
.fd_ioasc
);
2660 fd_ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
2662 list_del_init(&hostrcb
->queue
);
2663 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
2666 ipr_handle_log_data(ioa_cfg
, hostrcb
);
2667 if (fd_ioasc
== IPR_IOASC_NR_IOA_RESET_REQUIRED
)
2668 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_ABBREV
);
2669 } else if (ioasc
!= IPR_IOASC_IOA_WAS_RESET
&&
2670 ioasc
!= IPR_IOASC_ABORTED_CMD_TERM_BY_HOST
) {
2671 dev_err(&ioa_cfg
->pdev
->dev
,
2672 "Host RCB failed with IOASC: 0x%08X\n", ioasc
);
2675 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_report_q
);
2676 schedule_work(&ioa_cfg
->work_q
);
2677 hostrcb
= ipr_get_free_hostrcb(ioa_cfg
);
2679 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_LOG_DATA
, hostrcb
);
/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 **/
static void ipr_timeout(struct timer_list *t)
{
	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
2716 * ipr_oper_timeout - Adapter timed out transitioning to operational
2717 * @ipr_cmd: ipr command struct
2719 * This function blocks host requests and initiates an
2725 static void ipr_oper_timeout(struct timer_list
*t
)
2727 struct ipr_cmnd
*ipr_cmd
= from_timer(ipr_cmd
, t
, timer
);
2728 unsigned long lock_flags
= 0;
2729 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
2732 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
2734 ioa_cfg
->errors_logged
++;
2735 dev_err(&ioa_cfg
->pdev
->dev
,
2736 "Adapter timed out transitioning to operational.\n");
2738 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
2739 ioa_cfg
->sdt_state
= GET_DUMP
;
2741 if (!ioa_cfg
->in_reset_reload
|| ioa_cfg
->reset_cmd
== ipr_cmd
) {
2743 ioa_cfg
->reset_retries
+= IPR_NUM_RESET_RELOAD_RETRIES
;
2744 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
2747 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
2752 * ipr_find_ses_entry - Find matching SES in SES table
2753 * @res: resource entry struct of SES
2756 * pointer to SES table entry / NULL on failure
2758 static const struct ipr_ses_table_entry
*
2759 ipr_find_ses_entry(struct ipr_resource_entry
*res
)
2762 struct ipr_std_inq_vpids
*vpids
;
2763 const struct ipr_ses_table_entry
*ste
= ipr_ses_table
;
2765 for (i
= 0; i
< ARRAY_SIZE(ipr_ses_table
); i
++, ste
++) {
2766 for (j
= 0, matches
= 0; j
< IPR_PROD_ID_LEN
; j
++) {
2767 if (ste
->compare_product_id_byte
[j
] == 'X') {
2768 vpids
= &res
->std_inq_data
.vpids
;
2769 if (vpids
->product_id
[j
] == ste
->product_id
[j
])
2777 if (matches
== IPR_PROD_ID_LEN
)
/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
			continue;

		if (bus != res->bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
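
/*
 * Worked example (illustrative numbers): with a 16-bit bus (bus_width == 16)
 * and a matching SES table entry whose max_bus_speed_limit is 160, the loop
 * above computes (160 * 10) / (16 / 8) = 800, i.e. 80 MHz in the 100KHz units
 * this function returns, replacing the IPR_MAX_SCSI_RATE() default when an
 * enclosure imposes a lower limit.
 */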
2820 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2821 * @ioa_cfg: ioa config struct
2822 * @max_delay: max delay in micro-seconds to wait
2824 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2827 * 0 on success / other on failure
2829 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg
*ioa_cfg
, int max_delay
)
2831 volatile u32 pcii_reg
;
2834 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2835 while (delay
< max_delay
) {
2836 pcii_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
2838 if (pcii_reg
& IPR_PCII_IO_DEBUG_ACKNOWLEDGE
)
2841 /* udelay cannot be used if delay is more than a few milliseconds */
2842 if ((delay
/ 1000) > MAX_UDELAY_MS
)
2843 mdelay(delay
/ 1000);
2853 * ipr_get_sis64_dump_data_section - Dump IOA memory
2854 * @ioa_cfg: ioa config struct
2855 * @start_addr: adapter address to dump
2856 * @dest: destination kernel buffer
2857 * @length_in_words: length to dump in 4 byte words
2862 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg
*ioa_cfg
,
2864 __be32
*dest
, u32 length_in_words
)
2868 for (i
= 0; i
< length_in_words
; i
++) {
2869 writel(start_addr
+(i
*4), ioa_cfg
->regs
.dump_addr_reg
);
2870 *dest
= cpu_to_be32(readl(ioa_cfg
->regs
.dump_data_reg
));
2878 * ipr_get_ldump_data_section - Dump IOA memory
2879 * @ioa_cfg: ioa config struct
2880 * @start_addr: adapter address to dump
2881 * @dest: destination kernel buffer
2882 * @length_in_words: length to dump in 4 byte words
2885 * 0 on success / -EIO on failure
2887 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg
*ioa_cfg
,
2889 __be32
*dest
, u32 length_in_words
)
2891 volatile u32 temp_pcii_reg
;
2895 return ipr_get_sis64_dump_data_section(ioa_cfg
, start_addr
,
2896 dest
, length_in_words
);
2898 /* Write IOA interrupt reg starting LDUMP state */
2899 writel((IPR_UPROCI_RESET_ALERT
| IPR_UPROCI_IO_DEBUG_ALERT
),
2900 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2902 /* Wait for IO debug acknowledge */
2903 if (ipr_wait_iodbg_ack(ioa_cfg
,
2904 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC
)) {
2905 dev_err(&ioa_cfg
->pdev
->dev
,
2906 "IOA dump long data transfer timeout\n");
2910 /* Signal LDUMP interlocked - clear IO debug ack */
2911 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2912 ioa_cfg
->regs
.clr_interrupt_reg
);
2914 /* Write Mailbox with starting address */
2915 writel(start_addr
, ioa_cfg
->ioa_mailbox
);
2917 /* Signal address valid - clear IOA Reset alert */
2918 writel(IPR_UPROCI_RESET_ALERT
,
2919 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2921 for (i
= 0; i
< length_in_words
; i
++) {
2922 /* Wait for IO debug acknowledge */
2923 if (ipr_wait_iodbg_ack(ioa_cfg
,
2924 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
)) {
2925 dev_err(&ioa_cfg
->pdev
->dev
,
2926 "IOA dump short data transfer timeout\n");
2930 /* Read data from mailbox and increment destination pointer */
2931 *dest
= cpu_to_be32(readl(ioa_cfg
->ioa_mailbox
));
2934 /* For all but the last word of data, signal data received */
2935 if (i
< (length_in_words
- 1)) {
2936 /* Signal dump data received - Clear IO debug Ack */
2937 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2938 ioa_cfg
->regs
.clr_interrupt_reg
);
2942 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2943 writel(IPR_UPROCI_RESET_ALERT
,
2944 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2946 writel(IPR_UPROCI_IO_DEBUG_ALERT
,
2947 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2949 /* Signal dump data received - Clear IO debug Ack */
2950 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2951 ioa_cfg
->regs
.clr_interrupt_reg
);
2953 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2954 while (delay
< IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
) {
2956 readl(ioa_cfg
->regs
.sense_uproc_interrupt_reg32
);
2958 if (!(temp_pcii_reg
& IPR_UPROCI_RESET_ALERT
))
2968 #ifdef CONFIG_SCSI_IPR_DUMP
2970 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2971 * @ioa_cfg: ioa config struct
2972 * @pci_address: adapter address
2973 * @length: length of data to copy
2975 * Copy data from PCI adapter to kernel buffer.
2976 * Note: length MUST be a 4 byte multiple
2978 * 0 on success / other on failure
2980 static int ipr_sdt_copy(struct ipr_ioa_cfg
*ioa_cfg
,
2981 unsigned long pci_address
, u32 length
)
2983 int bytes_copied
= 0;
2984 int cur_len
, rc
, rem_len
, rem_page_len
, max_dump_size
;
2986 unsigned long lock_flags
= 0;
2987 struct ipr_ioa_dump
*ioa_dump
= &ioa_cfg
->dump
->ioa_dump
;
2990 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
2992 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
2994 while (bytes_copied
< length
&&
2995 (ioa_dump
->hdr
.len
+ bytes_copied
) < max_dump_size
) {
2996 if (ioa_dump
->page_offset
>= PAGE_SIZE
||
2997 ioa_dump
->page_offset
== 0) {
2998 page
= (__be32
*)__get_free_page(GFP_ATOMIC
);
3002 return bytes_copied
;
3005 ioa_dump
->page_offset
= 0;
3006 ioa_dump
->ioa_data
[ioa_dump
->next_page_index
] = page
;
3007 ioa_dump
->next_page_index
++;
3009 page
= ioa_dump
->ioa_data
[ioa_dump
->next_page_index
- 1];
3011 rem_len
= length
- bytes_copied
;
3012 rem_page_len
= PAGE_SIZE
- ioa_dump
->page_offset
;
3013 cur_len
= min(rem_len
, rem_page_len
);
3015 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3016 if (ioa_cfg
->sdt_state
== ABORT_DUMP
) {
3019 rc
= ipr_get_ldump_data_section(ioa_cfg
,
3020 pci_address
+ bytes_copied
,
3021 &page
[ioa_dump
->page_offset
/ 4],
3022 (cur_len
/ sizeof(u32
)));
3024 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3027 ioa_dump
->page_offset
+= cur_len
;
3028 bytes_copied
+= cur_len
;
3036 return bytes_copied
;
3040 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3041 * @hdr: dump entry header struct
3046 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header
*hdr
)
3048 hdr
->eye_catcher
= IPR_DUMP_EYE_CATCHER
;
3050 hdr
->offset
= sizeof(*hdr
);
3051 hdr
->status
= IPR_DUMP_STATUS_SUCCESS
;
3055 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3056 * @ioa_cfg: ioa config struct
3057 * @driver_dump: driver dump struct
3062 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg
*ioa_cfg
,
3063 struct ipr_driver_dump
*driver_dump
)
3065 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
3067 ipr_init_dump_entry_hdr(&driver_dump
->ioa_type_entry
.hdr
);
3068 driver_dump
->ioa_type_entry
.hdr
.len
=
3069 sizeof(struct ipr_dump_ioa_type_entry
) -
3070 sizeof(struct ipr_dump_entry_header
);
3071 driver_dump
->ioa_type_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3072 driver_dump
->ioa_type_entry
.hdr
.id
= IPR_DUMP_DRIVER_TYPE_ID
;
3073 driver_dump
->ioa_type_entry
.type
= ioa_cfg
->type
;
3074 driver_dump
->ioa_type_entry
.fw_version
= (ucode_vpd
->major_release
<< 24) |
3075 (ucode_vpd
->card_type
<< 16) | (ucode_vpd
->minor_release
[0] << 8) |
3076 ucode_vpd
->minor_release
[1];
3077 driver_dump
->hdr
.num_entries
++;
3081 * ipr_dump_version_data - Fill in the driver version in the dump.
3082 * @ioa_cfg: ioa config struct
3083 * @driver_dump: driver dump struct
3088 static void ipr_dump_version_data(struct ipr_ioa_cfg
*ioa_cfg
,
3089 struct ipr_driver_dump
*driver_dump
)
3091 ipr_init_dump_entry_hdr(&driver_dump
->version_entry
.hdr
);
3092 driver_dump
->version_entry
.hdr
.len
=
3093 sizeof(struct ipr_dump_version_entry
) -
3094 sizeof(struct ipr_dump_entry_header
);
3095 driver_dump
->version_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_ASCII
;
3096 driver_dump
->version_entry
.hdr
.id
= IPR_DUMP_DRIVER_VERSION_ID
;
3097 strcpy(driver_dump
->version_entry
.version
, IPR_DRIVER_VERSION
);
3098 driver_dump
->hdr
.num_entries
++;
3102 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3103 * @ioa_cfg: ioa config struct
3104 * @driver_dump: driver dump struct
3109 static void ipr_dump_trace_data(struct ipr_ioa_cfg
*ioa_cfg
,
3110 struct ipr_driver_dump
*driver_dump
)
3112 ipr_init_dump_entry_hdr(&driver_dump
->trace_entry
.hdr
);
3113 driver_dump
->trace_entry
.hdr
.len
=
3114 sizeof(struct ipr_dump_trace_entry
) -
3115 sizeof(struct ipr_dump_entry_header
);
3116 driver_dump
->trace_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3117 driver_dump
->trace_entry
.hdr
.id
= IPR_DUMP_TRACE_ID
;
3118 memcpy(driver_dump
->trace_entry
.trace
, ioa_cfg
->trace
, IPR_TRACE_SIZE
);
3119 driver_dump
->hdr
.num_entries
++;
3123 * ipr_dump_location_data - Fill in the IOA location in the dump.
3124 * @ioa_cfg: ioa config struct
3125 * @driver_dump: driver dump struct
3130 static void ipr_dump_location_data(struct ipr_ioa_cfg
*ioa_cfg
,
3131 struct ipr_driver_dump
*driver_dump
)
3133 ipr_init_dump_entry_hdr(&driver_dump
->location_entry
.hdr
);
3134 driver_dump
->location_entry
.hdr
.len
=
3135 sizeof(struct ipr_dump_location_entry
) -
3136 sizeof(struct ipr_dump_entry_header
);
3137 driver_dump
->location_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_ASCII
;
3138 driver_dump
->location_entry
.hdr
.id
= IPR_DUMP_LOCATION_ID
;
3139 strcpy(driver_dump
->location_entry
.location
, dev_name(&ioa_cfg
->pdev
->dev
));
3140 driver_dump
->hdr
.num_entries
++;
3144 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3145 * @ioa_cfg: ioa config struct
3146 * @dump: dump struct
3151 static void ipr_get_ioa_dump(struct ipr_ioa_cfg
*ioa_cfg
, struct ipr_dump
*dump
)
3153 unsigned long start_addr
, sdt_word
;
3154 unsigned long lock_flags
= 0;
3155 struct ipr_driver_dump
*driver_dump
= &dump
->driver_dump
;
3156 struct ipr_ioa_dump
*ioa_dump
= &dump
->ioa_dump
;
3157 u32 num_entries
, max_num_entries
, start_off
, end_off
;
3158 u32 max_dump_size
, bytes_to_copy
, bytes_copied
, rc
;
3159 struct ipr_sdt
*sdt
;
3165 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3167 if (ioa_cfg
->sdt_state
!= READ_DUMP
) {
3168 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3172 if (ioa_cfg
->sis64
) {
3173 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3174 ssleep(IPR_DUMP_DELAY_SECONDS
);
3175 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3178 start_addr
= readl(ioa_cfg
->ioa_mailbox
);
3180 if (!ioa_cfg
->sis64
&& !ipr_sdt_is_fmt2(start_addr
)) {
3181 dev_err(&ioa_cfg
->pdev
->dev
,
3182 "Invalid dump table format: %lx\n", start_addr
);
3183 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3187 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA initiated\n");
3189 driver_dump
->hdr
.eye_catcher
= IPR_DUMP_EYE_CATCHER
;
3191 /* Initialize the overall dump header */
3192 driver_dump
->hdr
.len
= sizeof(struct ipr_driver_dump
);
3193 driver_dump
->hdr
.num_entries
= 1;
3194 driver_dump
->hdr
.first_entry_offset
= sizeof(struct ipr_dump_header
);
3195 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_SUCCESS
;
3196 driver_dump
->hdr
.os
= IPR_DUMP_OS_LINUX
;
3197 driver_dump
->hdr
.driver_name
= IPR_DUMP_DRIVER_NAME
;
3199 ipr_dump_version_data(ioa_cfg
, driver_dump
);
3200 ipr_dump_location_data(ioa_cfg
, driver_dump
);
3201 ipr_dump_ioa_type_data(ioa_cfg
, driver_dump
);
3202 ipr_dump_trace_data(ioa_cfg
, driver_dump
);
3204 /* Update dump_header */
3205 driver_dump
->hdr
.len
+= sizeof(struct ipr_dump_entry_header
);
3207 /* IOA Dump entry */
3208 ipr_init_dump_entry_hdr(&ioa_dump
->hdr
);
3209 ioa_dump
->hdr
.len
= 0;
3210 ioa_dump
->hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3211 ioa_dump
->hdr
.id
= IPR_DUMP_IOA_DUMP_ID
;
3213 /* First entries in sdt are actually a list of dump addresses and
3214 lengths to gather the real dump data. sdt represents the pointer
3215 to the ioa generated dump table. Dump data will be extracted based
3216 on entries in this table */
3217 sdt
= &ioa_dump
->sdt
;
3219 if (ioa_cfg
->sis64
) {
3220 max_num_entries
= IPR_FMT3_NUM_SDT_ENTRIES
;
3221 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
3223 max_num_entries
= IPR_FMT2_NUM_SDT_ENTRIES
;
3224 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
3227 bytes_to_copy
= offsetof(struct ipr_sdt
, entry
) +
3228 (max_num_entries
* sizeof(struct ipr_sdt_entry
));
3229 rc
= ipr_get_ldump_data_section(ioa_cfg
, start_addr
, (__be32
*)sdt
,
3230 bytes_to_copy
/ sizeof(__be32
));
3232 /* Smart Dump table is ready to use and the first entry is valid */
3233 if (rc
|| ((be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT3_SDT_READY_TO_USE
) &&
3234 (be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT2_SDT_READY_TO_USE
))) {
3235 dev_err(&ioa_cfg
->pdev
->dev
,
3236 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3237 rc
, be32_to_cpu(sdt
->hdr
.state
));
3238 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_FAILED
;
3239 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
3240 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3244 num_entries
= be32_to_cpu(sdt
->hdr
.num_entries_used
);
3246 if (num_entries
> max_num_entries
)
3247 num_entries
= max_num_entries
;
3249 /* Update dump length to the actual data to be copied */
3250 dump
->driver_dump
.hdr
.len
+= sizeof(struct ipr_sdt_header
);
3252 dump
->driver_dump
.hdr
.len
+= num_entries
* sizeof(struct ipr_sdt_entry
);
3254 dump
->driver_dump
.hdr
.len
+= max_num_entries
* sizeof(struct ipr_sdt_entry
);
3256 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3258 for (i
= 0; i
< num_entries
; i
++) {
3259 if (ioa_dump
->hdr
.len
> max_dump_size
) {
3260 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3264 if (sdt
->entry
[i
].flags
& IPR_SDT_VALID_ENTRY
) {
3265 sdt_word
= be32_to_cpu(sdt
->entry
[i
].start_token
);
3267 bytes_to_copy
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3269 start_off
= sdt_word
& IPR_FMT2_MBX_ADDR_MASK
;
3270 end_off
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3272 if (ipr_sdt_is_fmt2(sdt_word
) && sdt_word
)
3273 bytes_to_copy
= end_off
- start_off
;
3278 if (bytes_to_copy
> max_dump_size
) {
3279 sdt
->entry
[i
].flags
&= ~IPR_SDT_VALID_ENTRY
;
3283 /* Copy data from adapter to driver buffers */
3284 bytes_copied
= ipr_sdt_copy(ioa_cfg
, sdt_word
,
3287 ioa_dump
->hdr
.len
+= bytes_copied
;
3289 if (bytes_copied
!= bytes_to_copy
) {
3290 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3297 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA completed.\n");
3299 /* Update dump_header */
3300 driver_dump
->hdr
.len
+= ioa_dump
->hdr
.len
;
3302 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
3307 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3311 * ipr_release_dump - Free adapter dump memory
3312 * @kref: kref struct
3317 static void ipr_release_dump(struct kref
*kref
)
3319 struct ipr_dump
*dump
= container_of(kref
, struct ipr_dump
, kref
);
3320 struct ipr_ioa_cfg
*ioa_cfg
= dump
->ioa_cfg
;
3321 unsigned long lock_flags
= 0;
3325 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3326 ioa_cfg
->dump
= NULL
;
3327 ioa_cfg
->sdt_state
= INACTIVE
;
3328 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3330 for (i
= 0; i
< dump
->ioa_dump
.next_page_index
; i
++)
3331 free_page((unsigned long) dump
->ioa_dump
.ioa_data
[i
]);
3333 vfree(dump
->ioa_dump
.ioa_data
);
3339 * ipr_worker_thread - Worker thread
3340 * @work: ioa config struct
3342 * Called at task level from a work thread. This function takes care
3343 * of adding and removing device from the mid-layer as configuration
3344 * changes are detected by the adapter.
3349 static void ipr_worker_thread(struct work_struct
*work
)
3351 unsigned long lock_flags
;
3352 struct ipr_resource_entry
*res
;
3353 struct scsi_device
*sdev
;
3354 struct ipr_dump
*dump
;
3355 struct ipr_ioa_cfg
*ioa_cfg
=
3356 container_of(work
, struct ipr_ioa_cfg
, work_q
);
3357 u8 bus
, target
, lun
;
3361 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3363 if (ioa_cfg
->sdt_state
== READ_DUMP
) {
3364 dump
= ioa_cfg
->dump
;
3366 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3369 kref_get(&dump
->kref
);
3370 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3371 ipr_get_ioa_dump(ioa_cfg
, dump
);
3372 kref_put(&dump
->kref
, ipr_release_dump
);
3374 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3375 if (ioa_cfg
->sdt_state
== DUMP_OBTAINED
&& !ioa_cfg
->dump_timeout
)
3376 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3377 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3381 if (ioa_cfg
->scsi_unblock
) {
3382 ioa_cfg
->scsi_unblock
= 0;
3383 ioa_cfg
->scsi_blocked
= 0;
3384 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3385 scsi_unblock_requests(ioa_cfg
->host
);
3386 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3387 if (ioa_cfg
->scsi_blocked
)
3388 scsi_block_requests(ioa_cfg
->host
);
3391 if (!ioa_cfg
->scan_enabled
) {
3392 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3399 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
) {
3400 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3404 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3405 if (res
->del_from_ml
&& res
->sdev
) {
3408 if (!scsi_device_get(sdev
)) {
3409 if (!res
->add_to_ml
)
3410 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
3412 res
->del_from_ml
= 0;
3413 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3414 scsi_remove_device(sdev
);
3415 scsi_device_put(sdev
);
3416 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3423 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3424 if (res
->add_to_ml
) {
3426 target
= res
->target
;
3429 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3430 scsi_add_device(ioa_cfg
->host
, bus
, target
, lun
);
3431 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3436 ioa_cfg
->scan_done
= 1;
3437 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3438 kobject_uevent(&ioa_cfg
->host
->shost_dev
.kobj
, KOBJ_CHANGE
);
3442 #ifdef CONFIG_SCSI_IPR_TRACE
3444 * ipr_read_trace - Dump the adapter trace
3445 * @filp: open sysfs file
3446 * @kobj: kobject struct
3447 * @bin_attr: bin_attribute struct
3450 * @count: buffer size
3453 * number of bytes printed to buffer
3455 static ssize_t
ipr_read_trace(struct file
*filp
, struct kobject
*kobj
,
3456 struct bin_attribute
*bin_attr
,
3457 char *buf
, loff_t off
, size_t count
)
3459 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
3460 struct Scsi_Host
*shost
= class_to_shost(dev
);
3461 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3462 unsigned long lock_flags
= 0;
3465 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3466 ret
= memory_read_from_buffer(buf
, count
, &off
, ioa_cfg
->trace
,
3468 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3473 static struct bin_attribute ipr_trace_attr
= {
3479 .read
= ipr_read_trace
,
3484 * ipr_show_fw_version - Show the firmware version
3485 * @dev: class device struct
3489 * number of bytes printed to buffer
3491 static ssize_t
ipr_show_fw_version(struct device
*dev
,
3492 struct device_attribute
*attr
, char *buf
)
3494 struct Scsi_Host
*shost
= class_to_shost(dev
);
3495 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3496 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
3497 unsigned long lock_flags
= 0;
3500 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3501 len
= snprintf(buf
, PAGE_SIZE
, "%02X%02X%02X%02X\n",
3502 ucode_vpd
->major_release
, ucode_vpd
->card_type
,
3503 ucode_vpd
->minor_release
[0],
3504 ucode_vpd
->minor_release
[1]);
3505 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3509 static struct device_attribute ipr_fw_version_attr
= {
3511 .name
= "fw_version",
3514 .show
= ipr_show_fw_version
,
3518 * ipr_show_log_level - Show the adapter's error logging level
3519 * @dev: class device struct
3523 * number of bytes printed to buffer
3525 static ssize_t
ipr_show_log_level(struct device
*dev
,
3526 struct device_attribute
*attr
, char *buf
)
3528 struct Scsi_Host
*shost
= class_to_shost(dev
);
3529 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3530 unsigned long lock_flags
= 0;
3533 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3534 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->log_level
);
3535 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3540 * ipr_store_log_level - Change the adapter's error logging level
3541 * @dev: class device struct
3545 * number of bytes printed to buffer
3547 static ssize_t
ipr_store_log_level(struct device
*dev
,
3548 struct device_attribute
*attr
,
3549 const char *buf
, size_t count
)
3551 struct Scsi_Host
*shost
= class_to_shost(dev
);
3552 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3553 unsigned long lock_flags
= 0;
3555 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3556 ioa_cfg
->log_level
= simple_strtoul(buf
, NULL
, 10);
3557 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3561 static struct device_attribute ipr_log_level_attr
= {
3563 .name
= "log_level",
3564 .mode
= S_IRUGO
| S_IWUSR
,
3566 .show
= ipr_show_log_level
,
3567 .store
= ipr_store_log_level
3571 * ipr_store_diagnostics - IOA Diagnostics interface
3572 * @dev: device struct
3574 * @count: buffer size
3576 * This function will reset the adapter and wait a reasonable
3577 * amount of time for any errors that the adapter might log.
3580 * count on success / other on failure
3582 static ssize_t
ipr_store_diagnostics(struct device
*dev
,
3583 struct device_attribute
*attr
,
3584 const char *buf
, size_t count
)
3586 struct Scsi_Host
*shost
= class_to_shost(dev
);
3587 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3588 unsigned long lock_flags
= 0;
3591 if (!capable(CAP_SYS_ADMIN
))
3594 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3595 while (ioa_cfg
->in_reset_reload
) {
3596 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3597 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3598 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3601 ioa_cfg
->errors_logged
= 0;
3602 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3604 if (ioa_cfg
->in_reset_reload
) {
3605 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3606 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3608 /* Wait for a second for any errors to be logged */
3611 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3615 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3616 if (ioa_cfg
->in_reset_reload
|| ioa_cfg
->errors_logged
)
3618 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3623 static struct device_attribute ipr_diagnostics_attr
= {
3625 .name
= "run_diagnostics",
3628 .store
= ipr_store_diagnostics
3632 * ipr_show_adapter_state - Show the adapter's state
3633 * @class_dev: device struct
3637 * number of bytes printed to buffer
3639 static ssize_t
ipr_show_adapter_state(struct device
*dev
,
3640 struct device_attribute
*attr
, char *buf
)
3642 struct Scsi_Host
*shost
= class_to_shost(dev
);
3643 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3644 unsigned long lock_flags
= 0;
3647 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3648 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
3649 len
= snprintf(buf
, PAGE_SIZE
, "offline\n");
3651 len
= snprintf(buf
, PAGE_SIZE
, "online\n");
3652 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3657 * ipr_store_adapter_state - Change adapter state
3658 * @dev: device struct
3660 * @count: buffer size
3662 * This function will change the adapter's state.
3665 * count on success / other on failure
3667 static ssize_t
ipr_store_adapter_state(struct device
*dev
,
3668 struct device_attribute
*attr
,
3669 const char *buf
, size_t count
)
3671 struct Scsi_Host
*shost
= class_to_shost(dev
);
3672 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3673 unsigned long lock_flags
;
3674 int result
= count
, i
;
3676 if (!capable(CAP_SYS_ADMIN
))
3679 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3680 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
&&
3681 !strncmp(buf
, "online", 6)) {
3682 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
3683 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
3684 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 0;
3685 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
3688 ioa_cfg
->reset_retries
= 0;
3689 ioa_cfg
->in_ioa_bringdown
= 0;
3690 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3692 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3693 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3698 static struct device_attribute ipr_ioa_state_attr
= {
3700 .name
= "online_state",
3701 .mode
= S_IRUGO
| S_IWUSR
,
3703 .show
= ipr_show_adapter_state
,
3704 .store
= ipr_store_adapter_state
3708 * ipr_store_reset_adapter - Reset the adapter
3709 * @dev: device struct
3711 * @count: buffer size
3713 * This function will reset the adapter.
3716 * count on success / other on failure
3718 static ssize_t
ipr_store_reset_adapter(struct device
*dev
,
3719 struct device_attribute
*attr
,
3720 const char *buf
, size_t count
)
3722 struct Scsi_Host
*shost
= class_to_shost(dev
);
3723 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3724 unsigned long lock_flags
;
3727 if (!capable(CAP_SYS_ADMIN
))
3730 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3731 if (!ioa_cfg
->in_reset_reload
)
3732 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3733 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3734 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3739 static struct device_attribute ipr_ioa_reset_attr
= {
3741 .name
= "reset_host",
3744 .store
= ipr_store_reset_adapter
3747 static int ipr_iopoll(struct irq_poll
*iop
, int budget
);
3749 * ipr_show_iopoll_weight - Show ipr polling mode
3750 * @dev: class device struct
3754 * number of bytes printed to buffer
3756 static ssize_t
ipr_show_iopoll_weight(struct device
*dev
,
3757 struct device_attribute
*attr
, char *buf
)
3759 struct Scsi_Host
*shost
= class_to_shost(dev
);
3760 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3761 unsigned long lock_flags
= 0;
3764 spin_lock_irqsave(shost
->host_lock
, lock_flags
);
3765 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->iopoll_weight
);
3766 spin_unlock_irqrestore(shost
->host_lock
, lock_flags
);
3772 * ipr_store_iopoll_weight - Change the adapter's polling mode
3773 * @dev: class device struct
3777 * number of bytes printed to buffer
3779 static ssize_t
ipr_store_iopoll_weight(struct device
*dev
,
3780 struct device_attribute
*attr
,
3781 const char *buf
, size_t count
)
3783 struct Scsi_Host
*shost
= class_to_shost(dev
);
3784 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3785 unsigned long user_iopoll_weight
;
3786 unsigned long lock_flags
= 0;
3789 if (!ioa_cfg
->sis64
) {
3790 dev_info(&ioa_cfg
->pdev
->dev
, "irq_poll not supported on this adapter\n");
3793 if (kstrtoul(buf
, 10, &user_iopoll_weight
))
3796 if (user_iopoll_weight
> 256) {
3797 dev_info(&ioa_cfg
->pdev
->dev
, "Invalid irq_poll weight. It must be less than 256\n");
3801 if (user_iopoll_weight
== ioa_cfg
->iopoll_weight
) {
3802 dev_info(&ioa_cfg
->pdev
->dev
, "Current irq_poll weight has the same weight\n");
3806 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
3807 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++)
3808 irq_poll_disable(&ioa_cfg
->hrrq
[i
].iopoll
);
3811 spin_lock_irqsave(shost
->host_lock
, lock_flags
);
3812 ioa_cfg
->iopoll_weight
= user_iopoll_weight
;
3813 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
3814 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++) {
3815 irq_poll_init(&ioa_cfg
->hrrq
[i
].iopoll
,
3816 ioa_cfg
->iopoll_weight
, ipr_iopoll
);
3819 spin_unlock_irqrestore(shost
->host_lock
, lock_flags
);
3824 static struct device_attribute ipr_iopoll_weight_attr
= {
3826 .name
= "iopoll_weight",
3827 .mode
= S_IRUGO
| S_IWUSR
,
3829 .show
= ipr_show_iopoll_weight
,
3830 .store
= ipr_store_iopoll_weight
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:		buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 * 	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order;
	struct ipr_sglist *sglist;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	sglist->order = order;
	sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
					      &sglist->num_sg);
	if (!sglist->scatterlist) {
		kfree(sglist);
		return NULL;
	}

	return sglist;
}

/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:		scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	sgl_free_order(sglist->scatterlist, sglist->order);
	kfree(sglist);
}
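/*
 * Sizing example (editorial sketch): ipr_alloc_ucode_buffer() splits the
 * image across at most IPR_MAX_SGLIST - 1 scatter/gather elements and then
 * rounds each element up to a power-of-two number of pages.  Assuming
 * IPR_MAX_SGLIST is 64, 4 KB pages and a 4 MB image:
 *
 *	sg_size = 4 MB / 63 ~= 66 KB  ->  get_order(66 KB) = 5
 *
 * so sgl_alloc_order() builds the list from 128 KB (order-5) chunks.
 */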
/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:		scatter/gather list pointer
 * @buffer:		buffer pointer
 * @len:		buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	/* Copy all of the full-sized elements */
	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;
	}

	/* Copy any remaining partial element */
	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}
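/*
 * Worked example (editorial): with order-5 elements (bsize_elem = 128 KB),
 * a 300 KB image is copied as two full 128 KB chunks by the first loop and
 * a trailing 44 KB chunk by the "len % bsize_elem" branch, with each
 * scatterlist entry's length trimmed to the bytes actually used.
 */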
/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:		scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:		scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
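/*
 * Editorial note: the two builders above differ only in descriptor layout.
 * The SIS64 form (ipr_build_ucode_ioadl64) carries flags, length and a
 * 64-bit address in separate fields, while the legacy form packs flags and
 * length into a single 32-bit word next to a 32-bit address.  Both mark the
 * final descriptor with IPR_IOADL_FLAGS_LAST so the IOA knows where the
 * list ends.
 */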
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:		scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
					sglist->scatterlist, sglist->num_sg,
					DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
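/*
 * Editorial note: the download itself is driven by the reset job.  This
 * routine only waits out any reset already in flight, DMA-maps the staged
 * scatter/gather list, publishes it through ioa_cfg->ucode_sglist and then
 * kicks an IPR_SHUTDOWN_NORMAL reset; the reset state machine is expected
 * to pick the list up and write it to the adapter before the pointer is
 * cleared again here.
 */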
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	char *endline;
	u8 *src;
	int result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	snprintf(fname, sizeof(fname), "%s", buf);

	endline = strchr(fname, '\n');
	if (endline)
		*endline = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
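/*
 * Usage note (editorial): the value written to update_fw is treated as a
 * firmware file name and handed to request_firmware(), so the image must be
 * reachable through the kernel's firmware search path (typically under
 * /lib/firmware).  With a hypothetical image name, for example:
 *
 *	echo ibm-adapter-ucode.img > /sys/class/scsi_host/hostN/update_fw
 *
 * A trailing newline from echo is stripped by the store routine above.
 */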
/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return len;
}

static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name =		"fw_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_type
};
4165 static ssize_t
ipr_read_async_err_log(struct file
*filep
, struct kobject
*kobj
,
4166 struct bin_attribute
*bin_attr
, char *buf
,
4167 loff_t off
, size_t count
)
4169 struct device
*cdev
= container_of(kobj
, struct device
, kobj
);
4170 struct Scsi_Host
*shost
= class_to_shost(cdev
);
4171 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4172 struct ipr_hostrcb
*hostrcb
;
4173 unsigned long lock_flags
= 0;
4176 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4177 hostrcb
= list_first_entry_or_null(&ioa_cfg
->hostrcb_report_q
,
4178 struct ipr_hostrcb
, queue
);
4180 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4183 ret
= memory_read_from_buffer(buf
, count
, &off
, &hostrcb
->hcam
,
4184 sizeof(hostrcb
->hcam
));
4185 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4189 static ssize_t
ipr_next_async_err_log(struct file
*filep
, struct kobject
*kobj
,
4190 struct bin_attribute
*bin_attr
, char *buf
,
4191 loff_t off
, size_t count
)
4193 struct device
*cdev
= container_of(kobj
, struct device
, kobj
);
4194 struct Scsi_Host
*shost
= class_to_shost(cdev
);
4195 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4196 struct ipr_hostrcb
*hostrcb
;
4197 unsigned long lock_flags
= 0;
4199 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4200 hostrcb
= list_first_entry_or_null(&ioa_cfg
->hostrcb_report_q
,
4201 struct ipr_hostrcb
, queue
);
4203 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4207 /* Reclaim hostrcb before exit */
4208 list_move_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_free_q
);
4209 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4213 static struct bin_attribute ipr_ioa_async_err_log
= {
4215 .name
= "async_err_log",
4216 .mode
= S_IRUGO
| S_IWUSR
,
4219 .read
= ipr_read_async_err_log
,
4220 .write
= ipr_next_async_err_log
static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	&ipr_iopoll_weight_attr,
	NULL,
};
4235 #ifdef CONFIG_SCSI_IPR_DUMP
4237 * ipr_read_dump - Dump the adapter
4238 * @filp: open sysfs file
4239 * @kobj: kobject struct
4240 * @bin_attr: bin_attribute struct
4243 * @count: buffer size
4246 * number of bytes printed to buffer
4248 static ssize_t
ipr_read_dump(struct file
*filp
, struct kobject
*kobj
,
4249 struct bin_attribute
*bin_attr
,
4250 char *buf
, loff_t off
, size_t count
)
4252 struct device
*cdev
= container_of(kobj
, struct device
, kobj
);
4253 struct Scsi_Host
*shost
= class_to_shost(cdev
);
4254 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4255 struct ipr_dump
*dump
;
4256 unsigned long lock_flags
= 0;
4261 if (!capable(CAP_SYS_ADMIN
))
4264 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4265 dump
= ioa_cfg
->dump
;
4267 if (ioa_cfg
->sdt_state
!= DUMP_OBTAINED
|| !dump
) {
4268 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4271 kref_get(&dump
->kref
);
4272 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4274 if (off
> dump
->driver_dump
.hdr
.len
) {
4275 kref_put(&dump
->kref
, ipr_release_dump
);
4279 if (off
+ count
> dump
->driver_dump
.hdr
.len
) {
4280 count
= dump
->driver_dump
.hdr
.len
- off
;
4284 if (count
&& off
< sizeof(dump
->driver_dump
)) {
4285 if (off
+ count
> sizeof(dump
->driver_dump
))
4286 len
= sizeof(dump
->driver_dump
) - off
;
4289 src
= (u8
*)&dump
->driver_dump
+ off
;
4290 memcpy(buf
, src
, len
);
4296 off
-= sizeof(dump
->driver_dump
);
4299 sdt_end
= offsetof(struct ipr_ioa_dump
, sdt
.entry
) +
4300 (be32_to_cpu(dump
->ioa_dump
.sdt
.hdr
.num_entries_used
) *
4301 sizeof(struct ipr_sdt_entry
));
4303 sdt_end
= offsetof(struct ipr_ioa_dump
, sdt
.entry
) +
4304 (IPR_FMT2_NUM_SDT_ENTRIES
* sizeof(struct ipr_sdt_entry
));
4306 if (count
&& off
< sdt_end
) {
4307 if (off
+ count
> sdt_end
)
4308 len
= sdt_end
- off
;
4311 src
= (u8
*)&dump
->ioa_dump
+ off
;
4312 memcpy(buf
, src
, len
);
4321 if ((off
& PAGE_MASK
) != ((off
+ count
) & PAGE_MASK
))
4322 len
= PAGE_ALIGN(off
) - off
;
4325 src
= (u8
*)dump
->ioa_dump
.ioa_data
[(off
& PAGE_MASK
) >> PAGE_SHIFT
];
4326 src
+= off
& ~PAGE_MASK
;
4327 memcpy(buf
, src
, len
);
4333 kref_put(&dump
->kref
, ipr_release_dump
);
4338 * ipr_alloc_dump - Prepare for adapter dump
4339 * @ioa_cfg: ioa config struct
4342 * 0 on success / other on failure
4344 static int ipr_alloc_dump(struct ipr_ioa_cfg
*ioa_cfg
)
4346 struct ipr_dump
*dump
;
4348 unsigned long lock_flags
= 0;
4350 dump
= kzalloc(sizeof(struct ipr_dump
), GFP_KERNEL
);
4353 ipr_err("Dump memory allocation failed\n");
4358 ioa_data
= vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES
,
4361 ioa_data
= vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES
,
4365 ipr_err("Dump memory allocation failed\n");
4370 dump
->ioa_dump
.ioa_data
= ioa_data
;
4372 kref_init(&dump
->kref
);
4373 dump
->ioa_cfg
= ioa_cfg
;
4375 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4377 if (INACTIVE
!= ioa_cfg
->sdt_state
) {
4378 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4379 vfree(dump
->ioa_dump
.ioa_data
);
4384 ioa_cfg
->dump
= dump
;
4385 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
4386 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
&& !ioa_cfg
->dump_taken
) {
4387 ioa_cfg
->dump_taken
= 1;
4388 schedule_work(&ioa_cfg
->work_q
);
4390 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4396 * ipr_free_dump - Free adapter dump memory
4397 * @ioa_cfg: ioa config struct
4400 * 0 on success / other on failure
4402 static int ipr_free_dump(struct ipr_ioa_cfg
*ioa_cfg
)
4404 struct ipr_dump
*dump
;
4405 unsigned long lock_flags
= 0;
4409 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4410 dump
= ioa_cfg
->dump
;
4412 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4416 ioa_cfg
->dump
= NULL
;
4417 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4419 kref_put(&dump
->kref
, ipr_release_dump
);
4426 * ipr_write_dump - Setup dump state of adapter
4427 * @filp: open sysfs file
4428 * @kobj: kobject struct
4429 * @bin_attr: bin_attribute struct
4432 * @count: buffer size
4435 * number of bytes printed to buffer
4437 static ssize_t
ipr_write_dump(struct file
*filp
, struct kobject
*kobj
,
4438 struct bin_attribute
*bin_attr
,
4439 char *buf
, loff_t off
, size_t count
)
4441 struct device
*cdev
= container_of(kobj
, struct device
, kobj
);
4442 struct Scsi_Host
*shost
= class_to_shost(cdev
);
4443 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4446 if (!capable(CAP_SYS_ADMIN
))
4450 rc
= ipr_alloc_dump(ioa_cfg
);
4451 else if (buf
[0] == '0')
4452 rc
= ipr_free_dump(ioa_cfg
);
4462 static struct bin_attribute ipr_dump_attr
= {
4465 .mode
= S_IRUSR
| S_IWUSR
,
4468 .read
= ipr_read_dump
,
4469 .write
= ipr_write_dump
4472 static int ipr_free_dump(struct ipr_ioa_cfg
*ioa_cfg
) { return 0; };
4476 * ipr_change_queue_depth - Change the device's queue depth
4477 * @sdev: scsi device struct
4478 * @qdepth: depth to set
4479 * @reason: calling context
4484 static int ipr_change_queue_depth(struct scsi_device
*sdev
, int qdepth
)
4486 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4487 struct ipr_resource_entry
*res
;
4488 unsigned long lock_flags
= 0;
4490 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4491 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4493 if (res
&& ipr_is_gata(res
) && qdepth
> IPR_MAX_CMD_PER_ATA_LUN
)
4494 qdepth
= IPR_MAX_CMD_PER_ATA_LUN
;
4495 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4497 scsi_change_queue_depth(sdev
, qdepth
);
4498 return sdev
->queue_depth
;
4502 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4503 * @dev: device struct
4504 * @attr: device attribute structure
4508 * number of bytes printed to buffer
4510 static ssize_t
ipr_show_adapter_handle(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
4512 struct scsi_device
*sdev
= to_scsi_device(dev
);
4513 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4514 struct ipr_resource_entry
*res
;
4515 unsigned long lock_flags
= 0;
4516 ssize_t len
= -ENXIO
;
4518 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4519 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4521 len
= snprintf(buf
, PAGE_SIZE
, "%08X\n", res
->res_handle
);
4522 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4526 static struct device_attribute ipr_adapter_handle_attr
= {
4528 .name
= "adapter_handle",
4531 .show
= ipr_show_adapter_handle
4535 * ipr_show_resource_path - Show the resource path or the resource address for
4537 * @dev: device struct
4538 * @attr: device attribute structure
4542 * number of bytes printed to buffer
4544 static ssize_t
ipr_show_resource_path(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
4546 struct scsi_device
*sdev
= to_scsi_device(dev
);
4547 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4548 struct ipr_resource_entry
*res
;
4549 unsigned long lock_flags
= 0;
4550 ssize_t len
= -ENXIO
;
4551 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
4553 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4554 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4555 if (res
&& ioa_cfg
->sis64
)
4556 len
= snprintf(buf
, PAGE_SIZE
, "%s\n",
4557 __ipr_format_res_path(res
->res_path
, buffer
,
4560 len
= snprintf(buf
, PAGE_SIZE
, "%d:%d:%d:%d\n", ioa_cfg
->host
->host_no
,
4561 res
->bus
, res
->target
, res
->lun
);
4563 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4567 static struct device_attribute ipr_resource_path_attr
= {
4569 .name
= "resource_path",
4572 .show
= ipr_show_resource_path
4576 * ipr_show_device_id - Show the device_id for this device.
4577 * @dev: device struct
4578 * @attr: device attribute structure
4582 * number of bytes printed to buffer
4584 static ssize_t
ipr_show_device_id(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
4586 struct scsi_device
*sdev
= to_scsi_device(dev
);
4587 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4588 struct ipr_resource_entry
*res
;
4589 unsigned long lock_flags
= 0;
4590 ssize_t len
= -ENXIO
;
4592 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4593 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4594 if (res
&& ioa_cfg
->sis64
)
4595 len
= snprintf(buf
, PAGE_SIZE
, "0x%llx\n", be64_to_cpu(res
->dev_id
));
4597 len
= snprintf(buf
, PAGE_SIZE
, "0x%llx\n", res
->lun_wwn
);
4599 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4603 static struct device_attribute ipr_device_id_attr
= {
4605 .name
= "device_id",
4608 .show
= ipr_show_device_id
4612 * ipr_show_resource_type - Show the resource type for this device.
4613 * @dev: device struct
4614 * @attr: device attribute structure
4618 * number of bytes printed to buffer
4620 static ssize_t
ipr_show_resource_type(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
4622 struct scsi_device
*sdev
= to_scsi_device(dev
);
4623 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4624 struct ipr_resource_entry
*res
;
4625 unsigned long lock_flags
= 0;
4626 ssize_t len
= -ENXIO
;
4628 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4629 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4632 len
= snprintf(buf
, PAGE_SIZE
, "%x\n", res
->type
);
4634 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4638 static struct device_attribute ipr_resource_type_attr
= {
4640 .name
= "resource_type",
4643 .show
= ipr_show_resource_type
4647 * ipr_show_raw_mode - Show the adapter's raw mode
4648 * @dev: class device struct
4652 * number of bytes printed to buffer
4654 static ssize_t
ipr_show_raw_mode(struct device
*dev
,
4655 struct device_attribute
*attr
, char *buf
)
4657 struct scsi_device
*sdev
= to_scsi_device(dev
);
4658 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4659 struct ipr_resource_entry
*res
;
4660 unsigned long lock_flags
= 0;
4663 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4664 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4666 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", res
->raw_mode
);
4669 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4674 * ipr_store_raw_mode - Change the adapter's raw mode
4675 * @dev: class device struct
4679 * number of bytes printed to buffer
4681 static ssize_t
ipr_store_raw_mode(struct device
*dev
,
4682 struct device_attribute
*attr
,
4683 const char *buf
, size_t count
)
4685 struct scsi_device
*sdev
= to_scsi_device(dev
);
4686 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4687 struct ipr_resource_entry
*res
;
4688 unsigned long lock_flags
= 0;
4691 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4692 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4694 if (ipr_is_af_dasd_device(res
)) {
4695 res
->raw_mode
= simple_strtoul(buf
, NULL
, 10);
4698 sdev_printk(KERN_INFO
, res
->sdev
, "raw mode is %s\n",
4699 res
->raw_mode
? "enabled" : "disabled");
4704 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4708 static struct device_attribute ipr_raw_mode_attr
= {
4711 .mode
= S_IRUGO
| S_IWUSR
,
4713 .show
= ipr_show_raw_mode
,
4714 .store
= ipr_store_raw_mode
4717 static struct device_attribute
*ipr_dev_attrs
[] = {
4718 &ipr_adapter_handle_attr
,
4719 &ipr_resource_path_attr
,
4720 &ipr_device_id_attr
,
4721 &ipr_resource_type_attr
,
/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
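/*
 * Editorial arithmetic: with the fixed 128 head x 32 sector translation,
 * each cylinder spans 128 * 32 = 4096 sectors, i.e. 2 MiB with 512-byte
 * sectors.  Any partition that starts on a cylinder boundary therefore
 * starts on a 4 KB multiple, which is the alignment the comment above asks
 * fdisk to preserve for the IOA.
 */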
/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 * 	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;
4787 * ipr_target_alloc - Prepare for commands to a SCSI target
4788 * @starget: scsi target struct
4790 * If the device is a SATA device, this function allocates an
4791 * ATA port with libata, else it does nothing.
4794 * 0 on success / non-0 on failure
4796 static int ipr_target_alloc(struct scsi_target
*starget
)
4798 struct Scsi_Host
*shost
= dev_to_shost(&starget
->dev
);
4799 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) shost
->hostdata
;
4800 struct ipr_sata_port
*sata_port
;
4801 struct ata_port
*ap
;
4802 struct ipr_resource_entry
*res
;
4803 unsigned long lock_flags
;
4805 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4806 res
= ipr_find_starget(starget
);
4807 starget
->hostdata
= NULL
;
4809 if (res
&& ipr_is_gata(res
)) {
4810 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4811 sata_port
= kzalloc(sizeof(*sata_port
), GFP_KERNEL
);
4815 ap
= ata_sas_port_alloc(&ioa_cfg
->ata_host
, &sata_port_info
, shost
);
4817 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4818 sata_port
->ioa_cfg
= ioa_cfg
;
4820 sata_port
->res
= res
;
4822 res
->sata_port
= sata_port
;
4823 ap
->private_data
= sata_port
;
4824 starget
->hostdata
= sata_port
;
4830 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4836 * ipr_target_destroy - Destroy a SCSI target
4837 * @starget: scsi target struct
4839 * If the device was a SATA device, this function frees the libata
4840 * ATA port, else it does nothing.
4843 static void ipr_target_destroy(struct scsi_target
*starget
)
4845 struct ipr_sata_port
*sata_port
= starget
->hostdata
;
4846 struct Scsi_Host
*shost
= dev_to_shost(&starget
->dev
);
4847 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) shost
->hostdata
;
4849 if (ioa_cfg
->sis64
) {
4850 if (!ipr_find_starget(starget
)) {
4851 if (starget
->channel
== IPR_ARRAY_VIRTUAL_BUS
)
4852 clear_bit(starget
->id
, ioa_cfg
->array_ids
);
4853 else if (starget
->channel
== IPR_VSET_VIRTUAL_BUS
)
4854 clear_bit(starget
->id
, ioa_cfg
->vset_ids
);
4855 else if (starget
->channel
== 0)
4856 clear_bit(starget
->id
, ioa_cfg
->target_ids
);
4861 starget
->hostdata
= NULL
;
4862 ata_sas_port_destroy(sata_port
->ap
);
4868 * ipr_find_sdev - Find device based on bus/target/lun.
4869 * @sdev: scsi device struct
4872 * resource entry pointer if found / NULL if not found
4874 static struct ipr_resource_entry
*ipr_find_sdev(struct scsi_device
*sdev
)
4876 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4877 struct ipr_resource_entry
*res
;
4879 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
4880 if ((res
->bus
== sdev
->channel
) &&
4881 (res
->target
== sdev
->id
) &&
4882 (res
->lun
== sdev
->lun
))
4890 * ipr_slave_destroy - Unconfigure a SCSI device
4891 * @sdev: scsi device struct
4896 static void ipr_slave_destroy(struct scsi_device
*sdev
)
4898 struct ipr_resource_entry
*res
;
4899 struct ipr_ioa_cfg
*ioa_cfg
;
4900 unsigned long lock_flags
= 0;
4902 ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4904 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4905 res
= (struct ipr_resource_entry
*) sdev
->hostdata
;
4908 res
->sata_port
->ap
->link
.device
[0].class = ATA_DEV_NONE
;
4909 sdev
->hostdata
= NULL
;
4911 res
->sata_port
= NULL
;
4913 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4917 * ipr_slave_configure - Configure a SCSI device
4918 * @sdev: scsi device struct
4920 * This function configures the specified scsi device.
4925 static int ipr_slave_configure(struct scsi_device
*sdev
)
4927 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4928 struct ipr_resource_entry
*res
;
4929 struct ata_port
*ap
= NULL
;
4930 unsigned long lock_flags
= 0;
4931 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
4933 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4934 res
= sdev
->hostdata
;
4936 if (ipr_is_af_dasd_device(res
))
4937 sdev
->type
= TYPE_RAID
;
4938 if (ipr_is_af_dasd_device(res
) || ipr_is_ioa_resource(res
)) {
4939 sdev
->scsi_level
= 4;
4940 sdev
->no_uld_attach
= 1;
4942 if (ipr_is_vset_device(res
)) {
4943 sdev
->scsi_level
= SCSI_SPC_3
;
4944 sdev
->no_report_opcodes
= 1;
4945 blk_queue_rq_timeout(sdev
->request_queue
,
4946 IPR_VSET_RW_TIMEOUT
);
4947 blk_queue_max_hw_sectors(sdev
->request_queue
, IPR_VSET_MAX_SECTORS
);
4949 if (ipr_is_gata(res
) && res
->sata_port
)
4950 ap
= res
->sata_port
->ap
;
4951 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4954 scsi_change_queue_depth(sdev
, IPR_MAX_CMD_PER_ATA_LUN
);
4955 ata_sas_slave_configure(sdev
, ap
);
4959 sdev_printk(KERN_INFO
, sdev
, "Resource path: %s\n",
4960 ipr_format_res_path(ioa_cfg
,
4961 res
->res_path
, buffer
, sizeof(buffer
)));
4964 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4969 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4970 * @sdev: scsi device struct
4972 * This function initializes an ATA port so that future commands
4973 * sent through queuecommand will work.
4978 static int ipr_ata_slave_alloc(struct scsi_device
*sdev
)
4980 struct ipr_sata_port
*sata_port
= NULL
;
4984 if (sdev
->sdev_target
)
4985 sata_port
= sdev
->sdev_target
->hostdata
;
4987 rc
= ata_sas_port_init(sata_port
->ap
);
4989 rc
= ata_sas_sync_probe(sata_port
->ap
);
4993 ipr_slave_destroy(sdev
);
5000 * ipr_slave_alloc - Prepare for commands to a device.
5001 * @sdev: scsi device struct
5003 * This function saves a pointer to the resource entry
5004 * in the scsi device struct if the device exists. We
5005 * can then use this pointer in ipr_queuecommand when
5006 * handling new commands.
5009 * 0 on success / -ENXIO if device does not exist
5011 static int ipr_slave_alloc(struct scsi_device
*sdev
)
5013 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
5014 struct ipr_resource_entry
*res
;
5015 unsigned long lock_flags
;
5018 sdev
->hostdata
= NULL
;
5020 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5022 res
= ipr_find_sdev(sdev
);
5027 sdev
->hostdata
= res
;
5028 if (!ipr_is_naca_model(res
))
5029 res
->needs_sync_complete
= 1;
5031 if (ipr_is_gata(res
)) {
5032 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5033 return ipr_ata_slave_alloc(sdev
);
5037 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
/**
 * ipr_match_lun - Match function for specified LUN
 * @ipr_cmd:	ipr command struct
 * @device:		device to match (sdev)
 *
 * Returns:
 *	1 if command matches sdev / 0 if command does not match sdev
 **/
static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
{
	if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
		return 1;
	return 0;
}
/**
 * ipr_cmnd_is_free - Check if a command is free or not
 * @ipr_cmd:	ipr command struct
 *
 * Returns:
 *	true / false
 **/
static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *loop_cmd;

	list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
		if (loop_cmd == ipr_cmd)
			return true;
	}

	return false;
}
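/*
 * Editorial note: "free" here means the command block is currently sitting
 * on its HRR queue's hrrq_free_q; the helper simply walks that list looking
 * for the block itself.  Callers such as ipr_wait_for_ops() use it to skip
 * command blocks that are not outstanding before applying a match function.
 */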
/**
 * ipr_match_res - Match function for specified resource entry
 * @ipr_cmd:	ipr command struct
 * @resource:	resource entry to match
 *
 * Returns:
 *	1 if command matches sdev / 0 if command does not match sdev
 **/
static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
{
	struct ipr_resource_entry *res = resource;

	if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
		return 1;
	return 0;
}
5094 * ipr_wait_for_ops - Wait for matching commands to complete
5095 * @ipr_cmd: ipr command struct
5096 * @device: device to match (sdev)
5097 * @match: match function to use
5102 static int ipr_wait_for_ops(struct ipr_ioa_cfg
*ioa_cfg
, void *device
,
5103 int (*match
)(struct ipr_cmnd
*, void *))
5105 struct ipr_cmnd
*ipr_cmd
;
5107 unsigned long flags
;
5108 struct ipr_hrr_queue
*hrrq
;
5109 signed long timeout
= IPR_ABORT_TASK_TIMEOUT
;
5110 DECLARE_COMPLETION_ONSTACK(comp
);
5116 for_each_hrrq(hrrq
, ioa_cfg
) {
5117 spin_lock_irqsave(hrrq
->lock
, flags
);
5118 for (i
= hrrq
->min_cmd_id
; i
<= hrrq
->max_cmd_id
; i
++) {
5119 ipr_cmd
= ioa_cfg
->ipr_cmnd_list
[i
];
5120 if (!ipr_cmnd_is_free(ipr_cmd
)) {
5121 if (match(ipr_cmd
, device
)) {
5122 ipr_cmd
->eh_comp
= &comp
;
5127 spin_unlock_irqrestore(hrrq
->lock
, flags
);
5131 timeout
= wait_for_completion_timeout(&comp
, timeout
);
5136 for_each_hrrq(hrrq
, ioa_cfg
) {
5137 spin_lock_irqsave(hrrq
->lock
, flags
);
5138 for (i
= hrrq
->min_cmd_id
; i
<= hrrq
->max_cmd_id
; i
++) {
5139 ipr_cmd
= ioa_cfg
->ipr_cmnd_list
[i
];
5140 if (!ipr_cmnd_is_free(ipr_cmd
)) {
5141 if (match(ipr_cmd
, device
)) {
5142 ipr_cmd
->eh_comp
= NULL
;
5147 spin_unlock_irqrestore(hrrq
->lock
, flags
);
5151 dev_err(&ioa_cfg
->pdev
->dev
, "Timed out waiting for aborted commands\n");
5153 return wait
? FAILED
: SUCCESS
;
5162 static int ipr_eh_host_reset(struct scsi_cmnd
*cmd
)
5164 struct ipr_ioa_cfg
*ioa_cfg
;
5165 unsigned long lock_flags
= 0;
5169 ioa_cfg
= (struct ipr_ioa_cfg
*) cmd
->device
->host
->hostdata
;
5170 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5172 if (!ioa_cfg
->in_reset_reload
&& !ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
5173 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_ABBREV
);
5174 dev_err(&ioa_cfg
->pdev
->dev
,
5175 "Adapter being reset as a result of error recovery.\n");
5177 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
5178 ioa_cfg
->sdt_state
= GET_DUMP
;
5181 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5182 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
5183 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5185 /* If we got hit with a host reset while we were already resetting
5186 the adapter for some reason, and the reset failed. */
5187 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
5192 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5198 * ipr_device_reset - Reset the device
5199 * @ioa_cfg: ioa config struct
5200 * @res: resource entry struct
5202 * This function issues a device reset to the affected device.
5203 * If the device is a SCSI device, a LUN reset will be sent
5204 * to the device first. If that does not work, a target reset
5205 * will be sent. If the device is a SATA device, a PHY reset will
5209 * 0 on success / non-zero on failure
5211 static int ipr_device_reset(struct ipr_ioa_cfg
*ioa_cfg
,
5212 struct ipr_resource_entry
*res
)
5214 struct ipr_cmnd
*ipr_cmd
;
5215 struct ipr_ioarcb
*ioarcb
;
5216 struct ipr_cmd_pkt
*cmd_pkt
;
5217 struct ipr_ioarcb_ata_regs
*regs
;
5221 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
5222 ioarcb
= &ipr_cmd
->ioarcb
;
5223 cmd_pkt
= &ioarcb
->cmd_pkt
;
5225 if (ipr_cmd
->ioa_cfg
->sis64
) {
5226 regs
= &ipr_cmd
->i
.ata_ioadl
.regs
;
5227 ioarcb
->add_cmd_parms_offset
= cpu_to_be16(sizeof(*ioarcb
));
5229 regs
= &ioarcb
->u
.add_data
.u
.regs
;
5231 ioarcb
->res_handle
= res
->res_handle
;
5232 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5233 cmd_pkt
->cdb
[0] = IPR_RESET_DEVICE
;
5234 if (ipr_is_gata(res
)) {
5235 cmd_pkt
->cdb
[2] = IPR_ATA_PHY_RESET
;
5236 ioarcb
->add_cmd_parms_len
= cpu_to_be16(sizeof(regs
->flags
));
5237 regs
->flags
|= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION
;
5240 ipr_send_blocking_cmd(ipr_cmd
, ipr_timeout
, IPR_DEVICE_RESET_TIMEOUT
);
5241 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5242 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5243 if (ipr_is_gata(res
) && res
->sata_port
&& ioasc
!= IPR_IOASC_IOA_WAS_RESET
) {
5244 if (ipr_cmd
->ioa_cfg
->sis64
)
5245 memcpy(&res
->sata_port
->ioasa
, &ipr_cmd
->s
.ioasa64
.u
.gata
,
5246 sizeof(struct ipr_ioasa_gata
));
5248 memcpy(&res
->sata_port
->ioasa
, &ipr_cmd
->s
.ioasa
.u
.gata
,
5249 sizeof(struct ipr_ioasa_gata
));
5253 return IPR_IOASC_SENSE_KEY(ioasc
) ? -EIO
: 0;
5257 * ipr_sata_reset - Reset the SATA port
5258 * @link: SATA link to reset
5259 * @classes: class of the attached device
5261 * This function issues a SATA phy reset to the affected ATA link.
5264 * 0 on success / non-zero on failure
5266 static int ipr_sata_reset(struct ata_link
*link
, unsigned int *classes
,
5267 unsigned long deadline
)
5269 struct ipr_sata_port
*sata_port
= link
->ap
->private_data
;
5270 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
5271 struct ipr_resource_entry
*res
;
5272 unsigned long lock_flags
= 0;
5273 int rc
= -ENXIO
, ret
;
5276 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5277 while (ioa_cfg
->in_reset_reload
) {
5278 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5279 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
5280 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5283 res
= sata_port
->res
;
5285 rc
= ipr_device_reset(ioa_cfg
, res
);
5286 *classes
= res
->ata_class
;
5287 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5289 ret
= ipr_wait_for_ops(ioa_cfg
, res
, ipr_match_res
);
5290 if (ret
!= SUCCESS
) {
5291 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5292 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_ABBREV
);
5293 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5295 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
5298 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5305 * ipr_eh_dev_reset - Reset the device
5306 * @scsi_cmd: scsi command struct
5308 * This function issues a device reset to the affected device.
5309 * A LUN reset will be sent to the device first. If that does
5310 * not work, a target reset will be sent.
5315 static int __ipr_eh_dev_reset(struct scsi_cmnd
*scsi_cmd
)
5317 struct ipr_cmnd
*ipr_cmd
;
5318 struct ipr_ioa_cfg
*ioa_cfg
;
5319 struct ipr_resource_entry
*res
;
5320 struct ata_port
*ap
;
5322 struct ipr_hrr_queue
*hrrq
;
5325 ioa_cfg
= (struct ipr_ioa_cfg
*) scsi_cmd
->device
->host
->hostdata
;
5326 res
= scsi_cmd
->device
->hostdata
;
5329 * If we are currently going through reset/reload, return failed. This will force the
5330 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5333 if (ioa_cfg
->in_reset_reload
)
5335 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
5338 for_each_hrrq(hrrq
, ioa_cfg
) {
5339 spin_lock(&hrrq
->_lock
);
5340 for (i
= hrrq
->min_cmd_id
; i
<= hrrq
->max_cmd_id
; i
++) {
5341 ipr_cmd
= ioa_cfg
->ipr_cmnd_list
[i
];
5343 if (ipr_cmd
->ioarcb
.res_handle
== res
->res_handle
) {
5346 if (ipr_cmnd_is_free(ipr_cmd
))
5349 ipr_cmd
->done
= ipr_sata_eh_done
;
5350 if (!(ipr_cmd
->qc
->flags
& ATA_QCFLAG_FAILED
)) {
5351 ipr_cmd
->qc
->err_mask
|= AC_ERR_TIMEOUT
;
5352 ipr_cmd
->qc
->flags
|= ATA_QCFLAG_FAILED
;
5356 spin_unlock(&hrrq
->_lock
);
5358 res
->resetting_device
= 1;
5359 scmd_printk(KERN_ERR
, scsi_cmd
, "Resetting device\n");
5361 if (ipr_is_gata(res
) && res
->sata_port
) {
5362 ap
= res
->sata_port
->ap
;
5363 spin_unlock_irq(scsi_cmd
->device
->host
->host_lock
);
5364 ata_std_error_handler(ap
);
5365 spin_lock_irq(scsi_cmd
->device
->host
->host_lock
);
5367 rc
= ipr_device_reset(ioa_cfg
, res
);
5368 res
->resetting_device
= 0;
5369 res
->reset_occurred
= 1;
5372 return rc
? FAILED
: SUCCESS
;
5375 static int ipr_eh_dev_reset(struct scsi_cmnd
*cmd
)
5378 struct ipr_ioa_cfg
*ioa_cfg
;
5379 struct ipr_resource_entry
*res
;
5381 ioa_cfg
= (struct ipr_ioa_cfg
*) cmd
->device
->host
->hostdata
;
5382 res
= cmd
->device
->hostdata
;
5387 spin_lock_irq(cmd
->device
->host
->host_lock
);
5388 rc
= __ipr_eh_dev_reset(cmd
);
5389 spin_unlock_irq(cmd
->device
->host
->host_lock
);
5391 if (rc
== SUCCESS
) {
5392 if (ipr_is_gata(res
) && res
->sata_port
)
5393 rc
= ipr_wait_for_ops(ioa_cfg
, res
, ipr_match_res
);
5395 rc
= ipr_wait_for_ops(ioa_cfg
, cmd
->device
, ipr_match_lun
);
5402 * ipr_bus_reset_done - Op done function for bus reset.
5403 * @ipr_cmd: ipr command struct
5405 * This function is the op done function for a bus reset
5410 static void ipr_bus_reset_done(struct ipr_cmnd
*ipr_cmd
)
5412 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
5413 struct ipr_resource_entry
*res
;
5416 if (!ioa_cfg
->sis64
)
5417 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
5418 if (res
->res_handle
== ipr_cmd
->ioarcb
.res_handle
) {
5419 scsi_report_bus_reset(ioa_cfg
->host
, res
->bus
);
5425 * If abort has not completed, indicate the reset has, else call the
5426 * abort's done function to wake the sleeping eh thread
5428 if (ipr_cmd
->sibling
->sibling
)
5429 ipr_cmd
->sibling
->sibling
= NULL
;
5431 ipr_cmd
->sibling
->done(ipr_cmd
->sibling
);
5433 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5438 * ipr_abort_timeout - An abort task has timed out
5439 * @ipr_cmd: ipr command struct
5441 * This function handles when an abort task times out. If this
5442 * happens we issue a bus reset since we have resources tied
5443 * up that must be freed before returning to the midlayer.
5448 static void ipr_abort_timeout(struct timer_list
*t
)
5450 struct ipr_cmnd
*ipr_cmd
= from_timer(ipr_cmd
, t
, timer
);
5451 struct ipr_cmnd
*reset_cmd
;
5452 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
5453 struct ipr_cmd_pkt
*cmd_pkt
;
5454 unsigned long lock_flags
= 0;
5457 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5458 if (ipr_cmd
->completion
.done
|| ioa_cfg
->in_reset_reload
) {
5459 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5463 sdev_printk(KERN_ERR
, ipr_cmd
->u
.sdev
, "Abort timed out. Resetting bus.\n");
5464 reset_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
5465 ipr_cmd
->sibling
= reset_cmd
;
5466 reset_cmd
->sibling
= ipr_cmd
;
5467 reset_cmd
->ioarcb
.res_handle
= ipr_cmd
->ioarcb
.res_handle
;
5468 cmd_pkt
= &reset_cmd
->ioarcb
.cmd_pkt
;
5469 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5470 cmd_pkt
->cdb
[0] = IPR_RESET_DEVICE
;
5471 cmd_pkt
->cdb
[2] = IPR_RESET_TYPE_SELECT
| IPR_BUS_RESET
;
5473 ipr_do_req(reset_cmd
, ipr_bus_reset_done
, ipr_timeout
, IPR_DEVICE_RESET_TIMEOUT
);
5474 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5479 * ipr_cancel_op - Cancel specified op
5480 * @scsi_cmd: scsi command struct
5482 * This function cancels specified op.
5487 static int ipr_cancel_op(struct scsi_cmnd
*scsi_cmd
)
5489 struct ipr_cmnd
*ipr_cmd
;
5490 struct ipr_ioa_cfg
*ioa_cfg
;
5491 struct ipr_resource_entry
*res
;
5492 struct ipr_cmd_pkt
*cmd_pkt
;
5494 int i
, op_found
= 0;
5495 struct ipr_hrr_queue
*hrrq
;
5498 ioa_cfg
= (struct ipr_ioa_cfg
*)scsi_cmd
->device
->host
->hostdata
;
5499 res
= scsi_cmd
->device
->hostdata
;
5501 /* If we are currently going through reset/reload, return failed.
5502 * This will force the mid-layer to call ipr_eh_host_reset,
5503 * which will then go to sleep and wait for the reset to complete
5505 if (ioa_cfg
->in_reset_reload
||
5506 ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
5512 * If we are aborting a timed out op, chances are that the timeout was caused
5513 * by a still not detected EEH error. In such cases, reading a register will
5514 * trigger the EEH recovery infrastructure.
5516 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
5518 if (!ipr_is_gscsi(res
))
5521 for_each_hrrq(hrrq
, ioa_cfg
) {
5522 spin_lock(&hrrq
->_lock
);
5523 for (i
= hrrq
->min_cmd_id
; i
<= hrrq
->max_cmd_id
; i
++) {
5524 if (ioa_cfg
->ipr_cmnd_list
[i
]->scsi_cmd
== scsi_cmd
) {
5525 if (!ipr_cmnd_is_free(ioa_cfg
->ipr_cmnd_list
[i
])) {
5531 spin_unlock(&hrrq
->_lock
);
5537 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
5538 ipr_cmd
->ioarcb
.res_handle
= res
->res_handle
;
5539 cmd_pkt
= &ipr_cmd
->ioarcb
.cmd_pkt
;
5540 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5541 cmd_pkt
->cdb
[0] = IPR_CANCEL_ALL_REQUESTS
;
5542 ipr_cmd
->u
.sdev
= scsi_cmd
->device
;
5544 scmd_printk(KERN_ERR
, scsi_cmd
, "Aborting command: %02X\n",
5546 ipr_send_blocking_cmd(ipr_cmd
, ipr_abort_timeout
, IPR_CANCEL_ALL_TIMEOUT
);
5547 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5550 * If the abort task timed out and we sent a bus reset, we will get
5551 * one the following responses to the abort
5553 if (ioasc
== IPR_IOASC_BUS_WAS_RESET
|| ioasc
== IPR_IOASC_SYNC_REQUIRED
) {
5558 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5559 if (!ipr_is_naca_model(res
))
5560 res
->needs_sync_complete
= 1;
5563 return IPR_IOASC_SENSE_KEY(ioasc
) ? FAILED
: SUCCESS
;
5567 * ipr_eh_abort - Abort a single op
5568 * @scsi_cmd: scsi command struct
5571 * 0 if scan in progress / 1 if scan is complete
5573 static int ipr_scan_finished(struct Scsi_Host
*shost
, unsigned long elapsed_time
)
5575 unsigned long lock_flags
;
5576 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) shost
->hostdata
;
5579 spin_lock_irqsave(shost
->host_lock
, lock_flags
);
5580 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
|| ioa_cfg
->scan_done
)
5582 if ((elapsed_time
/HZ
) > (ioa_cfg
->transop_timeout
* 2))
5584 spin_unlock_irqrestore(shost
->host_lock
, lock_flags
);
5589 * ipr_eh_host_reset - Reset the host adapter
5590 * @scsi_cmd: scsi command struct
5595 static int ipr_eh_abort(struct scsi_cmnd
*scsi_cmd
)
5597 unsigned long flags
;
5599 struct ipr_ioa_cfg
*ioa_cfg
;
5603 ioa_cfg
= (struct ipr_ioa_cfg
*) scsi_cmd
->device
->host
->hostdata
;
5605 spin_lock_irqsave(scsi_cmd
->device
->host
->host_lock
, flags
);
5606 rc
= ipr_cancel_op(scsi_cmd
);
5607 spin_unlock_irqrestore(scsi_cmd
->device
->host
->host_lock
, flags
);
5610 rc
= ipr_wait_for_ops(ioa_cfg
, scsi_cmd
->device
, ipr_match_lun
);
5616 * ipr_handle_other_interrupt - Handle "other" interrupts
5617 * @ioa_cfg: ioa config struct
5618 * @int_reg: interrupt register
5621 * IRQ_NONE / IRQ_HANDLED
5623 static irqreturn_t
ipr_handle_other_interrupt(struct ipr_ioa_cfg
*ioa_cfg
,
5626 irqreturn_t rc
= IRQ_HANDLED
;
5629 int_mask_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg32
);
5630 int_reg
&= ~int_mask_reg
;
5632 /* If an interrupt on the adapter did not occur, ignore it.
5633 * Or in the case of SIS 64, check for a stage change interrupt.
5635 if ((int_reg
& IPR_PCII_OPER_INTERRUPTS
) == 0) {
5636 if (ioa_cfg
->sis64
) {
5637 int_mask_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
5638 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
) & ~int_mask_reg
;
5639 if (int_reg
& IPR_PCII_IPL_STAGE_CHANGE
) {
5641 /* clear stage change */
5642 writel(IPR_PCII_IPL_STAGE_CHANGE
, ioa_cfg
->regs
.clr_interrupt_reg
);
5643 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
) & ~int_mask_reg
;
5644 list_del(&ioa_cfg
->reset_cmd
->queue
);
5645 del_timer(&ioa_cfg
->reset_cmd
->timer
);
5646 ipr_reset_ioa_job(ioa_cfg
->reset_cmd
);
5654 if (int_reg
& IPR_PCII_IOA_TRANS_TO_OPER
) {
5655 /* Mask the interrupt */
5656 writel(IPR_PCII_IOA_TRANS_TO_OPER
, ioa_cfg
->regs
.set_interrupt_mask_reg
);
5657 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
5659 list_del(&ioa_cfg
->reset_cmd
->queue
);
5660 del_timer(&ioa_cfg
->reset_cmd
->timer
);
5661 ipr_reset_ioa_job(ioa_cfg
->reset_cmd
);
5662 } else if ((int_reg
& IPR_PCII_HRRQ_UPDATED
) == int_reg
) {
5663 if (ioa_cfg
->clear_isr
) {
5664 if (ipr_debug
&& printk_ratelimit())
5665 dev_err(&ioa_cfg
->pdev
->dev
,
5666 "Spurious interrupt detected. 0x%08X\n", int_reg
);
5667 writel(IPR_PCII_HRRQ_UPDATED
, ioa_cfg
->regs
.clr_interrupt_reg32
);
5668 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
5672 if (int_reg
& IPR_PCII_IOA_UNIT_CHECKED
)
5673 ioa_cfg
->ioa_unit_checked
= 1;
5674 else if (int_reg
& IPR_PCII_NO_HOST_RRQ
)
5675 dev_err(&ioa_cfg
->pdev
->dev
,
5676 "No Host RRQ. 0x%08X\n", int_reg
);
5678 dev_err(&ioa_cfg
->pdev
->dev
,
5679 "Permanent IOA failure. 0x%08X\n", int_reg
);
5681 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
5682 ioa_cfg
->sdt_state
= GET_DUMP
;
5684 ipr_mask_and_clear_interrupts(ioa_cfg
, ~0);
5685 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 * @number:	number to log with the message
 *
 * Return value:
 * 	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}
5710 static int ipr_process_hrrq(struct ipr_hrr_queue
*hrr_queue
, int budget
,
5711 struct list_head
*doneq
)
5715 struct ipr_cmnd
*ipr_cmd
;
5716 struct ipr_ioa_cfg
*ioa_cfg
= hrr_queue
->ioa_cfg
;
5719 /* If interrupts are disabled, ignore the interrupt */
5720 if (!hrr_queue
->allow_interrupts
)
5723 while ((be32_to_cpu(*hrr_queue
->hrrq_curr
) & IPR_HRRQ_TOGGLE_BIT
) ==
5724 hrr_queue
->toggle_bit
) {
5726 cmd_index
= (be32_to_cpu(*hrr_queue
->hrrq_curr
) &
5727 IPR_HRRQ_REQ_RESP_HANDLE_MASK
) >>
5728 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT
;
5730 if (unlikely(cmd_index
> hrr_queue
->max_cmd_id
||
5731 cmd_index
< hrr_queue
->min_cmd_id
)) {
5733 "Invalid response handle from IOA: ",
5738 ipr_cmd
= ioa_cfg
->ipr_cmnd_list
[cmd_index
];
5739 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5741 ipr_trc_hook(ipr_cmd
, IPR_TRACE_FINISH
, ioasc
);
5743 list_move_tail(&ipr_cmd
->queue
, doneq
);
5745 if (hrr_queue
->hrrq_curr
< hrr_queue
->hrrq_end
) {
5746 hrr_queue
->hrrq_curr
++;
5748 hrr_queue
->hrrq_curr
= hrr_queue
->hrrq_start
;
5749 hrr_queue
->toggle_bit
^= 1u;
5752 if (budget
> 0 && num_hrrq
>= budget
)
5759 static int ipr_iopoll(struct irq_poll
*iop
, int budget
)
5761 struct ipr_ioa_cfg
*ioa_cfg
;
5762 struct ipr_hrr_queue
*hrrq
;
5763 struct ipr_cmnd
*ipr_cmd
, *temp
;
5764 unsigned long hrrq_flags
;
5768 hrrq
= container_of(iop
, struct ipr_hrr_queue
, iopoll
);
5769 ioa_cfg
= hrrq
->ioa_cfg
;
5771 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
5772 completed_ops
= ipr_process_hrrq(hrrq
, budget
, &doneq
);
5774 if (completed_ops
< budget
)
5775 irq_poll_complete(iop
);
5776 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5778 list_for_each_entry_safe(ipr_cmd
, temp
, &doneq
, queue
) {
5779 list_del(&ipr_cmd
->queue
);
5780 del_timer(&ipr_cmd
->timer
);
5781 ipr_cmd
->fast_done(ipr_cmd
);
5784 return completed_ops
;
5788 * ipr_isr - Interrupt service routine
5790 * @devp: pointer to ioa config struct
5793 * IRQ_NONE / IRQ_HANDLED
5795 static irqreturn_t
ipr_isr(int irq
, void *devp
)
5797 struct ipr_hrr_queue
*hrrq
= (struct ipr_hrr_queue
*)devp
;
5798 struct ipr_ioa_cfg
*ioa_cfg
= hrrq
->ioa_cfg
;
5799 unsigned long hrrq_flags
= 0;
5803 struct ipr_cmnd
*ipr_cmd
, *temp
;
5804 irqreturn_t rc
= IRQ_NONE
;
5807 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
5808 /* If interrupts are disabled, ignore the interrupt */
5809 if (!hrrq
->allow_interrupts
) {
5810 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5815 if (ipr_process_hrrq(hrrq
, -1, &doneq
)) {
5818 if (!ioa_cfg
->clear_isr
)
5821 /* Clear the PCI interrupt */
5824 writel(IPR_PCII_HRRQ_UPDATED
,
5825 ioa_cfg
->regs
.clr_interrupt_reg32
);
5826 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
5827 } while (int_reg
& IPR_PCII_HRRQ_UPDATED
&&
5828 num_hrrq
++ < IPR_MAX_HRRQ_RETRIES
);
5830 } else if (rc
== IRQ_NONE
&& irq_none
== 0) {
5831 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
5833 } else if (num_hrrq
== IPR_MAX_HRRQ_RETRIES
&&
5834 int_reg
& IPR_PCII_HRRQ_UPDATED
) {
5836 "Error clearing HRRQ: ", num_hrrq
);
5843 if (unlikely(rc
== IRQ_NONE
))
5844 rc
= ipr_handle_other_interrupt(ioa_cfg
, int_reg
);
5846 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5847 list_for_each_entry_safe(ipr_cmd
, temp
, &doneq
, queue
) {
5848 list_del(&ipr_cmd
->queue
);
5849 del_timer(&ipr_cmd
->timer
);
5850 ipr_cmd
->fast_done(ipr_cmd
);
5856 * ipr_isr_mhrrq - Interrupt service routine
5858 * @devp: pointer to ioa config struct
5861 * IRQ_NONE / IRQ_HANDLED
5863 static irqreturn_t
ipr_isr_mhrrq(int irq
, void *devp
)
5865 struct ipr_hrr_queue
*hrrq
= (struct ipr_hrr_queue
*)devp
;
5866 struct ipr_ioa_cfg
*ioa_cfg
= hrrq
->ioa_cfg
;
5867 unsigned long hrrq_flags
= 0;
5868 struct ipr_cmnd
*ipr_cmd
, *temp
;
5869 irqreturn_t rc
= IRQ_NONE
;
5872 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
5874 /* If interrupts are disabled, ignore the interrupt */
5875 if (!hrrq
->allow_interrupts
) {
5876 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5880 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
5881 if ((be32_to_cpu(*hrrq
->hrrq_curr
) & IPR_HRRQ_TOGGLE_BIT
) ==
5883 irq_poll_sched(&hrrq
->iopoll
);
5884 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5888 if ((be32_to_cpu(*hrrq
->hrrq_curr
) & IPR_HRRQ_TOGGLE_BIT
) ==
5891 if (ipr_process_hrrq(hrrq
, -1, &doneq
))
5895 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5897 list_for_each_entry_safe(ipr_cmd
, temp
, &doneq
, queue
) {
5898 list_del(&ipr_cmd
->queue
);
5899 del_timer(&ipr_cmd
->timer
);
5900 ipr_cmd
->fast_done(ipr_cmd
);
5906 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5907 * @ioa_cfg: ioa config struct
5908 * @ipr_cmd: ipr command struct
5911 * 0 on success / -1 on failure
5913 static int ipr_build_ioadl64(struct ipr_ioa_cfg
*ioa_cfg
,
5914 struct ipr_cmnd
*ipr_cmd
)
5917 struct scatterlist
*sg
;
5919 u32 ioadl_flags
= 0;
5920 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5921 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
5922 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ioadl64
;
5924 length
= scsi_bufflen(scsi_cmd
);
5928 nseg
= scsi_dma_map(scsi_cmd
);
5930 if (printk_ratelimit())
5931 dev_err(&ioa_cfg
->pdev
->dev
, "scsi_dma_map failed!\n");
5935 ipr_cmd
->dma_use_sg
= nseg
;
5937 ioarcb
->data_transfer_length
= cpu_to_be32(length
);
5939 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
) * ipr_cmd
->dma_use_sg
);
5941 if (scsi_cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
5942 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
5943 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
5944 } else if (scsi_cmd
->sc_data_direction
== DMA_FROM_DEVICE
)
5945 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
5947 scsi_for_each_sg(scsi_cmd
, sg
, ipr_cmd
->dma_use_sg
, i
) {
5948 ioadl64
[i
].flags
= cpu_to_be32(ioadl_flags
);
5949 ioadl64
[i
].data_len
= cpu_to_be32(sg_dma_len(sg
));
5950 ioadl64
[i
].address
= cpu_to_be64(sg_dma_address(sg
));
5953 ioadl64
[i
-1].flags
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
5958 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5959 * @ioa_cfg: ioa config struct
5960 * @ipr_cmd: ipr command struct
5963 * 0 on success / -1 on failure
5965 static int ipr_build_ioadl(struct ipr_ioa_cfg
*ioa_cfg
,
5966 struct ipr_cmnd
*ipr_cmd
)
5969 struct scatterlist
*sg
;
5971 u32 ioadl_flags
= 0;
5972 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5973 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
5974 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
5976 length
= scsi_bufflen(scsi_cmd
);
5980 nseg
= scsi_dma_map(scsi_cmd
);
5982 dev_err(&ioa_cfg
->pdev
->dev
, "scsi_dma_map failed!\n");
5986 ipr_cmd
->dma_use_sg
= nseg
;
5988 if (scsi_cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
5989 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
5990 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
5991 ioarcb
->data_transfer_length
= cpu_to_be32(length
);
5993 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
5994 } else if (scsi_cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
5995 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
5996 ioarcb
->read_data_transfer_length
= cpu_to_be32(length
);
5997 ioarcb
->read_ioadl_len
=
5998 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6001 if (ipr_cmd
->dma_use_sg
<= ARRAY_SIZE(ioarcb
->u
.add_data
.u
.ioadl
)) {
6002 ioadl
= ioarcb
->u
.add_data
.u
.ioadl
;
6003 ioarcb
->write_ioadl_addr
= cpu_to_be32((ipr_cmd
->dma_addr
) +
6004 offsetof(struct ipr_ioarcb
, u
.add_data
));
6005 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
6008 scsi_for_each_sg(scsi_cmd
, sg
, ipr_cmd
->dma_use_sg
, i
) {
6009 ioadl
[i
].flags_and_data_len
=
6010 cpu_to_be32(ioadl_flags
| sg_dma_len(sg
));
6011 ioadl
[i
].address
= cpu_to_be32(sg_dma_address(sg
));
6014 ioadl
[i
-1].flags_and_data_len
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
6019 * __ipr_erp_done - Process completion of ERP for a device
6020 * @ipr_cmd: ipr command struct
6022 * This function copies the sense buffer into the scsi_cmd
6023 * struct and pushes the scsi_done function.
6028 static void __ipr_erp_done(struct ipr_cmnd
*ipr_cmd
)
6030 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
6031 struct ipr_resource_entry
*res
= scsi_cmd
->device
->hostdata
;
6032 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
6034 if (IPR_IOASC_SENSE_KEY(ioasc
) > 0) {
6035 scsi_cmd
->result
|= (DID_ERROR
<< 16);
6036 scmd_printk(KERN_ERR
, scsi_cmd
,
6037 "Request Sense failed with IOASC: 0x%08X\n", ioasc
);
6039 memcpy(scsi_cmd
->sense_buffer
, ipr_cmd
->sense_buffer
,
6040 SCSI_SENSE_BUFFERSIZE
);
6044 if (!ipr_is_naca_model(res
))
6045 res
->needs_sync_complete
= 1;
6048 scsi_dma_unmap(ipr_cmd
->scsi_cmd
);
6049 scsi_cmd
->scsi_done(scsi_cmd
);
6050 if (ipr_cmd
->eh_comp
)
6051 complete(ipr_cmd
->eh_comp
);
6052 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:		ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_erp_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}
}
/**
 * __ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 * 	nothing
 **/
static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		__ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}
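/*
 * ERP flow summary, as implemented above and below: a failed op first goes
 * through ipr_erp_cancel_all() when the device is using tagged queuing, a
 * REQUEST SENSE is then issued via __ipr_erp_request_sense(), and
 * __ipr_erp_done() finally copies the sense data into the scsi_cmnd and
 * completes it. The ipr_erp_*() wrappers only add hrrq->_lock locking
 * around the corresponding __ipr_erp_*() workers.
 */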
/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_erp_request_sense(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_cmd->device->simple_tags) {
		__ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:		resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->hdr.ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
		data_len = sizeof(struct ipr_ioasa64);
	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
		data_len = sizeof(struct ipr_ioasa);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}
/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 * @sense_buf:	sense data buffer
 *
 * Return value:
 *	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
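/*
 * ipr_gen_sense() above builds descriptor-format sense data (0x72) only
 * for volume set devices reporting a 64-bit failing LBA on a "do not
 * reallocate" error; every other case uses fixed-format sense (0x70),
 * optionally with the field pointer or failing-LBA information bytes
 * filled in.
 */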
/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;

	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	else
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	return 1;
}
/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	if (!res) {
		__ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		/*
		 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
		 * so SCSI mid-layer and upper layers handle it accordingly.
		 */
		if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
			scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	case IPR_IOASC_IR_NON_OPTIMIZED:
		if (res->raw_mode) {
			res->raw_mode = 0;
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		} else
			scsi_cmd->result |= (DID_ERROR << 16);
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
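/*
 * Any IOASC not handled explicitly in the switch above falls through to
 * the default case: the command is failed with DID_ERROR only when the
 * sense key is worse than RECOVERED_ERROR, and a sync complete is
 * scheduled for non-VSET, non-NACA devices.
 */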
/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	unsigned long lock_flags;

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(scsi_cmd);

		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
		scsi_cmd->scsi_done(scsi_cmd);
		if (ipr_cmd->eh_comp)
			complete(ipr_cmd->eh_comp);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
	} else {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		spin_lock(&ipr_cmd->hrrq->_lock);
		ipr_erp_start(ioa_cfg, ipr_cmd);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	}
}
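/*
 * Locking note for ipr_scsi_done(): the successful-completion fast path
 * only takes the per-queue hrrq lock, while the error path takes the
 * Scsi_Host host_lock before hrrq->_lock because ipr_erp_start() can call
 * back into the mid-layer (for example scsi_report_bus_reset()).
 */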
/**
 * ipr_queuecommand - Queue a mid-layer request
 * @shost:		scsi host struct
 * @scsi_cmd:	scsi command struct
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct Scsi_Host *shost,
			    struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	unsigned long hrrq_flags, lock_flags;
	int rc;
	struct ipr_hrr_queue *hrrq;
	int hrrq_id;

	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	scsi_cmd->result = (DID_OK << 16);
	res = scsi_cmd->device->hostdata;

	if (ipr_is_gata(res) && res->sata_port) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return rc;
	}

	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
	hrrq = &ioa_cfg->hrrq[hrrq_id];

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		goto err_nodev;
	}

	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
	if (ipr_cmd == NULL) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
	ioarcb = &ipr_cmd->ioarcb;

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ipr_cmd->done = ipr_scsi_eh_done;

	if (ipr_is_gscsi(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		if (res->reset_occurred) {
			res->reset_occurred = 0;
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		}
	}

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;

		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		if (scsi_cmd->flags & SCMD_TAGGED)
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
		else
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
	}

	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	}
	if (res->raw_mode && ipr_is_af_dasd_device(res)) {
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;

		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	}

	if (ioa_cfg->sis64)
		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
	else
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		if (!rc)
			scsi_dma_unmap(scsi_cmd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	if (unlikely(hrrq->ioa_is_dead)) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		scsi_dma_unmap(scsi_cmd);
		goto err_nodev;
	}

	ioarcb->res_handle = res->res_handle;
	if (res->needs_sync_complete) {
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
		res->needs_sync_complete = 0;
	}
	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
	ipr_send_command(ipr_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;

err_nodev:
	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_cmd->result = (DID_NO_CONNECT << 16);
	scsi_cmd->scsi_done(scsi_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;
}
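/*
 * ipr_queuecommand() return values follow the normal queuecommand
 * contract: 0 means the command was either sent to the adapter or
 * completed immediately (the err_nodev path above), while
 * SCSI_MLQUEUE_HOST_BUSY asks the mid-layer to retry the command later.
 */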
/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res)) {
		if (cmd == HDIO_GET_IDENTITY)
			return -ENOTTY;
		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
	}

	return -EINVAL;
}
/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 * 	pointer to buffer with description string
 **/
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.scan_finished = ipr_scan_finished,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME,
};
6749 * ipr_ata_phy_reset - libata phy_reset handler
6750 * @ap: ata port to reset
6753 static void ipr_ata_phy_reset(struct ata_port
*ap
)
6755 unsigned long flags
;
6756 struct ipr_sata_port
*sata_port
= ap
->private_data
;
6757 struct ipr_resource_entry
*res
= sata_port
->res
;
6758 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6762 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6763 while (ioa_cfg
->in_reset_reload
) {
6764 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6765 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
6766 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6769 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
)
6772 rc
= ipr_device_reset(ioa_cfg
, res
);
6775 ap
->link
.device
[0].class = ATA_DEV_NONE
;
6779 ap
->link
.device
[0].class = res
->ata_class
;
6780 if (ap
->link
.device
[0].class == ATA_DEV_UNKNOWN
)
6781 ap
->link
.device
[0].class = ATA_DEV_NONE
;
6784 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6789 * ipr_ata_post_internal - Cleanup after an internal command
6790 * @qc: ATA queued command
6795 static void ipr_ata_post_internal(struct ata_queued_cmd
*qc
)
6797 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
6798 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6799 struct ipr_cmnd
*ipr_cmd
;
6800 struct ipr_hrr_queue
*hrrq
;
6801 unsigned long flags
;
6803 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6804 while (ioa_cfg
->in_reset_reload
) {
6805 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6806 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
6807 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6810 for_each_hrrq(hrrq
, ioa_cfg
) {
6811 spin_lock(&hrrq
->_lock
);
6812 list_for_each_entry(ipr_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
6813 if (ipr_cmd
->qc
== qc
) {
6814 ipr_device_reset(ioa_cfg
, sata_port
->res
);
6818 spin_unlock(&hrrq
->_lock
);
6820 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
/**
 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
 * @regs:	destination
 * @tf:	source ATA taskfile
 *
 * Return value:
 * 	none
 **/
static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
			     struct ata_taskfile *tf)
{
	regs->feature = tf->feature;
	regs->nsect = tf->nsect;
	regs->lbal = tf->lbal;
	regs->lbam = tf->lbam;
	regs->lbah = tf->lbah;
	regs->device = tf->device;
	regs->command = tf->command;
	regs->hob_feature = tf->hob_feature;
	regs->hob_nsect = tf->hob_nsect;
	regs->hob_lbal = tf->hob_lbal;
	regs->hob_lbam = tf->hob_lbam;
	regs->hob_lbah = tf->hob_lbah;
	regs->ctl = tf->ctl;
}
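/*
 * ipr_copy_sata_tf() mirrors the complete ATA taskfile, including the HOB
 * (48-bit LBA) fields, into the ATA register block of the IOARCB so the
 * adapter can issue the command to the device on the host's behalf.
 */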
6850 * ipr_sata_done - done function for SATA commands
6851 * @ipr_cmd: ipr command struct
6853 * This function is invoked by the interrupt handler for
6854 * ops generated by the SCSI mid-layer to SATA devices
6859 static void ipr_sata_done(struct ipr_cmnd
*ipr_cmd
)
6861 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
6862 struct ata_queued_cmd
*qc
= ipr_cmd
->qc
;
6863 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
6864 struct ipr_resource_entry
*res
= sata_port
->res
;
6865 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
6867 spin_lock(&ipr_cmd
->hrrq
->_lock
);
6868 if (ipr_cmd
->ioa_cfg
->sis64
)
6869 memcpy(&sata_port
->ioasa
, &ipr_cmd
->s
.ioasa64
.u
.gata
,
6870 sizeof(struct ipr_ioasa_gata
));
6872 memcpy(&sata_port
->ioasa
, &ipr_cmd
->s
.ioasa
.u
.gata
,
6873 sizeof(struct ipr_ioasa_gata
));
6874 ipr_dump_ioasa(ioa_cfg
, ipr_cmd
, res
);
6876 if (be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc_specific
) & IPR_ATA_DEVICE_WAS_RESET
)
6877 scsi_report_device_reset(ioa_cfg
->host
, res
->bus
, res
->target
);
6879 if (IPR_IOASC_SENSE_KEY(ioasc
) > RECOVERED_ERROR
)
6880 qc
->err_mask
|= __ac_err_mask(sata_port
->ioasa
.status
);
6882 qc
->err_mask
|= ac_err_mask(sata_port
->ioasa
.status
);
6883 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6884 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
6885 ata_qc_complete(qc
);
6889 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6890 * @ipr_cmd: ipr command struct
6891 * @qc: ATA queued command
6894 static void ipr_build_ata_ioadl64(struct ipr_cmnd
*ipr_cmd
,
6895 struct ata_queued_cmd
*qc
)
6897 u32 ioadl_flags
= 0;
6898 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6899 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ata_ioadl
.ioadl64
;
6900 struct ipr_ioadl64_desc
*last_ioadl64
= NULL
;
6901 int len
= qc
->nbytes
;
6902 struct scatterlist
*sg
;
6904 dma_addr_t dma_addr
= ipr_cmd
->dma_addr
;
6909 if (qc
->dma_dir
== DMA_TO_DEVICE
) {
6910 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
6911 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6912 } else if (qc
->dma_dir
== DMA_FROM_DEVICE
)
6913 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
6915 ioarcb
->data_transfer_length
= cpu_to_be32(len
);
6917 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
) * ipr_cmd
->dma_use_sg
);
6918 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
6919 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ata_ioadl
.ioadl64
));
6921 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
6922 ioadl64
->flags
= cpu_to_be32(ioadl_flags
);
6923 ioadl64
->data_len
= cpu_to_be32(sg_dma_len(sg
));
6924 ioadl64
->address
= cpu_to_be64(sg_dma_address(sg
));
6926 last_ioadl64
= ioadl64
;
6930 if (likely(last_ioadl64
))
6931 last_ioadl64
->flags
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
6935 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6936 * @ipr_cmd: ipr command struct
6937 * @qc: ATA queued command
6940 static void ipr_build_ata_ioadl(struct ipr_cmnd
*ipr_cmd
,
6941 struct ata_queued_cmd
*qc
)
6943 u32 ioadl_flags
= 0;
6944 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6945 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
6946 struct ipr_ioadl_desc
*last_ioadl
= NULL
;
6947 int len
= qc
->nbytes
;
6948 struct scatterlist
*sg
;
6954 if (qc
->dma_dir
== DMA_TO_DEVICE
) {
6955 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
6956 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6957 ioarcb
->data_transfer_length
= cpu_to_be32(len
);
6959 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6960 } else if (qc
->dma_dir
== DMA_FROM_DEVICE
) {
6961 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
6962 ioarcb
->read_data_transfer_length
= cpu_to_be32(len
);
6963 ioarcb
->read_ioadl_len
=
6964 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6967 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
6968 ioadl
->flags_and_data_len
= cpu_to_be32(ioadl_flags
| sg_dma_len(sg
));
6969 ioadl
->address
= cpu_to_be32(sg_dma_address(sg
));
6975 if (likely(last_ioadl
))
6976 last_ioadl
->flags_and_data_len
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
6980 * ipr_qc_defer - Get a free ipr_cmd
6981 * @qc: queued command
6986 static int ipr_qc_defer(struct ata_queued_cmd
*qc
)
6988 struct ata_port
*ap
= qc
->ap
;
6989 struct ipr_sata_port
*sata_port
= ap
->private_data
;
6990 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6991 struct ipr_cmnd
*ipr_cmd
;
6992 struct ipr_hrr_queue
*hrrq
;
6995 hrrq_id
= ipr_get_hrrq_index(ioa_cfg
);
6996 hrrq
= &ioa_cfg
->hrrq
[hrrq_id
];
6998 qc
->lldd_task
= NULL
;
6999 spin_lock(&hrrq
->_lock
);
7000 if (unlikely(hrrq
->ioa_is_dead
)) {
7001 spin_unlock(&hrrq
->_lock
);
7005 if (unlikely(!hrrq
->allow_cmds
)) {
7006 spin_unlock(&hrrq
->_lock
);
7007 return ATA_DEFER_LINK
;
7010 ipr_cmd
= __ipr_get_free_ipr_cmnd(hrrq
);
7011 if (ipr_cmd
== NULL
) {
7012 spin_unlock(&hrrq
->_lock
);
7013 return ATA_DEFER_LINK
;
7016 qc
->lldd_task
= ipr_cmd
;
7017 spin_unlock(&hrrq
->_lock
);
7022 * ipr_qc_issue - Issue a SATA qc to a device
7023 * @qc: queued command
7028 static unsigned int ipr_qc_issue(struct ata_queued_cmd
*qc
)
7030 struct ata_port
*ap
= qc
->ap
;
7031 struct ipr_sata_port
*sata_port
= ap
->private_data
;
7032 struct ipr_resource_entry
*res
= sata_port
->res
;
7033 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
7034 struct ipr_cmnd
*ipr_cmd
;
7035 struct ipr_ioarcb
*ioarcb
;
7036 struct ipr_ioarcb_ata_regs
*regs
;
7038 if (qc
->lldd_task
== NULL
)
7041 ipr_cmd
= qc
->lldd_task
;
7042 if (ipr_cmd
== NULL
)
7043 return AC_ERR_SYSTEM
;
7045 qc
->lldd_task
= NULL
;
7046 spin_lock(&ipr_cmd
->hrrq
->_lock
);
7047 if (unlikely(!ipr_cmd
->hrrq
->allow_cmds
||
7048 ipr_cmd
->hrrq
->ioa_is_dead
)) {
7049 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
7050 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
7051 return AC_ERR_SYSTEM
;
7054 ipr_init_ipr_cmnd(ipr_cmd
, ipr_lock_and_done
);
7055 ioarcb
= &ipr_cmd
->ioarcb
;
7057 if (ioa_cfg
->sis64
) {
7058 regs
= &ipr_cmd
->i
.ata_ioadl
.regs
;
7059 ioarcb
->add_cmd_parms_offset
= cpu_to_be16(sizeof(*ioarcb
));
7061 regs
= &ioarcb
->u
.add_data
.u
.regs
;
7063 memset(regs
, 0, sizeof(*regs
));
7064 ioarcb
->add_cmd_parms_len
= cpu_to_be16(sizeof(*regs
));
7066 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
7068 ipr_cmd
->done
= ipr_sata_done
;
7069 ipr_cmd
->ioarcb
.res_handle
= res
->res_handle
;
7070 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_ATA_PASSTHRU
;
7071 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_LINK_DESC
;
7072 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_ULEN_CHK
;
7073 ipr_cmd
->dma_use_sg
= qc
->n_elem
;
7076 ipr_build_ata_ioadl64(ipr_cmd
, qc
);
7078 ipr_build_ata_ioadl(ipr_cmd
, qc
);
7080 regs
->flags
|= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION
;
7081 ipr_copy_sata_tf(regs
, &qc
->tf
);
7082 memcpy(ioarcb
->cmd_pkt
.cdb
, qc
->cdb
, IPR_MAX_CDB_LEN
);
7083 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, IPR_GET_RES_PHYS_LOC(res
));
7085 switch (qc
->tf
.protocol
) {
7086 case ATA_PROT_NODATA
:
7091 regs
->flags
|= IPR_ATA_FLAG_XFER_TYPE_DMA
;
7094 case ATAPI_PROT_PIO
:
7095 case ATAPI_PROT_NODATA
:
7096 regs
->flags
|= IPR_ATA_FLAG_PACKET_CMD
;
7099 case ATAPI_PROT_DMA
:
7100 regs
->flags
|= IPR_ATA_FLAG_PACKET_CMD
;
7101 regs
->flags
|= IPR_ATA_FLAG_XFER_TYPE_DMA
;
7106 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
7107 return AC_ERR_INVALID
;
7110 ipr_send_command(ipr_cmd
);
7111 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
7117 * ipr_qc_fill_rtf - Read result TF
7118 * @qc: ATA queued command
7123 static bool ipr_qc_fill_rtf(struct ata_queued_cmd
*qc
)
7125 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
7126 struct ipr_ioasa_gata
*g
= &sata_port
->ioasa
;
7127 struct ata_taskfile
*tf
= &qc
->result_tf
;
7129 tf
->feature
= g
->error
;
7130 tf
->nsect
= g
->nsect
;
7134 tf
->device
= g
->device
;
7135 tf
->command
= g
->status
;
7136 tf
->hob_nsect
= g
->hob_nsect
;
7137 tf
->hob_lbal
= g
->hob_lbal
;
7138 tf
->hob_lbam
= g
->hob_lbam
;
7139 tf
->hob_lbah
= g
->hob_lbah
;
static struct ata_port_operations ipr_sata_ops = {
	.phy_reset = ipr_ata_phy_reset,
	.hardreset = ipr_sata_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.qc_prep = ata_noop_qc_prep,
	.qc_defer = ipr_qc_defer,
	.qc_issue = ipr_qc_issue,
	.qc_fill_rtf = ipr_qc_fill_rtf,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};

static struct ata_port_info sata_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
			  ATA_FLAG_SAS_HOST,
	.pio_mask	= ATA_PIO4_ONLY,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ipr_sata_ops
};
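/*
 * sata_port_info above advertises SATA with PIO/DMA transfers behind a
 * SAS-style host; the pio/mwdma/udma masks are the maximum transfer modes
 * exposed to libata for attached SATA devices.
 */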
7165 #ifdef CONFIG_PPC_PSERIES
7166 static const u16 ipr_blocked_processors
[] = {
7178 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7179 * @ioa_cfg: ioa cfg struct
7181 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7182 * certain pSeries hardware. This function determines if the given
7183 * adapter is in one of these confgurations or not.
7186 * 1 if adapter is not supported / 0 if adapter is supported
7188 static int ipr_invalid_adapter(struct ipr_ioa_cfg
*ioa_cfg
)
7192 if ((ioa_cfg
->type
== 0x5702) && (ioa_cfg
->pdev
->revision
< 4)) {
7193 for (i
= 0; i
< ARRAY_SIZE(ipr_blocked_processors
); i
++) {
7194 if (pvr_version_is(ipr_blocked_processors
[i
]))
7201 #define ipr_invalid_adapter(ioa_cfg) 0
7205 * ipr_ioa_bringdown_done - IOA bring down completion.
7206 * @ipr_cmd: ipr command struct
7208 * This function processes the completion of an adapter bring down.
7209 * It wakes any reset sleepers.
7214 static int ipr_ioa_bringdown_done(struct ipr_cmnd
*ipr_cmd
)
7216 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7220 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].removing_ioa
) {
7222 ioa_cfg
->scsi_unblock
= 1;
7223 schedule_work(&ioa_cfg
->work_q
);
7226 ioa_cfg
->in_reset_reload
= 0;
7227 ioa_cfg
->reset_retries
= 0;
7228 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
7229 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
7230 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 1;
7231 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
7235 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
7236 wake_up_all(&ioa_cfg
->reset_wait_q
);
7239 return IPR_RC_JOB_RETURN
;
7243 * ipr_ioa_reset_done - IOA reset completion.
7244 * @ipr_cmd: ipr command struct
7246 * This function processes the completion of an adapter reset.
7247 * It schedules any necessary mid-layer add/removes and
7248 * wakes any reset sleepers.
7253 static int ipr_ioa_reset_done(struct ipr_cmnd
*ipr_cmd
)
7255 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7256 struct ipr_resource_entry
*res
;
7260 ioa_cfg
->in_reset_reload
= 0;
7261 for (j
= 0; j
< ioa_cfg
->hrrq_num
; j
++) {
7262 spin_lock(&ioa_cfg
->hrrq
[j
]._lock
);
7263 ioa_cfg
->hrrq
[j
].allow_cmds
= 1;
7264 spin_unlock(&ioa_cfg
->hrrq
[j
]._lock
);
7267 ioa_cfg
->reset_cmd
= NULL
;
7268 ioa_cfg
->doorbell
|= IPR_RUNTIME_RESET
;
7270 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
7271 if (res
->add_to_ml
|| res
->del_from_ml
) {
7276 schedule_work(&ioa_cfg
->work_q
);
7278 for (j
= 0; j
< IPR_NUM_HCAMS
; j
++) {
7279 list_del_init(&ioa_cfg
->hostrcb
[j
]->queue
);
7280 if (j
< IPR_NUM_LOG_HCAMS
)
7281 ipr_send_hcam(ioa_cfg
,
7282 IPR_HCAM_CDB_OP_CODE_LOG_DATA
,
7283 ioa_cfg
->hostrcb
[j
]);
7285 ipr_send_hcam(ioa_cfg
,
7286 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
,
7287 ioa_cfg
->hostrcb
[j
]);
7290 scsi_report_bus_reset(ioa_cfg
->host
, IPR_VSET_BUS
);
7291 dev_info(&ioa_cfg
->pdev
->dev
, "IOA initialized.\n");
7293 ioa_cfg
->reset_retries
= 0;
7294 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
7295 wake_up_all(&ioa_cfg
->reset_wait_q
);
7297 ioa_cfg
->scsi_unblock
= 1;
7298 schedule_work(&ioa_cfg
->work_q
);
7300 return IPR_RC_JOB_RETURN
;
/**
 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
 * @supported_dev:	supported device struct
 * @vpids:	vendor product id struct
 *
 * Return value:
 * 	none
 **/
static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
				 struct ipr_std_inq_vpids *vpids)
{
	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
	supported_dev->num_records = 1;
	supported_dev->data_length =
		cpu_to_be16(sizeof(struct ipr_supported_device));
	supported_dev->reserved = 0;
}
7323 * ipr_set_supported_devs - Send Set Supported Devices for a device
7324 * @ipr_cmd: ipr command struct
7326 * This function sends a Set Supported Devices to the adapter
7329 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7331 static int ipr_set_supported_devs(struct ipr_cmnd
*ipr_cmd
)
7333 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7334 struct ipr_supported_device
*supp_dev
= &ioa_cfg
->vpd_cbs
->supp_dev
;
7335 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7336 struct ipr_resource_entry
*res
= ipr_cmd
->u
.res
;
7338 ipr_cmd
->job_step
= ipr_ioa_reset_done
;
7340 list_for_each_entry_continue(res
, &ioa_cfg
->used_res_q
, queue
) {
7341 if (!ipr_is_scsi_disk(res
))
7344 ipr_cmd
->u
.res
= res
;
7345 ipr_set_sup_dev_dflt(supp_dev
, &res
->std_inq_data
.vpids
);
7347 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7348 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
7349 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7351 ioarcb
->cmd_pkt
.cdb
[0] = IPR_SET_SUPPORTED_DEVICES
;
7352 ioarcb
->cmd_pkt
.cdb
[1] = IPR_SET_ALL_SUPPORTED_DEVICES
;
7353 ioarcb
->cmd_pkt
.cdb
[7] = (sizeof(struct ipr_supported_device
) >> 8) & 0xff;
7354 ioarcb
->cmd_pkt
.cdb
[8] = sizeof(struct ipr_supported_device
) & 0xff;
7356 ipr_init_ioadl(ipr_cmd
,
7357 ioa_cfg
->vpd_cbs_dma
+
7358 offsetof(struct ipr_misc_cbs
, supp_dev
),
7359 sizeof(struct ipr_supported_device
),
7360 IPR_IOADL_FLAGS_WRITE_LAST
);
7362 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
7363 IPR_SET_SUP_DEVICE_TIMEOUT
);
7365 if (!ioa_cfg
->sis64
)
7366 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7368 return IPR_RC_JOB_RETURN
;
7372 return IPR_RC_JOB_CONTINUE
;
7376 * ipr_get_mode_page - Locate specified mode page
7377 * @mode_pages: mode page buffer
7378 * @page_code: page code to find
7379 * @len: minimum required length for mode page
7382 * pointer to mode page / NULL on failure
7384 static void *ipr_get_mode_page(struct ipr_mode_pages
*mode_pages
,
7385 u32 page_code
, u32 len
)
7387 struct ipr_mode_page_hdr
*mode_hdr
;
7391 if (!mode_pages
|| (mode_pages
->hdr
.length
== 0))
7394 length
= (mode_pages
->hdr
.length
+ 1) - 4 - mode_pages
->hdr
.block_desc_len
;
7395 mode_hdr
= (struct ipr_mode_page_hdr
*)
7396 (mode_pages
->data
+ mode_pages
->hdr
.block_desc_len
);
7399 if (IPR_GET_MODE_PAGE_CODE(mode_hdr
) == page_code
) {
7400 if (mode_hdr
->page_length
>= (len
- sizeof(struct ipr_mode_page_hdr
)))
7404 page_length
= (sizeof(struct ipr_mode_page_hdr
) +
7405 mode_hdr
->page_length
);
7406 length
-= page_length
;
7407 mode_hdr
= (struct ipr_mode_page_hdr
*)
7408 ((unsigned long)mode_hdr
+ page_length
);
7415 * ipr_check_term_power - Check for term power errors
7416 * @ioa_cfg: ioa config struct
7417 * @mode_pages: IOAFP mode pages buffer
7419 * Check the IOAFP's mode page 28 for term power errors
7424 static void ipr_check_term_power(struct ipr_ioa_cfg
*ioa_cfg
,
7425 struct ipr_mode_pages
*mode_pages
)
7429 struct ipr_dev_bus_entry
*bus
;
7430 struct ipr_mode_page28
*mode_page
;
7432 mode_page
= ipr_get_mode_page(mode_pages
, 0x28,
7433 sizeof(struct ipr_mode_page28
));
7435 entry_length
= mode_page
->entry_length
;
7437 bus
= mode_page
->bus
;
7439 for (i
= 0; i
< mode_page
->num_entries
; i
++) {
7440 if (bus
->flags
& IPR_SCSI_ATTR_NO_TERM_PWR
) {
7441 dev_err(&ioa_cfg
->pdev
->dev
,
7442 "Term power is absent on scsi bus %d\n",
7446 bus
= (struct ipr_dev_bus_entry
*)((char *)bus
+ entry_length
);
7451 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7452 * @ioa_cfg: ioa config struct
7454 * Looks through the config table checking for SES devices. If
7455 * the SES device is in the SES table indicating a maximum SCSI
7456 * bus speed, the speed is limited for the bus.
7461 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg
*ioa_cfg
)
7466 for (i
= 0; i
< IPR_MAX_NUM_BUSES
; i
++) {
7467 max_xfer_rate
= ipr_get_max_scsi_speed(ioa_cfg
, i
,
7468 ioa_cfg
->bus_attr
[i
].bus_width
);
7470 if (max_xfer_rate
< ioa_cfg
->bus_attr
[i
].max_xfer_rate
)
7471 ioa_cfg
->bus_attr
[i
].max_xfer_rate
= max_xfer_rate
;
7476 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7477 * @ioa_cfg: ioa config struct
7478 * @mode_pages: mode page 28 buffer
7480 * Updates mode page 28 based on driver configuration
7485 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg
*ioa_cfg
,
7486 struct ipr_mode_pages
*mode_pages
)
7488 int i
, entry_length
;
7489 struct ipr_dev_bus_entry
*bus
;
7490 struct ipr_bus_attributes
*bus_attr
;
7491 struct ipr_mode_page28
*mode_page
;
7493 mode_page
= ipr_get_mode_page(mode_pages
, 0x28,
7494 sizeof(struct ipr_mode_page28
));
7496 entry_length
= mode_page
->entry_length
;
7498 /* Loop for each device bus entry */
7499 for (i
= 0, bus
= mode_page
->bus
;
7500 i
< mode_page
->num_entries
;
7501 i
++, bus
= (struct ipr_dev_bus_entry
*)((u8
*)bus
+ entry_length
)) {
7502 if (bus
->res_addr
.bus
> IPR_MAX_NUM_BUSES
) {
7503 dev_err(&ioa_cfg
->pdev
->dev
,
7504 "Invalid resource address reported: 0x%08X\n",
7505 IPR_GET_PHYS_LOC(bus
->res_addr
));
7509 bus_attr
= &ioa_cfg
->bus_attr
[i
];
7510 bus
->extended_reset_delay
= IPR_EXTENDED_RESET_DELAY
;
7511 bus
->bus_width
= bus_attr
->bus_width
;
7512 bus
->max_xfer_rate
= cpu_to_be32(bus_attr
->max_xfer_rate
);
7513 bus
->flags
&= ~IPR_SCSI_ATTR_QAS_MASK
;
7514 if (bus_attr
->qas_enabled
)
7515 bus
->flags
|= IPR_SCSI_ATTR_ENABLE_QAS
;
7517 bus
->flags
|= IPR_SCSI_ATTR_DISABLE_QAS
;
7522 * ipr_build_mode_select - Build a mode select command
7523 * @ipr_cmd: ipr command struct
7524 * @res_handle: resource handle to send command to
7525 * @parm: Byte 2 of Mode Sense command
7526 * @dma_addr: DMA buffer address
7527 * @xfer_len: data transfer length
7532 static void ipr_build_mode_select(struct ipr_cmnd
*ipr_cmd
,
7533 __be32 res_handle
, u8 parm
,
7534 dma_addr_t dma_addr
, u8 xfer_len
)
7536 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7538 ioarcb
->res_handle
= res_handle
;
7539 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
7540 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
7541 ioarcb
->cmd_pkt
.cdb
[0] = MODE_SELECT
;
7542 ioarcb
->cmd_pkt
.cdb
[1] = parm
;
7543 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
7545 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_WRITE_LAST
);
7549 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7550 * @ipr_cmd: ipr command struct
7552 * This function sets up the SCSI bus attributes and sends
7553 * a Mode Select for Page 28 to activate them.
7558 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd
*ipr_cmd
)
7560 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7561 struct ipr_mode_pages
*mode_pages
= &ioa_cfg
->vpd_cbs
->mode_pages
;
7565 ipr_scsi_bus_speed_limit(ioa_cfg
);
7566 ipr_check_term_power(ioa_cfg
, mode_pages
);
7567 ipr_modify_ioafp_mode_page_28(ioa_cfg
, mode_pages
);
7568 length
= mode_pages
->hdr
.length
+ 1;
7569 mode_pages
->hdr
.length
= 0;
7571 ipr_build_mode_select(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
), 0x11,
7572 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, mode_pages
),
7575 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7576 ipr_cmd
->u
.res
= list_entry(ioa_cfg
->used_res_q
.next
,
7577 struct ipr_resource_entry
, queue
);
7578 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7581 return IPR_RC_JOB_RETURN
;
7585 * ipr_build_mode_sense - Builds a mode sense command
7586 * @ipr_cmd: ipr command struct
7587 * @res: resource entry struct
7588 * @parm: Byte 2 of mode sense command
7589 * @dma_addr: DMA address of mode sense buffer
7590 * @xfer_len: Size of DMA buffer
7595 static void ipr_build_mode_sense(struct ipr_cmnd
*ipr_cmd
,
7597 u8 parm
, dma_addr_t dma_addr
, u8 xfer_len
)
7599 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7601 ioarcb
->res_handle
= res_handle
;
7602 ioarcb
->cmd_pkt
.cdb
[0] = MODE_SENSE
;
7603 ioarcb
->cmd_pkt
.cdb
[2] = parm
;
7604 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
7605 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
7607 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_READ_LAST
);
7611 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7612 * @ipr_cmd: ipr command struct
7614 * This function handles the failure of an IOA bringup command.
7619 static int ipr_reset_cmd_failed(struct ipr_cmnd
*ipr_cmd
)
7621 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7622 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7624 dev_err(&ioa_cfg
->pdev
->dev
,
7625 "0x%02X failed with IOASC: 0x%08X\n",
7626 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[0], ioasc
);
7628 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
7629 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
7630 return IPR_RC_JOB_RETURN
;
7634 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7635 * @ipr_cmd: ipr command struct
7637 * This function handles the failure of a Mode Sense to the IOAFP.
7638 * Some adapters do not handle all mode pages.
7641 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7643 static int ipr_reset_mode_sense_failed(struct ipr_cmnd
*ipr_cmd
)
7645 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7646 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7648 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
) {
7649 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7650 ipr_cmd
->u
.res
= list_entry(ioa_cfg
->used_res_q
.next
,
7651 struct ipr_resource_entry
, queue
);
7652 return IPR_RC_JOB_CONTINUE
;
7655 return ipr_reset_cmd_failed(ipr_cmd
);
7659 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7660 * @ipr_cmd: ipr command struct
7662 * This function send a Page 28 mode sense to the IOA to
7663 * retrieve SCSI bus attributes.
7668 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd
*ipr_cmd
)
7670 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7673 ipr_build_mode_sense(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
),
7674 0x28, ioa_cfg
->vpd_cbs_dma
+
7675 offsetof(struct ipr_misc_cbs
, mode_pages
),
7676 sizeof(struct ipr_mode_pages
));
7678 ipr_cmd
->job_step
= ipr_ioafp_mode_select_page28
;
7679 ipr_cmd
->job_step_failed
= ipr_reset_mode_sense_failed
;
7681 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7684 return IPR_RC_JOB_RETURN
;
7688 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7689 * @ipr_cmd: ipr command struct
7691 * This function enables dual IOA RAID support if possible.
7696 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd
*ipr_cmd
)
7698 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7699 struct ipr_mode_pages
*mode_pages
= &ioa_cfg
->vpd_cbs
->mode_pages
;
7700 struct ipr_mode_page24
*mode_page
;
7704 mode_page
= ipr_get_mode_page(mode_pages
, 0x24,
7705 sizeof(struct ipr_mode_page24
));
7708 mode_page
->flags
|= IPR_ENABLE_DUAL_IOA_AF
;
7710 length
= mode_pages
->hdr
.length
+ 1;
7711 mode_pages
->hdr
.length
= 0;
7713 ipr_build_mode_select(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
), 0x11,
7714 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, mode_pages
),
7717 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7718 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7721 return IPR_RC_JOB_RETURN
;
7725 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7726 * @ipr_cmd: ipr command struct
7728 * This function handles the failure of a Mode Sense to the IOAFP.
7729 * Some adapters do not handle all mode pages.
7732 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7734 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd
*ipr_cmd
)
7736 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7738 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
) {
7739 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7740 return IPR_RC_JOB_CONTINUE
;
7743 return ipr_reset_cmd_failed(ipr_cmd
);
7747 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7748 * @ipr_cmd: ipr command struct
7750 * This function send a mode sense to the IOA to retrieve
7751 * the IOA Advanced Function Control mode page.
7756 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd
*ipr_cmd
)
7758 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7761 ipr_build_mode_sense(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
),
7762 0x24, ioa_cfg
->vpd_cbs_dma
+
7763 offsetof(struct ipr_misc_cbs
, mode_pages
),
7764 sizeof(struct ipr_mode_pages
));
7766 ipr_cmd
->job_step
= ipr_ioafp_mode_select_page24
;
7767 ipr_cmd
->job_step_failed
= ipr_reset_mode_sense_page24_failed
;
7769 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7772 return IPR_RC_JOB_RETURN
;
7776 * ipr_init_res_table - Initialize the resource table
7777 * @ipr_cmd: ipr command struct
7779 * This function looks through the existing resource table, comparing
7780 * it with the config table. This function will take care of old/new
7781 * devices and schedule adding/removing them from the mid-layer
7785 * IPR_RC_JOB_CONTINUE
7787 static int ipr_init_res_table(struct ipr_cmnd
*ipr_cmd
)
7789 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7790 struct ipr_resource_entry
*res
, *temp
;
7791 struct ipr_config_table_entry_wrapper cfgtew
;
7792 int entries
, found
, flag
, i
;
7797 flag
= ioa_cfg
->u
.cfg_table64
->hdr64
.flags
;
7799 flag
= ioa_cfg
->u
.cfg_table
->hdr
.flags
;
7801 if (flag
& IPR_UCODE_DOWNLOAD_REQ
)
7802 dev_err(&ioa_cfg
->pdev
->dev
, "Microcode download required\n");
7804 list_for_each_entry_safe(res
, temp
, &ioa_cfg
->used_res_q
, queue
)
7805 list_move_tail(&res
->queue
, &old_res
);
7808 entries
= be16_to_cpu(ioa_cfg
->u
.cfg_table64
->hdr64
.num_entries
);
7810 entries
= ioa_cfg
->u
.cfg_table
->hdr
.num_entries
;
7812 for (i
= 0; i
< entries
; i
++) {
7814 cfgtew
.u
.cfgte64
= &ioa_cfg
->u
.cfg_table64
->dev
[i
];
7816 cfgtew
.u
.cfgte
= &ioa_cfg
->u
.cfg_table
->dev
[i
];
7819 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7820 if (ipr_is_same_device(res
, &cfgtew
)) {
7821 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7828 if (list_empty(&ioa_cfg
->free_res_q
)) {
7829 dev_err(&ioa_cfg
->pdev
->dev
, "Too many devices attached\n");
7834 res
= list_entry(ioa_cfg
->free_res_q
.next
,
7835 struct ipr_resource_entry
, queue
);
7836 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7837 ipr_init_res_entry(res
, &cfgtew
);
7839 } else if (res
->sdev
&& (ipr_is_vset_device(res
) || ipr_is_scsi_disk(res
)))
7840 res
->sdev
->allow_restart
= 1;
7843 ipr_update_res_entry(res
, &cfgtew
);
7846 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7848 res
->del_from_ml
= 1;
7849 res
->res_handle
= IPR_INVALID_RES_HANDLE
;
7850 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7854 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7855 ipr_clear_res_target(res
);
7856 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
7859 if (ioa_cfg
->dual_raid
&& ipr_dual_ioa_raid
)
7860 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page24
;
7862 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7865 return IPR_RC_JOB_CONTINUE
;
7869 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7870 * @ipr_cmd: ipr command struct
7872 * This function sends a Query IOA Configuration command
7873 * to the adapter to retrieve the IOA configuration table.
7878 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd
*ipr_cmd
)
7880 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7881 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7882 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
7883 struct ipr_inquiry_cap
*cap
= &ioa_cfg
->vpd_cbs
->cap
;
7886 if (cap
->cap
& IPR_CAP_DUAL_IOA_RAID
)
7887 ioa_cfg
->dual_raid
= 1;
7888 dev_info(&ioa_cfg
->pdev
->dev
, "Adapter firmware version: %02X%02X%02X%02X\n",
7889 ucode_vpd
->major_release
, ucode_vpd
->card_type
,
7890 ucode_vpd
->minor_release
[0], ucode_vpd
->minor_release
[1]);
7891 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7892 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7894 ioarcb
->cmd_pkt
.cdb
[0] = IPR_QUERY_IOA_CONFIG
;
7895 ioarcb
->cmd_pkt
.cdb
[6] = (ioa_cfg
->cfg_table_size
>> 16) & 0xff;
7896 ioarcb
->cmd_pkt
.cdb
[7] = (ioa_cfg
->cfg_table_size
>> 8) & 0xff;
7897 ioarcb
->cmd_pkt
.cdb
[8] = ioa_cfg
->cfg_table_size
& 0xff;
7899 ipr_init_ioadl(ipr_cmd
, ioa_cfg
->cfg_table_dma
, ioa_cfg
->cfg_table_size
,
7900 IPR_IOADL_FLAGS_READ_LAST
);
7902 ipr_cmd
->job_step
= ipr_init_res_table
;
7904 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7907 return IPR_RC_JOB_RETURN
;
7910 static int ipr_ioa_service_action_failed(struct ipr_cmnd
*ipr_cmd
)
7912 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7914 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
)
7915 return IPR_RC_JOB_CONTINUE
;
7917 return ipr_reset_cmd_failed(ipr_cmd
);
7920 static void ipr_build_ioa_service_action(struct ipr_cmnd
*ipr_cmd
,
7921 __be32 res_handle
, u8 sa_code
)
7923 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7925 ioarcb
->res_handle
= res_handle
;
7926 ioarcb
->cmd_pkt
.cdb
[0] = IPR_IOA_SERVICE_ACTION
;
7927 ioarcb
->cmd_pkt
.cdb
[1] = sa_code
;
7928 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7932 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7938 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd
*ipr_cmd
)
7940 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7941 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7942 struct ipr_inquiry_pageC4
*pageC4
= &ioa_cfg
->vpd_cbs
->pageC4_data
;
7946 ipr_cmd
->job_step
= ipr_ioafp_query_ioa_cfg
;
7948 if (pageC4
->cache_cap
[0] & IPR_CAP_SYNC_CACHE
) {
7949 ipr_build_ioa_service_action(ipr_cmd
,
7950 cpu_to_be32(IPR_IOA_RES_HANDLE
),
7951 IPR_IOA_SA_CHANGE_CACHE_PARAMS
);
7953 ioarcb
->cmd_pkt
.cdb
[2] = 0x40;
7955 ipr_cmd
->job_step_failed
= ipr_ioa_service_action_failed
;
7956 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
7957 IPR_SET_SUP_DEVICE_TIMEOUT
);
7960 return IPR_RC_JOB_RETURN
;
7964 return IPR_RC_JOB_CONTINUE
;
7968 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7969 * @ipr_cmd: ipr command struct
7971 * This utility function sends an inquiry to the adapter.
7976 static void ipr_ioafp_inquiry(struct ipr_cmnd
*ipr_cmd
, u8 flags
, u8 page
,
7977 dma_addr_t dma_addr
, u8 xfer_len
)
7979 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7982 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
7983 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7985 ioarcb
->cmd_pkt
.cdb
[0] = INQUIRY
;
7986 ioarcb
->cmd_pkt
.cdb
[1] = flags
;
7987 ioarcb
->cmd_pkt
.cdb
[2] = page
;
7988 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
7990 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_READ_LAST
);
7992 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:	inquiry page 0 buffer
 * @page:	page code.
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
			return 1;

	return 0;
}
8018 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8019 * @ipr_cmd: ipr command struct
8021 * This function sends a Page 0xC4 inquiry to the adapter
8022 * to retrieve software VPD information.
8025 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8027 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd
*ipr_cmd
)
8029 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8030 struct ipr_inquiry_page0
*page0
= &ioa_cfg
->vpd_cbs
->page0_data
;
8031 struct ipr_inquiry_pageC4
*pageC4
= &ioa_cfg
->vpd_cbs
->pageC4_data
;
8034 ipr_cmd
->job_step
= ipr_ioafp_set_caching_parameters
;
8035 memset(pageC4
, 0, sizeof(*pageC4
));
8037 if (ipr_inquiry_page_supported(page0
, 0xC4)) {
8038 ipr_ioafp_inquiry(ipr_cmd
, 1, 0xC4,
8039 (ioa_cfg
->vpd_cbs_dma
8040 + offsetof(struct ipr_misc_cbs
,
8042 sizeof(struct ipr_inquiry_pageC4
));
8043 return IPR_RC_JOB_RETURN
;
8047 return IPR_RC_JOB_CONTINUE
;
8051 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8052 * @ipr_cmd: ipr command struct
8054 * This function sends a Page 0xD0 inquiry to the adapter
8055 * to retrieve adapter capabilities.
8058 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8060 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd
*ipr_cmd
)
8062 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8063 struct ipr_inquiry_page0
*page0
= &ioa_cfg
->vpd_cbs
->page0_data
;
8064 struct ipr_inquiry_cap
*cap
= &ioa_cfg
->vpd_cbs
->cap
;
8067 ipr_cmd
->job_step
= ipr_ioafp_pageC4_inquiry
;
8068 memset(cap
, 0, sizeof(*cap
));
8070 if (ipr_inquiry_page_supported(page0
, 0xD0)) {
8071 ipr_ioafp_inquiry(ipr_cmd
, 1, 0xD0,
8072 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, cap
),
8073 sizeof(struct ipr_inquiry_cap
));
8074 return IPR_RC_JOB_RETURN
;
8078 return IPR_RC_JOB_CONTINUE
;
8082 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8083 * @ipr_cmd: ipr command struct
8085 * This function sends a Page 3 inquiry to the adapter
8086 * to retrieve software VPD information.
8089 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8091 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd
*ipr_cmd
)
8093 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8097 ipr_cmd
->job_step
= ipr_ioafp_cap_inquiry
;
8099 ipr_ioafp_inquiry(ipr_cmd
, 1, 3,
8100 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, page3_data
),
8101 sizeof(struct ipr_inquiry_page3
));
8104 return IPR_RC_JOB_RETURN
;
/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5] = "";

	ENTER;

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	if (ipr_invalid_adapter(ioa_cfg)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");

		if (!ipr_testmode) {
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue,
				      &ioa_cfg->hrrq->hrrq_free_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
	if (ioa_cfg->identify_hrrq_index == 0)
		dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
		hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];

		ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		if (ioa_cfg->sis64)
			ioarcb->cmd_pkt.cdb[1] = 0x1;

		if (ioa_cfg->nvectors == 1)
			ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
		else
			ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;

		ioarcb->cmd_pkt.cdb[2] =
			((u64) hrrq->host_rrq_dma >> 24) & 0xff;
		ioarcb->cmd_pkt.cdb[3] =
			((u64) hrrq->host_rrq_dma >> 16) & 0xff;
		ioarcb->cmd_pkt.cdb[4] =
			((u64) hrrq->host_rrq_dma >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[5] =
			((u64) hrrq->host_rrq_dma) & 0xff;
		ioarcb->cmd_pkt.cdb[7] =
			((sizeof(u32) * hrrq->size) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] =
			(sizeof(u32) * hrrq->size) & 0xff;

		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
			ioarcb->cmd_pkt.cdb[9] =
					ioa_cfg->identify_hrrq_index;

		if (ioa_cfg->sis64) {
			ioarcb->cmd_pkt.cdb[10] =
				((u64) hrrq->host_rrq_dma >> 56) & 0xff;
			ioarcb->cmd_pkt.cdb[11] =
				((u64) hrrq->host_rrq_dma >> 48) & 0xff;
			ioarcb->cmd_pkt.cdb[12] =
				((u64) hrrq->host_rrq_dma >> 40) & 0xff;
			ioarcb->cmd_pkt.cdb[13] =
				((u64) hrrq->host_rrq_dma >> 32) & 0xff;
		}

		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
			ioarcb->cmd_pkt.cdb[14] =
					ioa_cfg->identify_hrrq_index;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_INTERNAL_TIMEOUT);

		if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;

		LEAVE;
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
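
/*
 * Note on the Identify Host RRQ CDB built above: the host RRQ bus address
 * is encoded most-significant-byte first, with cdb[2..5] carrying address
 * bits 31:0 and, for SIS-64 adapters, cdb[10..13] carrying bits 63:32.
 * cdb[7..8] carry the queue length in bytes (sizeof(u32) * hrrq->size) and
 * cdb[9]/cdb[14] carry the queue index when HRRQ selection is enabled.
 * As a worked example, a queue at bus address 0x12345678 with 256 entries
 * would be sent as cdb[2..5] = 12 34 56 78 and cdb[7..8] = 04 00 (hex).
 */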
/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_timer_done(struct timer_list *t)
{
	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{

	ENTER;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}
/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);

		/* Initialize Host RRQ pointers */
		hrrq->hrrq_start = hrrq->host_rrq;
		hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
		hrrq->hrrq_curr = hrrq->hrrq_start;
		hrrq->toggle_bit = 1;
		spin_unlock(&hrrq->_lock);
	}
	wmb();

	ioa_cfg->identify_hrrq_index = 0;
	if (ioa_cfg->hrrq_num == 1)
		atomic_set(&ioa_cfg->hrrq_index, 0);
	else
		atomic_set(&ioa_cfg->hrrq_index, 1);

	/* Zero out config table */
	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
}
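
/*
 * The toggle_bit set up above is what the interrupt handler uses to tell
 * freshly written response queue entries from stale ones: the adapter
 * flips the bit each time the queue wraps, so the host only has to track
 * the expected value rather than clear entries as it consumes them.
 * Seeding hrrq_index to 1 when several HRRQs exist appears to reserve
 * queue 0 for internal adapter commands while normal I/O rotates over the
 * remaining queues.
 */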
/**
 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
{
	unsigned long stage, stage_time;
	u32 feedback;
	volatile u32 int_reg;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u64 maskval = 0;

	feedback = readl(ioa_cfg->regs.init_feedback_reg);
	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;

	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);

	/* sanity check the stage_time value */
	if (stage_time == 0)
		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;

	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		stage_time = ioa_cfg->transop_timeout;
		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
			maskval = IPR_PCII_IPL_STAGE_CHANGE;
			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			return IPR_RC_JOB_CONTINUE;
		}
	}

	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
	ipr_cmd->timer.function = ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	return IPR_RC_JOB_RETURN;
}
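
/*
 * The feedback register polled above reports both the current IPL stage
 * and a stage time budget supplied by the adapter; the driver clamps that
 * budget between IPR_IPL_INIT_MIN_STAGE_TIME and
 * IPR_LONG_OPERATIONAL_TIMEOUT before arming ipr_oper_timeout, so a bogus
 * value from the hardware cannot hang or prematurely fail the reset job.
 */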
8405 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8406 * @ipr_cmd: ipr command struct
8408 * This function reinitializes some control blocks and
8409 * enables destructive diagnostics on the adapter.
8414 static int ipr_reset_enable_ioa(struct ipr_cmnd
*ipr_cmd
)
8416 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8417 volatile u32 int_reg
;
8418 volatile u64 maskval
;
8422 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
8423 ipr_init_ioa_mem(ioa_cfg
);
8425 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8426 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
8427 ioa_cfg
->hrrq
[i
].allow_interrupts
= 1;
8428 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
8430 if (ioa_cfg
->sis64
) {
8431 /* Set the adapter to the correct endian mode. */
8432 writel(IPR_ENDIAN_SWAP_KEY
, ioa_cfg
->regs
.endian_swap_reg
);
8433 int_reg
= readl(ioa_cfg
->regs
.endian_swap_reg
);
8436 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
8438 if (int_reg
& IPR_PCII_IOA_TRANS_TO_OPER
) {
8439 writel((IPR_PCII_ERROR_INTERRUPTS
| IPR_PCII_HRRQ_UPDATED
),
8440 ioa_cfg
->regs
.clr_interrupt_mask_reg32
);
8441 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
8442 return IPR_RC_JOB_CONTINUE
;
8445 /* Enable destructive diagnostics on IOA */
8446 writel(ioa_cfg
->doorbell
, ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
8448 if (ioa_cfg
->sis64
) {
8449 maskval
= IPR_PCII_IPL_STAGE_CHANGE
;
8450 maskval
= (maskval
<< 32) | IPR_PCII_OPER_INTERRUPTS
;
8451 writeq(maskval
, ioa_cfg
->regs
.clr_interrupt_mask_reg
);
8453 writel(IPR_PCII_OPER_INTERRUPTS
, ioa_cfg
->regs
.clr_interrupt_mask_reg32
);
8455 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
8457 dev_info(&ioa_cfg
->pdev
->dev
, "Initializing IOA.\n");
8459 if (ioa_cfg
->sis64
) {
8460 ipr_cmd
->job_step
= ipr_reset_next_stage
;
8461 return IPR_RC_JOB_CONTINUE
;
8464 ipr_cmd
->timer
.expires
= jiffies
+ (ioa_cfg
->transop_timeout
* HZ
);
8465 ipr_cmd
->timer
.function
= ipr_oper_timeout
;
8466 ipr_cmd
->done
= ipr_reset_ioa_job
;
8467 add_timer(&ipr_cmd
->timer
);
8468 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
8471 return IPR_RC_JOB_RETURN
;
/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	else if (ioa_cfg->sdt_state == READ_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ioa_cfg->dump_timeout = 1;
	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:	ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 *	nothing
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}
8516 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8517 * @ioa_cfg: ioa config struct
8519 * Fetches the unit check buffer from the adapter by clocking the data
8520 * through the mailbox register.
8525 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg
*ioa_cfg
)
8527 unsigned long mailbox
;
8528 struct ipr_hostrcb
*hostrcb
;
8529 struct ipr_uc_sdt sdt
;
8533 mailbox
= readl(ioa_cfg
->ioa_mailbox
);
8535 if (!ioa_cfg
->sis64
&& !ipr_sdt_is_fmt2(mailbox
)) {
8536 ipr_unit_check_no_data(ioa_cfg
);
8540 memset(&sdt
, 0, sizeof(struct ipr_uc_sdt
));
8541 rc
= ipr_get_ldump_data_section(ioa_cfg
, mailbox
, (__be32
*) &sdt
,
8542 (sizeof(struct ipr_uc_sdt
)) / sizeof(__be32
));
8544 if (rc
|| !(sdt
.entry
[0].flags
& IPR_SDT_VALID_ENTRY
) ||
8545 ((be32_to_cpu(sdt
.hdr
.state
) != IPR_FMT3_SDT_READY_TO_USE
) &&
8546 (be32_to_cpu(sdt
.hdr
.state
) != IPR_FMT2_SDT_READY_TO_USE
))) {
8547 ipr_unit_check_no_data(ioa_cfg
);
8551 /* Find length of the first sdt entry (UC buffer) */
8552 if (be32_to_cpu(sdt
.hdr
.state
) == IPR_FMT3_SDT_READY_TO_USE
)
8553 length
= be32_to_cpu(sdt
.entry
[0].end_token
);
8555 length
= (be32_to_cpu(sdt
.entry
[0].end_token
) -
8556 be32_to_cpu(sdt
.entry
[0].start_token
)) &
8557 IPR_FMT2_MBX_ADDR_MASK
;
8559 hostrcb
= list_entry(ioa_cfg
->hostrcb_free_q
.next
,
8560 struct ipr_hostrcb
, queue
);
8561 list_del_init(&hostrcb
->queue
);
8562 memset(&hostrcb
->hcam
, 0, sizeof(hostrcb
->hcam
));
8564 rc
= ipr_get_ldump_data_section(ioa_cfg
,
8565 be32_to_cpu(sdt
.entry
[0].start_token
),
8566 (__be32
*)&hostrcb
->hcam
,
8567 min(length
, (int)sizeof(hostrcb
->hcam
)) / sizeof(__be32
));
8570 ipr_handle_log_data(ioa_cfg
, hostrcb
);
8571 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
8572 if (ioasc
== IPR_IOASC_NR_IOA_RESET_REQUIRED
&&
8573 ioa_cfg
->sdt_state
== GET_DUMP
)
8574 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
8576 ipr_unit_check_no_data(ioa_cfg
);
8578 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_free_q
);
/**
 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function will call to get the unit check buffer.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ioa_cfg->ioa_unit_checked = 0;
	ipr_get_unit_check_buffer(ioa_cfg);
	ipr_cmd->job_step = ipr_reset_alert;
	ipr_reset_start_timer(ipr_cmd, 0);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
8604 static int ipr_dump_mailbox_wait(struct ipr_cmnd
*ipr_cmd
)
8606 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8610 if (ioa_cfg
->sdt_state
!= GET_DUMP
)
8611 return IPR_RC_JOB_RETURN
;
8613 if (!ioa_cfg
->sis64
|| !ipr_cmd
->u
.time_left
||
8614 (readl(ioa_cfg
->regs
.sense_interrupt_reg
) &
8615 IPR_PCII_MAILBOX_STABLE
)) {
8617 if (!ipr_cmd
->u
.time_left
)
8618 dev_err(&ioa_cfg
->pdev
->dev
,
8619 "Timed out waiting for Mailbox register.\n");
8621 ioa_cfg
->sdt_state
= READ_DUMP
;
8622 ioa_cfg
->dump_timeout
= 0;
8624 ipr_reset_start_timer(ipr_cmd
, IPR_SIS64_DUMP_TIMEOUT
);
8626 ipr_reset_start_timer(ipr_cmd
, IPR_SIS32_DUMP_TIMEOUT
);
8627 ipr_cmd
->job_step
= ipr_reset_wait_for_dump
;
8628 schedule_work(&ioa_cfg
->work_q
);
8631 ipr_cmd
->u
.time_left
-= IPR_CHECK_FOR_RESET_TIMEOUT
;
8632 ipr_reset_start_timer(ipr_cmd
,
8633 IPR_CHECK_FOR_RESET_TIMEOUT
);
8637 return IPR_RC_JOB_RETURN
;
8641 * ipr_reset_restore_cfg_space - Restore PCI config space.
8642 * @ipr_cmd: ipr command struct
8644 * Description: This function restores the saved PCI config space of
8645 * the adapter, fails all outstanding ops back to the callers, and
8646 * fetches the dump/unit check if applicable to this reset.
8649 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8651 static int ipr_reset_restore_cfg_space(struct ipr_cmnd
*ipr_cmd
)
8653 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8657 ioa_cfg
->pdev
->state_saved
= true;
8658 pci_restore_state(ioa_cfg
->pdev
);
8660 if (ipr_set_pcix_cmd_reg(ioa_cfg
)) {
8661 ipr_cmd
->s
.ioasa
.hdr
.ioasc
= cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR
);
8662 return IPR_RC_JOB_CONTINUE
;
8665 ipr_fail_all_ops(ioa_cfg
);
8667 if (ioa_cfg
->sis64
) {
8668 /* Set the adapter to the correct endian mode. */
8669 writel(IPR_ENDIAN_SWAP_KEY
, ioa_cfg
->regs
.endian_swap_reg
);
8670 int_reg
= readl(ioa_cfg
->regs
.endian_swap_reg
);
8673 if (ioa_cfg
->ioa_unit_checked
) {
8674 if (ioa_cfg
->sis64
) {
8675 ipr_cmd
->job_step
= ipr_reset_get_unit_check_job
;
8676 ipr_reset_start_timer(ipr_cmd
, IPR_DUMP_DELAY_TIMEOUT
);
8677 return IPR_RC_JOB_RETURN
;
8679 ioa_cfg
->ioa_unit_checked
= 0;
8680 ipr_get_unit_check_buffer(ioa_cfg
);
8681 ipr_cmd
->job_step
= ipr_reset_alert
;
8682 ipr_reset_start_timer(ipr_cmd
, 0);
8683 return IPR_RC_JOB_RETURN
;
8687 if (ioa_cfg
->in_ioa_bringdown
) {
8688 ipr_cmd
->job_step
= ipr_ioa_bringdown_done
;
8689 } else if (ioa_cfg
->sdt_state
== GET_DUMP
) {
8690 ipr_cmd
->job_step
= ipr_dump_mailbox_wait
;
8691 ipr_cmd
->u
.time_left
= IPR_WAIT_FOR_MAILBOX
;
8693 ipr_cmd
->job_step
= ipr_reset_enable_ioa
;
8697 return IPR_RC_JOB_CONTINUE
;
/**
 * ipr_reset_bist_done - BIST has completed on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: Unblock config space and resume the reset process.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	if (ioa_cfg->cfg_locked)
		pci_cfg_access_unlock(ioa_cfg->pdev);
	ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
8723 * ipr_reset_start_bist - Run BIST on the adapter.
8724 * @ipr_cmd: ipr command struct
8726 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8729 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8731 static int ipr_reset_start_bist(struct ipr_cmnd
*ipr_cmd
)
8733 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8734 int rc
= PCIBIOS_SUCCESSFUL
;
8737 if (ioa_cfg
->ipr_chip
->bist_method
== IPR_MMIO
)
8738 writel(IPR_UPROCI_SIS64_START_BIST
,
8739 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
8741 rc
= pci_write_config_byte(ioa_cfg
->pdev
, PCI_BIST
, PCI_BIST_START
);
8743 if (rc
== PCIBIOS_SUCCESSFUL
) {
8744 ipr_cmd
->job_step
= ipr_reset_bist_done
;
8745 ipr_reset_start_timer(ipr_cmd
, IPR_WAIT_FOR_BIST_TIMEOUT
);
8746 rc
= IPR_RC_JOB_RETURN
;
8748 if (ioa_cfg
->cfg_locked
)
8749 pci_cfg_access_unlock(ipr_cmd
->ioa_cfg
->pdev
);
8750 ioa_cfg
->cfg_locked
= 0;
8751 ipr_cmd
->s
.ioasa
.hdr
.ioasc
= cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR
);
8752 rc
= IPR_RC_JOB_CONTINUE
;
/**
 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This clears PCI reset to the adapter and delays two seconds.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
	ENTER;
	ipr_cmd->job_step = ipr_reset_bist_done;
	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}
8778 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8779 * @work: work struct
8781 * Description: This pulses warm reset to a slot.
8784 static void ipr_reset_reset_work(struct work_struct
*work
)
8786 struct ipr_cmnd
*ipr_cmd
= container_of(work
, struct ipr_cmnd
, work
);
8787 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8788 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
8789 unsigned long lock_flags
= 0;
8792 pci_set_pcie_reset_state(pdev
, pcie_warm_reset
);
8793 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT
));
8794 pci_set_pcie_reset_state(pdev
, pcie_deassert_reset
);
8796 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
8797 if (ioa_cfg
->reset_cmd
== ipr_cmd
)
8798 ipr_reset_ioa_job(ipr_cmd
);
8799 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
/**
 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This asserts PCI reset to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
	queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
	ipr_cmd->job_step = ipr_reset_slot_reset_done;
	LEAVE;
	return IPR_RC_JOB_RETURN;
}
8825 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8826 * @ipr_cmd: ipr command struct
8828 * Description: This attempts to block config access to the IOA.
8831 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8833 static int ipr_reset_block_config_access_wait(struct ipr_cmnd
*ipr_cmd
)
8835 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8836 int rc
= IPR_RC_JOB_CONTINUE
;
8838 if (pci_cfg_access_trylock(ioa_cfg
->pdev
)) {
8839 ioa_cfg
->cfg_locked
= 1;
8840 ipr_cmd
->job_step
= ioa_cfg
->reset
;
8842 if (ipr_cmd
->u
.time_left
) {
8843 rc
= IPR_RC_JOB_RETURN
;
8844 ipr_cmd
->u
.time_left
-= IPR_CHECK_FOR_RESET_TIMEOUT
;
8845 ipr_reset_start_timer(ipr_cmd
,
8846 IPR_CHECK_FOR_RESET_TIMEOUT
);
8848 ipr_cmd
->job_step
= ioa_cfg
->reset
;
8849 dev_err(&ioa_cfg
->pdev
->dev
,
8850 "Timed out waiting to lock config access. Resetting anyway.\n");
/**
 * ipr_reset_block_config_access - Block config access to the IOA
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}
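
/*
 * IPR_PCII_CRITICAL_OPERATION is believed to be raised by the adapter
 * while it is performing work that must not be interrupted (for example
 * the flash writes mentioned in the ipr_reset_wait_to_start_bist()
 * description), so a clear bit is taken as permission to reset right away.
 */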
8890 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8891 * @ipr_cmd: ipr command struct
8893 * Description: This function waits for adapter permission to run BIST,
8894 * then runs BIST. If the adapter does not give permission after a
8895 * reasonable time, we will reset the adapter anyway. The impact of
8896 * resetting the adapter without warning the adapter is the risk of
8897 * losing the persistent error log on the adapter. If the adapter is
8898 * reset while it is writing to the flash on the adapter, the flash
8899 * segment will have bad ECC and be zeroed.
8902 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8904 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd
*ipr_cmd
)
8906 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8907 int rc
= IPR_RC_JOB_RETURN
;
8909 if (!ipr_reset_allowed(ioa_cfg
) && ipr_cmd
->u
.time_left
) {
8910 ipr_cmd
->u
.time_left
-= IPR_CHECK_FOR_RESET_TIMEOUT
;
8911 ipr_reset_start_timer(ipr_cmd
, IPR_CHECK_FOR_RESET_TIMEOUT
);
8913 ipr_cmd
->job_step
= ipr_reset_block_config_access
;
8914 rc
= IPR_RC_JOB_CONTINUE
;
8921 * ipr_reset_alert - Alert the adapter of a pending reset
8922 * @ipr_cmd: ipr command struct
8924 * Description: This function alerts the adapter that it will be reset.
8925 * If memory space is not currently enabled, proceed directly
8926 * to running BIST on the adapter. The timer must always be started
8927 * so we guarantee we do not run BIST from ipr_isr.
8932 static int ipr_reset_alert(struct ipr_cmnd
*ipr_cmd
)
8934 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8939 rc
= pci_read_config_word(ioa_cfg
->pdev
, PCI_COMMAND
, &cmd_reg
);
8941 if ((rc
== PCIBIOS_SUCCESSFUL
) && (cmd_reg
& PCI_COMMAND_MEMORY
)) {
8942 ipr_mask_and_clear_interrupts(ioa_cfg
, ~0);
8943 writel(IPR_UPROCI_RESET_ALERT
, ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
8944 ipr_cmd
->job_step
= ipr_reset_wait_to_start_bist
;
8946 ipr_cmd
->job_step
= ipr_reset_block_config_access
;
8949 ipr_cmd
->u
.time_left
= IPR_WAIT_FOR_RESET_TIMEOUT
;
8950 ipr_reset_start_timer(ipr_cmd
, IPR_CHECK_FOR_RESET_TIMEOUT
);
8953 return IPR_RC_JOB_RETURN
;
/**
 * ipr_reset_quiesce_done - Complete IOA disconnect
 * @ipr_cmd:	ipr command struct
 *
 * Description: Freeze the adapter to complete quiesce processing.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioa_bringdown_done;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
8977 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8978 * @ipr_cmd: ipr command struct
8980 * Description: Ensure nothing is outstanding to the IOA and
8981 * proceed with IOA disconnect. Otherwise reset the IOA.
8984 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8986 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd
*ipr_cmd
)
8988 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8989 struct ipr_cmnd
*loop_cmd
;
8990 struct ipr_hrr_queue
*hrrq
;
8991 int rc
= IPR_RC_JOB_CONTINUE
;
8995 ipr_cmd
->job_step
= ipr_reset_quiesce_done
;
8997 for_each_hrrq(hrrq
, ioa_cfg
) {
8998 spin_lock(&hrrq
->_lock
);
8999 list_for_each_entry(loop_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
9001 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
9002 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
9003 rc
= IPR_RC_JOB_RETURN
;
9006 spin_unlock(&hrrq
->_lock
);
9017 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9018 * @ipr_cmd: ipr command struct
9020 * Description: Cancel any oustanding HCAMs to the IOA.
9023 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9025 static int ipr_reset_cancel_hcam(struct ipr_cmnd
*ipr_cmd
)
9027 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
9028 int rc
= IPR_RC_JOB_CONTINUE
;
9029 struct ipr_cmd_pkt
*cmd_pkt
;
9030 struct ipr_cmnd
*hcam_cmd
;
9031 struct ipr_hrr_queue
*hrrq
= &ioa_cfg
->hrrq
[IPR_INIT_HRRQ
];
9034 ipr_cmd
->job_step
= ipr_reset_cancel_hcam_done
;
9036 if (!hrrq
->ioa_is_dead
) {
9037 if (!list_empty(&ioa_cfg
->hostrcb_pending_q
)) {
9038 list_for_each_entry(hcam_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
9039 if (hcam_cmd
->ioarcb
.cmd_pkt
.cdb
[0] != IPR_HOST_CONTROLLED_ASYNC
)
9042 ipr_cmd
->ioarcb
.res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
9043 ipr_cmd
->ioarcb
.cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
9044 cmd_pkt
= &ipr_cmd
->ioarcb
.cmd_pkt
;
9045 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
9046 cmd_pkt
->cdb
[0] = IPR_CANCEL_REQUEST
;
9047 cmd_pkt
->cdb
[1] = IPR_CANCEL_64BIT_IOARCB
;
9048 cmd_pkt
->cdb
[10] = ((u64
) hcam_cmd
->dma_addr
>> 56) & 0xff;
9049 cmd_pkt
->cdb
[11] = ((u64
) hcam_cmd
->dma_addr
>> 48) & 0xff;
9050 cmd_pkt
->cdb
[12] = ((u64
) hcam_cmd
->dma_addr
>> 40) & 0xff;
9051 cmd_pkt
->cdb
[13] = ((u64
) hcam_cmd
->dma_addr
>> 32) & 0xff;
9052 cmd_pkt
->cdb
[2] = ((u64
) hcam_cmd
->dma_addr
>> 24) & 0xff;
9053 cmd_pkt
->cdb
[3] = ((u64
) hcam_cmd
->dma_addr
>> 16) & 0xff;
9054 cmd_pkt
->cdb
[4] = ((u64
) hcam_cmd
->dma_addr
>> 8) & 0xff;
9055 cmd_pkt
->cdb
[5] = ((u64
) hcam_cmd
->dma_addr
) & 0xff;
9057 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
9058 IPR_CANCEL_TIMEOUT
);
9060 rc
= IPR_RC_JOB_RETURN
;
9061 ipr_cmd
->job_step
= ipr_reset_cancel_hcam
;
9066 ipr_cmd
->job_step
= ipr_reset_alert
;
9073 * ipr_reset_ucode_download_done - Microcode download completion
9074 * @ipr_cmd: ipr command struct
9076 * Description: This function unmaps the microcode download buffer.
9079 * IPR_RC_JOB_CONTINUE
9081 static int ipr_reset_ucode_download_done(struct ipr_cmnd
*ipr_cmd
)
9083 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
9084 struct ipr_sglist
*sglist
= ioa_cfg
->ucode_sglist
;
9086 dma_unmap_sg(&ioa_cfg
->pdev
->dev
, sglist
->scatterlist
,
9087 sglist
->num_sg
, DMA_TO_DEVICE
);
9089 ipr_cmd
->job_step
= ipr_reset_alert
;
9090 return IPR_RC_JOB_CONTINUE
;
9094 * ipr_reset_ucode_download - Download microcode to the adapter
9095 * @ipr_cmd: ipr command struct
9097 * Description: This function checks to see if it there is microcode
9098 * to download to the adapter. If there is, a download is performed.
9101 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9103 static int ipr_reset_ucode_download(struct ipr_cmnd
*ipr_cmd
)
9105 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
9106 struct ipr_sglist
*sglist
= ioa_cfg
->ucode_sglist
;
9109 ipr_cmd
->job_step
= ipr_reset_alert
;
9112 return IPR_RC_JOB_CONTINUE
;
9114 ipr_cmd
->ioarcb
.res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
9115 ipr_cmd
->ioarcb
.cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
9116 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[0] = WRITE_BUFFER
;
9117 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE
;
9118 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[6] = (sglist
->buffer_len
& 0xff0000) >> 16;
9119 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[7] = (sglist
->buffer_len
& 0x00ff00) >> 8;
9120 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[8] = sglist
->buffer_len
& 0x0000ff;
9123 ipr_build_ucode_ioadl64(ipr_cmd
, sglist
);
9125 ipr_build_ucode_ioadl(ipr_cmd
, sglist
);
9126 ipr_cmd
->job_step
= ipr_reset_ucode_download_done
;
9128 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
9129 IPR_WRITE_BUFFER_TIMEOUT
);
9132 return IPR_RC_JOB_RETURN
;
9136 * ipr_reset_shutdown_ioa - Shutdown the adapter
9137 * @ipr_cmd: ipr command struct
9139 * Description: This function issues an adapter shutdown of the
9140 * specified type to the specified adapter as part of the
9141 * adapter reset job.
9144 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9146 static int ipr_reset_shutdown_ioa(struct ipr_cmnd
*ipr_cmd
)
9148 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
9149 enum ipr_shutdown_type shutdown_type
= ipr_cmd
->u
.shutdown_type
;
9150 unsigned long timeout
;
9151 int rc
= IPR_RC_JOB_CONTINUE
;
9154 if (shutdown_type
== IPR_SHUTDOWN_QUIESCE
)
9155 ipr_cmd
->job_step
= ipr_reset_cancel_hcam
;
9156 else if (shutdown_type
!= IPR_SHUTDOWN_NONE
&&
9157 !ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
9158 ipr_cmd
->ioarcb
.res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
9159 ipr_cmd
->ioarcb
.cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
9160 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[0] = IPR_IOA_SHUTDOWN
;
9161 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[1] = shutdown_type
;
9163 if (shutdown_type
== IPR_SHUTDOWN_NORMAL
)
9164 timeout
= IPR_SHUTDOWN_TIMEOUT
;
9165 else if (shutdown_type
== IPR_SHUTDOWN_PREPARE_FOR_NORMAL
)
9166 timeout
= IPR_INTERNAL_TIMEOUT
;
9167 else if (ioa_cfg
->dual_raid
&& ipr_dual_ioa_raid
)
9168 timeout
= IPR_DUAL_IOA_ABBR_SHUTDOWN_TO
;
9170 timeout
= IPR_ABBREV_SHUTDOWN_TIMEOUT
;
9172 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, timeout
);
9174 rc
= IPR_RC_JOB_RETURN
;
9175 ipr_cmd
->job_step
= ipr_reset_ucode_download
;
9177 ipr_cmd
->job_step
= ipr_reset_alert
;
/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue,
				      &ipr_cmd->hrrq->hrrq_free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}
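
/*
 * Reset job convention: each job_step handler returns IPR_RC_JOB_CONTINUE
 * when the next step can run immediately within the loop above, or
 * IPR_RC_JOB_RETURN once it has queued asynchronous work (an adapter
 * command or a timer) that will re-enter ipr_reset_ioa_job() later.  A
 * minimal step would therefore look like the sketch below, where
 * ipr_reset_example_step is purely illustrative and not part of this
 * driver:
 *
 *	static int ipr_reset_example_step(struct ipr_cmnd *ipr_cmd)
 *	{
 *		ipr_cmd->job_step = ipr_reset_alert;
 *		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
 *		return IPR_RC_JOB_RETURN;
 *	}
 */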
9223 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9224 * @ioa_cfg: ioa config struct
9225 * @job_step: first job step of reset job
9226 * @shutdown_type: shutdown type
9228 * Description: This function will initiate the reset of the given adapter
9229 * starting at the selected job step.
9230 * If the caller needs to wait on the completion of the reset,
9231 * the caller must sleep on the reset_wait_q.
9236 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg
*ioa_cfg
,
9237 int (*job_step
) (struct ipr_cmnd
*),
9238 enum ipr_shutdown_type shutdown_type
)
9240 struct ipr_cmnd
*ipr_cmd
;
9243 ioa_cfg
->in_reset_reload
= 1;
9244 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9245 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
9246 ioa_cfg
->hrrq
[i
].allow_cmds
= 0;
9247 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
9250 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].removing_ioa
) {
9251 ioa_cfg
->scsi_unblock
= 0;
9252 ioa_cfg
->scsi_blocked
= 1;
9253 scsi_block_requests(ioa_cfg
->host
);
9256 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
9257 ioa_cfg
->reset_cmd
= ipr_cmd
;
9258 ipr_cmd
->job_step
= job_step
;
9259 ipr_cmd
->u
.shutdown_type
= shutdown_type
;
9261 ipr_reset_ioa_job(ipr_cmd
);
9265 * ipr_initiate_ioa_reset - Initiate an adapter reset
9266 * @ioa_cfg: ioa config struct
9267 * @shutdown_type: shutdown type
9269 * Description: This function will initiate the reset of the given adapter.
9270 * If the caller needs to wait on the completion of the reset,
9271 * the caller must sleep on the reset_wait_q.
9276 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg
*ioa_cfg
,
9277 enum ipr_shutdown_type shutdown_type
)
9281 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
9284 if (ioa_cfg
->in_reset_reload
) {
9285 if (ioa_cfg
->sdt_state
== GET_DUMP
)
9286 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
9287 else if (ioa_cfg
->sdt_state
== READ_DUMP
)
9288 ioa_cfg
->sdt_state
= ABORT_DUMP
;
9291 if (ioa_cfg
->reset_retries
++ >= IPR_NUM_RESET_RELOAD_RETRIES
) {
9292 dev_err(&ioa_cfg
->pdev
->dev
,
9293 "IOA taken offline - error recovery failed\n");
9295 ioa_cfg
->reset_retries
= 0;
9296 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9297 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
9298 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 1;
9299 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
9303 if (ioa_cfg
->in_ioa_bringdown
) {
9304 ioa_cfg
->reset_cmd
= NULL
;
9305 ioa_cfg
->in_reset_reload
= 0;
9306 ipr_fail_all_ops(ioa_cfg
);
9307 wake_up_all(&ioa_cfg
->reset_wait_q
);
9309 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].removing_ioa
) {
9310 ioa_cfg
->scsi_unblock
= 1;
9311 schedule_work(&ioa_cfg
->work_q
);
9315 ioa_cfg
->in_ioa_bringdown
= 1;
9316 shutdown_type
= IPR_SHUTDOWN_NONE
;
9320 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_shutdown_ioa
,
9325 * ipr_reset_freeze - Hold off all I/O activity
9326 * @ipr_cmd: ipr command struct
9328 * Description: If the PCI slot is frozen, hold off all I/O
9329 * activity; then, as soon as the slot is available again,
9330 * initiate an adapter reset.
9332 static int ipr_reset_freeze(struct ipr_cmnd
*ipr_cmd
)
9334 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
9337 /* Disallow new interrupts, avoid loop */
9338 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9339 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
9340 ioa_cfg
->hrrq
[i
].allow_interrupts
= 0;
9341 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
9344 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
9345 ipr_cmd
->done
= ipr_reset_ioa_job
;
9346 return IPR_RC_JOB_RETURN
;
9350 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9351 * @pdev: PCI device struct
9353 * Description: This routine is called to tell us that the MMIO
9354 * access to the IOA has been restored
9356 static pci_ers_result_t
ipr_pci_mmio_enabled(struct pci_dev
*pdev
)
9358 unsigned long flags
= 0;
9359 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
9361 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
9362 if (!ioa_cfg
->probe_done
)
9363 pci_save_state(pdev
);
9364 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
9365 return PCI_ERS_RESULT_NEED_RESET
;
9369 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9370 * @pdev: PCI device struct
9372 * Description: This routine is called to tell us that the PCI bus
9373 * is down. Can't do anything here, except put the device driver
9374 * into a holding pattern, waiting for the PCI bus to come back.
9376 static void ipr_pci_frozen(struct pci_dev
*pdev
)
9378 unsigned long flags
= 0;
9379 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
9381 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
9382 if (ioa_cfg
->probe_done
)
9383 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_freeze
, IPR_SHUTDOWN_NONE
);
9384 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
9388 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9389 * @pdev: PCI device struct
9391 * Description: This routine is called by the pci error recovery
9392 * code after the PCI slot has been reset, just before we
9393 * should resume normal operations.
9395 static pci_ers_result_t
ipr_pci_slot_reset(struct pci_dev
*pdev
)
9397 unsigned long flags
= 0;
9398 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
9400 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
9401 if (ioa_cfg
->probe_done
) {
9402 if (ioa_cfg
->needs_warm_reset
)
9403 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
9405 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_restore_cfg_space
,
9408 wake_up_all(&ioa_cfg
->eeh_wait_q
);
9409 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
9410 return PCI_ERS_RESULT_RECOVERED
;
9414 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9415 * @pdev: PCI device struct
9417 * Description: This routine is called when the PCI bus has
9418 * permanently failed.
9420 static void ipr_pci_perm_failure(struct pci_dev
*pdev
)
9422 unsigned long flags
= 0;
9423 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
9426 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
9427 if (ioa_cfg
->probe_done
) {
9428 if (ioa_cfg
->sdt_state
== WAIT_FOR_DUMP
)
9429 ioa_cfg
->sdt_state
= ABORT_DUMP
;
9430 ioa_cfg
->reset_retries
= IPR_NUM_RESET_RELOAD_RETRIES
- 1;
9431 ioa_cfg
->in_ioa_bringdown
= 1;
9432 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9433 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
9434 ioa_cfg
->hrrq
[i
].allow_cmds
= 0;
9435 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
9438 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
9440 wake_up_all(&ioa_cfg
->eeh_wait_q
);
9441 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
9445 * ipr_pci_error_detected - Called when a PCI error is detected.
9446 * @pdev: PCI device struct
9447 * @state: PCI channel state
9449 * Description: Called when a PCI error is detected.
9452 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9454 static pci_ers_result_t
ipr_pci_error_detected(struct pci_dev
*pdev
,
9455 pci_channel_state_t state
)
9458 case pci_channel_io_frozen
:
9459 ipr_pci_frozen(pdev
);
9460 return PCI_ERS_RESULT_CAN_RECOVER
;
9461 case pci_channel_io_perm_failure
:
9462 ipr_pci_perm_failure(pdev
);
9463 return PCI_ERS_RESULT_DISCONNECT
;
9468 return PCI_ERS_RESULT_NEED_RESET
;
9472 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9473 * @ioa_cfg: ioa cfg struct
9475 * Description: This is the second phase of adapter initialization
9476 * This function takes care of initilizing the adapter to the point
9477 * where it can accept new commands.
9480 * 0 on success / -EIO on failure
9482 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg
*ioa_cfg
)
9485 unsigned long host_lock_flags
= 0;
9488 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9489 dev_dbg(&ioa_cfg
->pdev
->dev
, "ioa_cfg adx: 0x%p\n", ioa_cfg
);
9490 ioa_cfg
->probe_done
= 1;
9491 if (ioa_cfg
->needs_hard_reset
) {
9492 ioa_cfg
->needs_hard_reset
= 0;
9493 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
9495 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_enable_ioa
,
9497 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9504 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9505 * @ioa_cfg: ioa config struct
9510 static void ipr_free_cmd_blks(struct ipr_ioa_cfg
*ioa_cfg
)
9514 if (ioa_cfg
->ipr_cmnd_list
) {
9515 for (i
= 0; i
< IPR_NUM_CMD_BLKS
; i
++) {
9516 if (ioa_cfg
->ipr_cmnd_list
[i
])
9517 dma_pool_free(ioa_cfg
->ipr_cmd_pool
,
9518 ioa_cfg
->ipr_cmnd_list
[i
],
9519 ioa_cfg
->ipr_cmnd_list_dma
[i
]);
9521 ioa_cfg
->ipr_cmnd_list
[i
] = NULL
;
9525 if (ioa_cfg
->ipr_cmd_pool
)
9526 dma_pool_destroy(ioa_cfg
->ipr_cmd_pool
);
9528 kfree(ioa_cfg
->ipr_cmnd_list
);
9529 kfree(ioa_cfg
->ipr_cmnd_list_dma
);
9530 ioa_cfg
->ipr_cmnd_list
= NULL
;
9531 ioa_cfg
->ipr_cmnd_list_dma
= NULL
;
9532 ioa_cfg
->ipr_cmd_pool
= NULL
;
9536 * ipr_free_mem - Frees memory allocated for an adapter
9537 * @ioa_cfg: ioa cfg struct
9542 static void ipr_free_mem(struct ipr_ioa_cfg
*ioa_cfg
)
9546 kfree(ioa_cfg
->res_entries
);
9547 dma_free_coherent(&ioa_cfg
->pdev
->dev
, sizeof(struct ipr_misc_cbs
),
9548 ioa_cfg
->vpd_cbs
, ioa_cfg
->vpd_cbs_dma
);
9549 ipr_free_cmd_blks(ioa_cfg
);
9551 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++)
9552 dma_free_coherent(&ioa_cfg
->pdev
->dev
,
9553 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
9554 ioa_cfg
->hrrq
[i
].host_rrq
,
9555 ioa_cfg
->hrrq
[i
].host_rrq_dma
);
9557 dma_free_coherent(&ioa_cfg
->pdev
->dev
, ioa_cfg
->cfg_table_size
,
9558 ioa_cfg
->u
.cfg_table
, ioa_cfg
->cfg_table_dma
);
9560 for (i
= 0; i
< IPR_MAX_HCAMS
; i
++) {
9561 dma_free_coherent(&ioa_cfg
->pdev
->dev
,
9562 sizeof(struct ipr_hostrcb
),
9563 ioa_cfg
->hostrcb
[i
],
9564 ioa_cfg
->hostrcb_dma
[i
]);
9567 ipr_free_dump(ioa_cfg
);
9568 kfree(ioa_cfg
->trace
);
/**
 * ipr_free_irqs - Free all allocated IRQs for the adapter.
 * @ioa_cfg:	ipr cfg struct
 *
 * This function frees all allocated IRQs for the
 * specified adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i;

	for (i = 0; i < ioa_cfg->nvectors; i++)
		free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
	pci_free_irq_vectors(pdev);
}
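
/*
 * Each HRRQ registered its handler against its own MSI/MSI-X vector, so
 * the loop above releases every vector individually before the vector
 * allocation itself is dropped with pci_free_irq_vectors().
 */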
9592 * ipr_free_all_resources - Free all allocated resources for an adapter.
9593 * @ipr_cmd: ipr command struct
9595 * This function frees all allocated resources for the
9596 * specified adapter.
9601 static void ipr_free_all_resources(struct ipr_ioa_cfg
*ioa_cfg
)
9603 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
9606 ipr_free_irqs(ioa_cfg
);
9607 if (ioa_cfg
->reset_work_q
)
9608 destroy_workqueue(ioa_cfg
->reset_work_q
);
9609 iounmap(ioa_cfg
->hdw_dma_regs
);
9610 pci_release_regions(pdev
);
9611 ipr_free_mem(ioa_cfg
);
9612 scsi_host_put(ioa_cfg
->host
);
9613 pci_disable_device(pdev
);
9618 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9619 * @ioa_cfg: ioa config struct
9622 * 0 on success / -ENOMEM on allocation failure
9624 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg
*ioa_cfg
)
9626 struct ipr_cmnd
*ipr_cmd
;
9627 struct ipr_ioarcb
*ioarcb
;
9628 dma_addr_t dma_addr
;
9629 int i
, entries_each_hrrq
, hrrq_id
= 0;
9631 ioa_cfg
->ipr_cmd_pool
= dma_pool_create(IPR_NAME
, &ioa_cfg
->pdev
->dev
,
9632 sizeof(struct ipr_cmnd
), 512, 0);
9634 if (!ioa_cfg
->ipr_cmd_pool
)
9637 ioa_cfg
->ipr_cmnd_list
= kcalloc(IPR_NUM_CMD_BLKS
, sizeof(struct ipr_cmnd
*), GFP_KERNEL
);
9638 ioa_cfg
->ipr_cmnd_list_dma
= kcalloc(IPR_NUM_CMD_BLKS
, sizeof(dma_addr_t
), GFP_KERNEL
);
9640 if (!ioa_cfg
->ipr_cmnd_list
|| !ioa_cfg
->ipr_cmnd_list_dma
) {
9641 ipr_free_cmd_blks(ioa_cfg
);
9645 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9646 if (ioa_cfg
->hrrq_num
> 1) {
9648 entries_each_hrrq
= IPR_NUM_INTERNAL_CMD_BLKS
;
9649 ioa_cfg
->hrrq
[i
].min_cmd_id
= 0;
9650 ioa_cfg
->hrrq
[i
].max_cmd_id
=
9651 (entries_each_hrrq
- 1);
9654 IPR_NUM_BASE_CMD_BLKS
/
9655 (ioa_cfg
->hrrq_num
- 1);
9656 ioa_cfg
->hrrq
[i
].min_cmd_id
=
9657 IPR_NUM_INTERNAL_CMD_BLKS
+
9658 (i
- 1) * entries_each_hrrq
;
9659 ioa_cfg
->hrrq
[i
].max_cmd_id
=
9660 (IPR_NUM_INTERNAL_CMD_BLKS
+
9661 i
* entries_each_hrrq
- 1);
9664 entries_each_hrrq
= IPR_NUM_CMD_BLKS
;
9665 ioa_cfg
->hrrq
[i
].min_cmd_id
= 0;
9666 ioa_cfg
->hrrq
[i
].max_cmd_id
= (entries_each_hrrq
- 1);
9668 ioa_cfg
->hrrq
[i
].size
= entries_each_hrrq
;
9671 BUG_ON(ioa_cfg
->hrrq_num
== 0);
9673 i
= IPR_NUM_CMD_BLKS
-
9674 ioa_cfg
->hrrq
[ioa_cfg
->hrrq_num
- 1].max_cmd_id
- 1;
9676 ioa_cfg
->hrrq
[ioa_cfg
->hrrq_num
- 1].size
+= i
;
9677 ioa_cfg
->hrrq
[ioa_cfg
->hrrq_num
- 1].max_cmd_id
+= i
;
9680 for (i
= 0; i
< IPR_NUM_CMD_BLKS
; i
++) {
9681 ipr_cmd
= dma_pool_zalloc(ioa_cfg
->ipr_cmd_pool
,
9682 GFP_KERNEL
, &dma_addr
);
9685 ipr_free_cmd_blks(ioa_cfg
);
9689 ioa_cfg
->ipr_cmnd_list
[i
] = ipr_cmd
;
9690 ioa_cfg
->ipr_cmnd_list_dma
[i
] = dma_addr
;
9692 ioarcb
= &ipr_cmd
->ioarcb
;
9693 ipr_cmd
->dma_addr
= dma_addr
;
9695 ioarcb
->a
.ioarcb_host_pci_addr64
= cpu_to_be64(dma_addr
);
9697 ioarcb
->a
.ioarcb_host_pci_addr
= cpu_to_be32(dma_addr
);
9699 ioarcb
->host_response_handle
= cpu_to_be32(i
<< 2);
9700 if (ioa_cfg
->sis64
) {
9701 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
9702 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl64
));
9703 ioarcb
->u
.sis64_addr_data
.ioasa_host_pci_addr
=
9704 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, s
.ioasa64
));
9706 ioarcb
->write_ioadl_addr
=
9707 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl
));
9708 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
9709 ioarcb
->ioasa_host_pci_addr
=
9710 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, s
.ioasa
));
9712 ioarcb
->ioasa_len
= cpu_to_be16(sizeof(struct ipr_ioasa
));
9713 ipr_cmd
->cmd_index
= i
;
9714 ipr_cmd
->ioa_cfg
= ioa_cfg
;
9715 ipr_cmd
->sense_buffer_dma
= dma_addr
+
9716 offsetof(struct ipr_cmnd
, sense_buffer
);
9718 ipr_cmd
->ioarcb
.cmd_pkt
.hrrq_id
= hrrq_id
;
9719 ipr_cmd
->hrrq
= &ioa_cfg
->hrrq
[hrrq_id
];
9720 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
9721 if (i
>= ioa_cfg
->hrrq
[hrrq_id
].max_cmd_id
)
9729 * ipr_alloc_mem - Allocate memory for an adapter
9730 * @ioa_cfg: ioa config struct
9733 * 0 on success / non-zero for error
9735 static int ipr_alloc_mem(struct ipr_ioa_cfg
*ioa_cfg
)
9737 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
9738 int i
, rc
= -ENOMEM
;
9741 ioa_cfg
->res_entries
= kcalloc(ioa_cfg
->max_devs_supported
,
9742 sizeof(struct ipr_resource_entry
),
9745 if (!ioa_cfg
->res_entries
)
9748 for (i
= 0; i
< ioa_cfg
->max_devs_supported
; i
++) {
9749 list_add_tail(&ioa_cfg
->res_entries
[i
].queue
, &ioa_cfg
->free_res_q
);
9750 ioa_cfg
->res_entries
[i
].ioa_cfg
= ioa_cfg
;
9753 ioa_cfg
->vpd_cbs
= dma_alloc_coherent(&pdev
->dev
,
9754 sizeof(struct ipr_misc_cbs
),
9755 &ioa_cfg
->vpd_cbs_dma
,
9758 if (!ioa_cfg
->vpd_cbs
)
9759 goto out_free_res_entries
;
9761 if (ipr_alloc_cmd_blks(ioa_cfg
))
9762 goto out_free_vpd_cbs
;
9764 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9765 ioa_cfg
->hrrq
[i
].host_rrq
= dma_alloc_coherent(&pdev
->dev
,
9766 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
9767 &ioa_cfg
->hrrq
[i
].host_rrq_dma
,
9770 if (!ioa_cfg
->hrrq
[i
].host_rrq
) {
9772 dma_free_coherent(&pdev
->dev
,
9773 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
9774 ioa_cfg
->hrrq
[i
].host_rrq
,
9775 ioa_cfg
->hrrq
[i
].host_rrq_dma
);
9776 goto out_ipr_free_cmd_blocks
;
9778 ioa_cfg
->hrrq
[i
].ioa_cfg
= ioa_cfg
;
9781 ioa_cfg
->u
.cfg_table
= dma_alloc_coherent(&pdev
->dev
,
9782 ioa_cfg
->cfg_table_size
,
9783 &ioa_cfg
->cfg_table_dma
,
9786 if (!ioa_cfg
->u
.cfg_table
)
9787 goto out_free_host_rrq
;
9789 for (i
= 0; i
< IPR_MAX_HCAMS
; i
++) {
9790 ioa_cfg
->hostrcb
[i
] = dma_alloc_coherent(&pdev
->dev
,
9791 sizeof(struct ipr_hostrcb
),
9792 &ioa_cfg
->hostrcb_dma
[i
],
9795 if (!ioa_cfg
->hostrcb
[i
])
9796 goto out_free_hostrcb_dma
;
9798 ioa_cfg
->hostrcb
[i
]->hostrcb_dma
=
9799 ioa_cfg
->hostrcb_dma
[i
] + offsetof(struct ipr_hostrcb
, hcam
);
9800 ioa_cfg
->hostrcb
[i
]->ioa_cfg
= ioa_cfg
;
9801 list_add_tail(&ioa_cfg
->hostrcb
[i
]->queue
, &ioa_cfg
->hostrcb_free_q
);
9804 ioa_cfg
->trace
= kcalloc(IPR_NUM_TRACE_ENTRIES
,
9805 sizeof(struct ipr_trace_entry
),
9808 if (!ioa_cfg
->trace
)
9809 goto out_free_hostrcb_dma
;
9816 out_free_hostrcb_dma
:
9818 dma_free_coherent(&pdev
->dev
, sizeof(struct ipr_hostrcb
),
9819 ioa_cfg
->hostrcb
[i
],
9820 ioa_cfg
->hostrcb_dma
[i
]);
9822 dma_free_coherent(&pdev
->dev
, ioa_cfg
->cfg_table_size
,
9823 ioa_cfg
->u
.cfg_table
, ioa_cfg
->cfg_table_dma
);
9825 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9826 dma_free_coherent(&pdev
->dev
,
9827 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
9828 ioa_cfg
->hrrq
[i
].host_rrq
,
9829 ioa_cfg
->hrrq
[i
].host_rrq_dma
);
9831 out_ipr_free_cmd_blocks
:
9832 ipr_free_cmd_blks(ioa_cfg
);
9834 dma_free_coherent(&pdev
->dev
, sizeof(struct ipr_misc_cbs
),
9835 ioa_cfg
->vpd_cbs
, ioa_cfg
->vpd_cbs_dma
);
9836 out_free_res_entries
:
9837 kfree(ioa_cfg
->res_entries
);
9842 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9843 * @ioa_cfg: ioa config struct
9848 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg
*ioa_cfg
)
9852 for (i
= 0; i
< IPR_MAX_NUM_BUSES
; i
++) {
9853 ioa_cfg
->bus_attr
[i
].bus
= i
;
9854 ioa_cfg
->bus_attr
[i
].qas_enabled
= 0;
9855 ioa_cfg
->bus_attr
[i
].bus_width
= IPR_DEFAULT_BUS_WIDTH
;
9856 if (ipr_max_speed
< ARRAY_SIZE(ipr_max_bus_speeds
))
9857 ioa_cfg
->bus_attr
[i
].max_xfer_rate
= ipr_max_bus_speeds
[ipr_max_speed
];
9859 ioa_cfg
->bus_attr
[i
].max_xfer_rate
= IPR_U160_SCSI_RATE
;
9864 * ipr_init_regs - Initialize IOA registers
9865 * @ioa_cfg: ioa config struct
9870 static void ipr_init_regs(struct ipr_ioa_cfg
*ioa_cfg
)
9872 const struct ipr_interrupt_offsets
*p
;
9873 struct ipr_interrupts
*t
;
9876 p
= &ioa_cfg
->chip_cfg
->regs
;
9878 base
= ioa_cfg
->hdw_dma_regs
;
9880 t
->set_interrupt_mask_reg
= base
+ p
->set_interrupt_mask_reg
;
9881 t
->clr_interrupt_mask_reg
= base
+ p
->clr_interrupt_mask_reg
;
9882 t
->clr_interrupt_mask_reg32
= base
+ p
->clr_interrupt_mask_reg32
;
9883 t
->sense_interrupt_mask_reg
= base
+ p
->sense_interrupt_mask_reg
;
9884 t
->sense_interrupt_mask_reg32
= base
+ p
->sense_interrupt_mask_reg32
;
9885 t
->clr_interrupt_reg
= base
+ p
->clr_interrupt_reg
;
9886 t
->clr_interrupt_reg32
= base
+ p
->clr_interrupt_reg32
;
9887 t
->sense_interrupt_reg
= base
+ p
->sense_interrupt_reg
;
9888 t
->sense_interrupt_reg32
= base
+ p
->sense_interrupt_reg32
;
9889 t
->ioarrin_reg
= base
+ p
->ioarrin_reg
;
9890 t
->sense_uproc_interrupt_reg
= base
+ p
->sense_uproc_interrupt_reg
;
9891 t
->sense_uproc_interrupt_reg32
= base
+ p
->sense_uproc_interrupt_reg32
;
9892 t
->set_uproc_interrupt_reg
= base
+ p
->set_uproc_interrupt_reg
;
9893 t
->set_uproc_interrupt_reg32
= base
+ p
->set_uproc_interrupt_reg32
;
9894 t
->clr_uproc_interrupt_reg
= base
+ p
->clr_uproc_interrupt_reg
;
9895 t
->clr_uproc_interrupt_reg32
= base
+ p
->clr_uproc_interrupt_reg32
;
9897 if (ioa_cfg
->sis64
) {
9898 t
->init_feedback_reg
= base
+ p
->init_feedback_reg
;
9899 t
->dump_addr_reg
= base
+ p
->dump_addr_reg
;
9900 t
->dump_data_reg
= base
+ p
->dump_data_reg
;
9901 t
->endian_swap_reg
= base
+ p
->endian_swap_reg
;
9906 * ipr_init_ioa_cfg - Initialize IOA config struct
9907 * @ioa_cfg: ioa config struct
9908 * @host: scsi host struct
9909 * @pdev: PCI dev struct
9914 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg
*ioa_cfg
,
9915 struct Scsi_Host
*host
, struct pci_dev
*pdev
)
9919 ioa_cfg
->host
= host
;
9920 ioa_cfg
->pdev
= pdev
;
9921 ioa_cfg
->log_level
= ipr_log_level
;
9922 ioa_cfg
->doorbell
= IPR_DOORBELL
;
9923 sprintf(ioa_cfg
->eye_catcher
, IPR_EYECATCHER
);
9924 sprintf(ioa_cfg
->trace_start
, IPR_TRACE_START_LABEL
);
9925 sprintf(ioa_cfg
->cfg_table_start
, IPR_CFG_TBL_START
);
9926 sprintf(ioa_cfg
->resource_table_label
, IPR_RES_TABLE_LABEL
);
9927 sprintf(ioa_cfg
->ipr_hcam_label
, IPR_HCAM_LABEL
);
9928 sprintf(ioa_cfg
->ipr_cmd_label
, IPR_CMD_LABEL
);
9930 INIT_LIST_HEAD(&ioa_cfg
->hostrcb_free_q
);
9931 INIT_LIST_HEAD(&ioa_cfg
->hostrcb_pending_q
);
9932 INIT_LIST_HEAD(&ioa_cfg
->hostrcb_report_q
);
9933 INIT_LIST_HEAD(&ioa_cfg
->free_res_q
);
9934 INIT_LIST_HEAD(&ioa_cfg
->used_res_q
);
9935 INIT_WORK(&ioa_cfg
->work_q
, ipr_worker_thread
);
9936 init_waitqueue_head(&ioa_cfg
->reset_wait_q
);
9937 init_waitqueue_head(&ioa_cfg
->msi_wait_q
);
9938 init_waitqueue_head(&ioa_cfg
->eeh_wait_q
);
9939 ioa_cfg
->sdt_state
= INACTIVE
;
9941 ipr_initialize_bus_attr(ioa_cfg
);
9942 ioa_cfg
->max_devs_supported
= ipr_max_devs
;
9944 if (ioa_cfg
->sis64
) {
9945 host
->max_id
= IPR_MAX_SIS64_TARGETS_PER_BUS
;
9946 host
->max_lun
= IPR_MAX_SIS64_LUNS_PER_TARGET
;
9947 if (ipr_max_devs
> IPR_MAX_SIS64_DEVS
)
9948 ioa_cfg
->max_devs_supported
= IPR_MAX_SIS64_DEVS
;
9949 ioa_cfg
->cfg_table_size
= (sizeof(struct ipr_config_table_hdr64
)
9950 + ((sizeof(struct ipr_config_table_entry64
)
9951 * ioa_cfg
->max_devs_supported
)));
9953 host
->max_id
= IPR_MAX_NUM_TARGETS_PER_BUS
;
9954 host
->max_lun
= IPR_MAX_NUM_LUNS_PER_TARGET
;
9955 if (ipr_max_devs
> IPR_MAX_PHYSICAL_DEVS
)
9956 ioa_cfg
->max_devs_supported
= IPR_MAX_PHYSICAL_DEVS
;
9957 ioa_cfg
->cfg_table_size
= (sizeof(struct ipr_config_table_hdr
)
9958 + ((sizeof(struct ipr_config_table_entry
)
9959 * ioa_cfg
->max_devs_supported
)));
9962 host
->max_channel
= IPR_VSET_BUS
;
9963 host
->unique_id
= host
->host_no
;
9964 host
->max_cmd_len
= IPR_MAX_CDB_LEN
;
9965 host
->can_queue
= ioa_cfg
->max_cmds
;
9966 pci_set_drvdata(pdev
, ioa_cfg
);
9968 for (i
= 0; i
< ARRAY_SIZE(ioa_cfg
->hrrq
); i
++) {
9969 INIT_LIST_HEAD(&ioa_cfg
->hrrq
[i
].hrrq_free_q
);
9970 INIT_LIST_HEAD(&ioa_cfg
->hrrq
[i
].hrrq_pending_q
);
9971 spin_lock_init(&ioa_cfg
->hrrq
[i
]._lock
);
9973 ioa_cfg
->hrrq
[i
].lock
= ioa_cfg
->host
->host_lock
;
9975 ioa_cfg
->hrrq
[i
].lock
= &ioa_cfg
->hrrq
[i
]._lock
;
/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}
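
/*
 * The lookup above matches only on PCI vendor and device ID; a NULL
 * return is treated by ipr_probe_ioa() as an unknown chipset, which logs
 * the IDs and abandons the probe.
 */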
/**
 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
 *					during probe time
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	None
 **/
static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	if (pci_channel_offline(pdev)) {
		wait_event_timeout(ioa_cfg->eeh_wait_q,
				   !pci_channel_offline(pdev),
				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
		pci_restore_state(pdev);
	}
}
static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
		ioa_cfg->vectors_info[vec_idx].
			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
	}
}
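
/*
 * The "host%d-%d" strings built above are the per-vector names later
 * passed to request_irq() in ipr_request_other_msi_irqs(), which is what
 * makes each additional MSI-X vector identifiable in /proc/interrupts.
 */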
10030 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg
*ioa_cfg
,
10031 struct pci_dev
*pdev
)
10035 for (i
= 1; i
< ioa_cfg
->nvectors
; i
++) {
10036 rc
= request_irq(pci_irq_vector(pdev
, i
),
10039 ioa_cfg
->vectors_info
[i
].desc
,
10040 &ioa_cfg
->hrrq
[i
]);
10043 free_irq(pci_irq_vector(pdev
, i
),
10044 &ioa_cfg
->hrrq
[i
]);
10052 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10053 * @pdev: PCI device struct
10055 * Description: Simply set the msi_received flag to 1 indicating that
10056 * Message Signaled Interrupts are supported.
10059 * 0 on success / non-zero on failure
10061 static irqreturn_t
ipr_test_intr(int irq
, void *devp
)
10063 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)devp
;
10064 unsigned long lock_flags
= 0;
10065 irqreturn_t rc
= IRQ_HANDLED
;
10067 dev_info(&ioa_cfg
->pdev
->dev
, "Received IRQ : %d\n", irq
);
10068 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
10070 ioa_cfg
->msi_received
= 1;
10071 wake_up(&ioa_cfg
->msi_wait_q
);
10073 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the tests fails, the driver will fall back to LSI.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;
	int irq = pci_irq_vector(pdev, 0);

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(irq, ioa_cfg);

	LEAVE;

	return rc;
}
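
/*
 * How the probe path below consumes this test (summary only): a return of
 * 0 keeps the allocated MSI/MSI-X vectors, -EOPNOTSUPP drops back to a
 * single vector with clear_isr set, and any other error aborts the probe.
 */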
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags, driver_lock_flags;
	unsigned int irq_flag;

	ENTER;

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	rc = pci_enable_device(pdev);

	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "Cannot enable adapter\n");
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			goto out_release_regions;
		}
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_regs(ioa_cfg);

	if (ioa_cfg->sis64) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
			rc = dma_set_mask_and_coherent(&pdev->dev,
						       DMA_BIT_MASK(32));
		}
	} else
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Issue MMIO read to ensure card is not in EEH */
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
	ipr_wait_for_pci_err_recovery(ioa_cfg);

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

	irq_flag = PCI_IRQ_LEGACY;
	if (ioa_cfg->ipr_chip->has_msi)
		irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
	rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
	if (rc < 0) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		goto cleanup_nomem;
	}
	ioa_cfg->nvectors = rc;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		ioa_cfg->clear_isr = 1;

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		pci_set_master(pdev);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

	if (pdev->msi_enabled || pdev->msix_enabled) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		switch (rc) {
		case 0:
			dev_info(&pdev->dev,
				"Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
				pdev->msix_enabled ? "-X" : "");
			break;
		case -EOPNOTSUPP:
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			pci_free_irq_vectors(pdev);

			ioa_cfg->nvectors = 1;
			ioa_cfg->clear_isr = 1;
			break;
		default:
			goto out_msi_disable;
		}
	}

	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				(unsigned int)num_online_cpus(),
				(unsigned int)IPR_MAX_HRRQ_NUM);

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nolog;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (pdev->msi_enabled || pdev->msix_enabled) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
				 ioa_cfg->vectors_info[0].desc,
				 &ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
				 IRQF_SHARED,
				 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;

		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
								WQ_MEM_RECLAIM, host->host_no);

		if (!ioa_cfg->reset_work_q) {
			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
			rc = -ENOMEM;
			goto out_free_irq;
		}
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	LEAVE;
out:
	return rc;

out_free_irq:
	ipr_free_irqs(ioa_cfg);
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	ipr_wait_for_pci_err_recovery(ioa_cfg);
	pci_free_irq_vectors(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
	goto out;
}
/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	if (ioa_cfg->reset_work_q)
		flush_workqueue(ioa_cfg->reset_work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_ioa_async_err_log);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device ID
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_ioa_async_err_log);

	if (rc) {
		ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
				     &ipr_dump_attr);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_ioa_async_err_log);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	ioa_cfg->scan_enabled = 1;
	schedule_work(&ioa_cfg->work_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
				      ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}

	scsi_scan_host(ioa_cfg->host);

	return 0;
}
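
/*
 * Probe is two-staged: ipr_probe_ioa() does the PCI, interrupt and memory
 * setup, while ipr_probe_ioa_part2() (called above) is expected to complete
 * adapter initialization before the host is registered with the SCSI
 * midlayer and scanned.
 */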
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
		shutdown_type = IPR_SHUTDOWN_QUIESCE;

	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
		ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}
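
/*
 * With ipr_fast_reboot set on a SIS-64 adapter, a restart quiesces the
 * adapter instead of performing a full cache-flush shutdown, then frees its
 * IRQs and disables the PCI device, presumably to shorten reboot time.
 */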
static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
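
/*
 * Each entry above follows the struct pci_device_id field order: vendor,
 * device, subvendor, subdevice, class, class_mask, driver_data. The
 * driver_data flags (e.g. IPR_USE_LONG_TRANSOP_TIMEOUT,
 * IPR_USE_PCI_WARM_RESET) are consumed in ipr_probe_ioa() via
 * dev_id->driver_data, and MODULE_DEVICE_TABLE() exports the table so
 * userspace can autoload the module when a matching adapter appears.
 */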
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};
static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	notifier event
 * @buf:	notifier data (unused)
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}
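
/*
 * ipr_halt() runs from the reboot notifier chain (ipr_notifier below), so a
 * shutdown prepare is sent to every registered adapter on restart, halt, or
 * power-off before the system goes down.
 */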
static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};
/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}
/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}
module_init(ipr_init);
module_exit(ipr_exit);