/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- SCSI device hot plug
 *
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
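/*
 * For example, the options above can be supplied at module load time, e.g.
 *
 *	modprobe ipr max_speed=2 log_level=4 fastfail=1
 *
 * or, when the driver is built in, on the kernel command line as
 * "ipr.max_speed=2 ipr.log_level=4".  Parameters declared with
 * S_IRUGO | S_IWUSR (fastfail, debug, fast_reboot) can also be changed at
 * runtime via /sys/module/ipr/parameters/.
 */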
/*  A constant array of IOASCs/URCs/Error Messages */
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	"Logical Unit in process of becoming ready"},
	"Initializing command required"},
	"34FF: Disk device format in progress"},
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	"Synchronization required"},
	"IOA microcode download required"},
	"Device bus connection is prohibited by host"},
	"No ready, IOA shutdown"},
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	"ATA device status error"},
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, commands not allowed to this device"},
	"Illegal request, command not allowed to a secondary adapter"},
	"Illegal request, command not allowed to a non-optimized resource"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"Illegal request, dual adapter support not enabled"},
	"Illegal request, another cable connector was physically disabled"},
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	"Data protect, other volume set problem"},
	"Aborted command, invalid descriptor"},
	"Target operating conditions have changed, dual adapter takeover"},
	"Aborted command, medium removal prevented"},
	"Command terminated by host"},
	"Aborted command, command terminated by host"}
};
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}
/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}
/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
}
/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
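/*
 * Example: a command built by ipr_init_ioadl() uses a single ioadl64
 * descriptor, so dma_use_sg * sizeof(struct ipr_ioadl64_desc) stays well
 * under 128 bytes and only bit 0x1 (the default 256-byte IOARCB) is set in
 * the address written to ioarrin_reg; a long scatter/gather list whose
 * inline descriptors exceed 128 bytes additionally sets bit 0x4 to request
 * the 512-byte IOARCB format.
 */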
/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}
/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}
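/*
 * Worked example of the round-robin above: with ioa_cfg->hrrq_num == 4,
 * successive calls return 1, 2, 3, 1, 2, 3, ... because the counter is
 * reduced modulo (hrrq_num - 1) and offset by one, so queue 0
 * (IPR_INIT_HRRQ, used for internally generated commands) is skipped when
 * distributing normal I/O.
 */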
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}

			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}
/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}
/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (buffer - p));

	return buffer;
}
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *			  for the resource
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);
	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;
	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
		    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 *	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err(" Serial Number: %s\n", buffer);
}
/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_sis64_config_error - Log a device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err("Device %d : %s", i + 1,
			__ipr_format_res_path(dev_entry->res_path,
					      buffer, sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
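
/*
 * Illustrative note (sketch only, not part of the driver): with the default
 * log level the dump above is capped at IPR_DEFAULT_MAX_ERROR_DUMP bytes and
 * printed four 32-bit words per line, each line prefixed with the byte
 * offset of its first word. A 32-byte buffer would therefore produce, for
 * example:
 *
 *	00000000: AABBCCDD 11223344 55667788 99AABBCC
 *	00000010: DEADBEEF 00000000 CAFEBABE 12345678
 *
 * The offset column advances by 0x10 per line because the loop index i
 * counts words in steps of four and the printed offset is i*4.
 */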

/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
2001 * ipr_log_fabric_path - Log a fabric path error
2002 * @hostrcb: hostrcb struct
2003 * @fabric: fabric descriptor
2008 static void ipr_log_fabric_path(struct ipr_hostrcb
*hostrcb
,
2009 struct ipr_hostrcb_fabric_desc
*fabric
)
2012 u8 path_state
= fabric
->path_state
;
2013 u8 active
= path_state
& IPR_PATH_ACTIVE_MASK
;
2014 u8 state
= path_state
& IPR_PATH_STATE_MASK
;
2016 for (i
= 0; i
< ARRAY_SIZE(path_active_desc
); i
++) {
2017 if (path_active_desc
[i
].active
!= active
)
2020 for (j
= 0; j
< ARRAY_SIZE(path_state_desc
); j
++) {
2021 if (path_state_desc
[j
].state
!= state
)
2024 if (fabric
->cascaded_expander
== 0xff && fabric
->phy
== 0xff) {
2025 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d\n",
2026 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2028 } else if (fabric
->cascaded_expander
== 0xff) {
2029 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d, Phy=%d\n",
2030 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2031 fabric
->ioa_port
, fabric
->phy
);
2032 } else if (fabric
->phy
== 0xff) {
2033 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d, Cascade=%d\n",
2034 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2035 fabric
->ioa_port
, fabric
->cascaded_expander
);
2037 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2038 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2039 fabric
->ioa_port
, fabric
->cascaded_expander
, fabric
->phy
);
2045 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state
,
2046 fabric
->ioa_port
, fabric
->cascaded_expander
, fabric
->phy
);
2050 * ipr_log64_fabric_path - Log a fabric path error
2051 * @hostrcb: hostrcb struct
2052 * @fabric: fabric descriptor
2057 static void ipr_log64_fabric_path(struct ipr_hostrcb
*hostrcb
,
2058 struct ipr_hostrcb64_fabric_desc
*fabric
)
2061 u8 path_state
= fabric
->path_state
;
2062 u8 active
= path_state
& IPR_PATH_ACTIVE_MASK
;
2063 u8 state
= path_state
& IPR_PATH_STATE_MASK
;
2064 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2066 for (i
= 0; i
< ARRAY_SIZE(path_active_desc
); i
++) {
2067 if (path_active_desc
[i
].active
!= active
)
2070 for (j
= 0; j
< ARRAY_SIZE(path_state_desc
); j
++) {
2071 if (path_state_desc
[j
].state
!= state
)
2074 ipr_hcam_err(hostrcb
, "%s %s: Resource Path=%s\n",
2075 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2076 ipr_format_res_path(hostrcb
->ioa_cfg
,
2078 buffer
, sizeof(buffer
)));
2083 ipr_err("Path state=%02X Resource Path=%s\n", path_state
,
2084 ipr_format_res_path(hostrcb
->ioa_cfg
, fabric
->res_path
,
2085 buffer
, sizeof(buffer
)));
2088 static const struct {
2091 } path_type_desc
[] = {
2092 { IPR_PATH_CFG_IOA_PORT
, "IOA port" },
2093 { IPR_PATH_CFG_EXP_PORT
, "Expander port" },
2094 { IPR_PATH_CFG_DEVICE_PORT
, "Device port" },
2095 { IPR_PATH_CFG_DEVICE_LUN
, "Device LUN" }
2098 static const struct {
2101 } path_status_desc
[] = {
2102 { IPR_PATH_CFG_NO_PROB
, "Functional" },
2103 { IPR_PATH_CFG_DEGRADED
, "Degraded" },
2104 { IPR_PATH_CFG_FAILED
, "Failed" },
2105 { IPR_PATH_CFG_SUSPECT
, "Suspect" },
2106 { IPR_PATH_NOT_DETECTED
, "Missing" },
2107 { IPR_PATH_INCORRECT_CONN
, "Incorrectly connected" }
2110 static const char *link_rate
[] = {
2113 "phy reset problem",
2130 * ipr_log_path_elem - Log a fabric path element.
2131 * @hostrcb: hostrcb struct
2132 * @cfg: fabric path element struct
2137 static void ipr_log_path_elem(struct ipr_hostrcb
*hostrcb
,
2138 struct ipr_hostrcb_config_element
*cfg
)
2141 u8 type
= cfg
->type_status
& IPR_PATH_CFG_TYPE_MASK
;
2142 u8 status
= cfg
->type_status
& IPR_PATH_CFG_STATUS_MASK
;
2144 if (type
== IPR_PATH_CFG_NOT_EXIST
)
2147 for (i
= 0; i
< ARRAY_SIZE(path_type_desc
); i
++) {
2148 if (path_type_desc
[i
].type
!= type
)
2151 for (j
= 0; j
< ARRAY_SIZE(path_status_desc
); j
++) {
2152 if (path_status_desc
[j
].status
!= status
)
2155 if (type
== IPR_PATH_CFG_IOA_PORT
) {
2156 ipr_hcam_err(hostrcb
, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2157 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2158 cfg
->phy
, link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2159 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2161 if (cfg
->cascaded_expander
== 0xff && cfg
->phy
== 0xff) {
2162 ipr_hcam_err(hostrcb
, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2163 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2164 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2165 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2166 } else if (cfg
->cascaded_expander
== 0xff) {
2167 ipr_hcam_err(hostrcb
, "%s %s: Phy=%d, Link rate=%s, "
2168 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2169 path_type_desc
[i
].desc
, cfg
->phy
,
2170 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2171 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2172 } else if (cfg
->phy
== 0xff) {
2173 ipr_hcam_err(hostrcb
, "%s %s: Cascade=%d, Link rate=%s, "
2174 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2175 path_type_desc
[i
].desc
, cfg
->cascaded_expander
,
2176 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2177 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2179 ipr_hcam_err(hostrcb
, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2180 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2181 path_type_desc
[i
].desc
, cfg
->cascaded_expander
, cfg
->phy
,
2182 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2183 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2190 ipr_hcam_err(hostrcb
, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2191 "WWN=%08X%08X\n", cfg
->type_status
, cfg
->cascaded_expander
, cfg
->phy
,
2192 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2193 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2197 * ipr_log64_path_elem - Log a fabric path element.
2198 * @hostrcb: hostrcb struct
2199 * @cfg: fabric path element struct
2204 static void ipr_log64_path_elem(struct ipr_hostrcb
*hostrcb
,
2205 struct ipr_hostrcb64_config_element
*cfg
)
2208 u8 desc_id
= cfg
->descriptor_id
& IPR_DESCRIPTOR_MASK
;
2209 u8 type
= cfg
->type_status
& IPR_PATH_CFG_TYPE_MASK
;
2210 u8 status
= cfg
->type_status
& IPR_PATH_CFG_STATUS_MASK
;
2211 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2213 if (type
== IPR_PATH_CFG_NOT_EXIST
|| desc_id
!= IPR_DESCRIPTOR_SIS64
)
2216 for (i
= 0; i
< ARRAY_SIZE(path_type_desc
); i
++) {
2217 if (path_type_desc
[i
].type
!= type
)
2220 for (j
= 0; j
< ARRAY_SIZE(path_status_desc
); j
++) {
2221 if (path_status_desc
[j
].status
!= status
)
2224 ipr_hcam_err(hostrcb
, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2225 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2226 ipr_format_res_path(hostrcb
->ioa_cfg
,
2227 cfg
->res_path
, buffer
, sizeof(buffer
)),
2228 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2229 be32_to_cpu(cfg
->wwid
[0]),
2230 be32_to_cpu(cfg
->wwid
[1]));
2234 ipr_hcam_err(hostrcb
, "Path element=%02X: Resource Path=%s, Link rate=%s "
2235 "WWN=%08X%08X\n", cfg
->type_status
,
2236 ipr_format_res_path(hostrcb
->ioa_cfg
,
2237 cfg
->res_path
, buffer
, sizeof(buffer
)),
2238 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2239 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2243 * ipr_log_fabric_error - Log a fabric error.
2244 * @ioa_cfg: ioa config struct
2245 * @hostrcb: hostrcb struct
2250 static void ipr_log_fabric_error(struct ipr_ioa_cfg
*ioa_cfg
,
2251 struct ipr_hostrcb
*hostrcb
)
2253 struct ipr_hostrcb_type_20_error
*error
;
2254 struct ipr_hostrcb_fabric_desc
*fabric
;
2255 struct ipr_hostrcb_config_element
*cfg
;
2258 error
= &hostrcb
->hcam
.u
.error
.u
.type_20_error
;
2259 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
2260 ipr_hcam_err(hostrcb
, "%s\n", error
->failure_reason
);
2262 add_len
= be32_to_cpu(hostrcb
->hcam
.length
) -
2263 (offsetof(struct ipr_hostrcb_error
, u
) +
2264 offsetof(struct ipr_hostrcb_type_20_error
, desc
));
2266 for (i
= 0, fabric
= error
->desc
; i
< error
->num_entries
; i
++) {
2267 ipr_log_fabric_path(hostrcb
, fabric
);
2268 for_each_fabric_cfg(fabric
, cfg
)
2269 ipr_log_path_elem(hostrcb
, cfg
);
2271 add_len
-= be16_to_cpu(fabric
->length
);
2272 fabric
= (struct ipr_hostrcb_fabric_desc
*)
2273 ((unsigned long)fabric
+ be16_to_cpu(fabric
->length
));
2276 ipr_log_hex_data(ioa_cfg
, (__be32
*)fabric
, add_len
);
2280 * ipr_log_sis64_array_error - Log a sis64 array error.
2281 * @ioa_cfg: ioa config struct
2282 * @hostrcb: hostrcb struct
2287 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg
*ioa_cfg
,
2288 struct ipr_hostrcb
*hostrcb
)
2291 struct ipr_hostrcb_type_24_error
*error
;
2292 struct ipr_hostrcb64_array_data_entry
*array_entry
;
2293 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2294 const u8 zero_sn
[IPR_SERIAL_NUM_LEN
] = { [0 ... IPR_SERIAL_NUM_LEN
-1] = '0' };
2296 error
= &hostrcb
->hcam
.u
.error64
.u
.type_24_error
;
2300 ipr_err("RAID %s Array Configuration: %s\n",
2301 error
->protection_level
,
2302 ipr_format_res_path(ioa_cfg
, error
->last_res_path
,
2303 buffer
, sizeof(buffer
)));
2307 array_entry
= error
->array_member
;
2308 num_entries
= min_t(u32
, error
->num_entries
,
2309 ARRAY_SIZE(error
->array_member
));
2311 for (i
= 0; i
< num_entries
; i
++, array_entry
++) {
2313 if (!memcmp(array_entry
->vpd
.vpd
.sn
, zero_sn
, IPR_SERIAL_NUM_LEN
))
2316 if (error
->exposed_mode_adn
== i
)
2317 ipr_err("Exposed Array Member %d:\n", i
);
2319 ipr_err("Array Member %d:\n", i
);
2321 ipr_err("Array Member %d:\n", i
);
2322 ipr_log_ext_vpd(&array_entry
->vpd
);
2323 ipr_err("Current Location: %s\n",
2324 ipr_format_res_path(ioa_cfg
, array_entry
->res_path
,
2325 buffer
, sizeof(buffer
)));
2326 ipr_err("Expected Location: %s\n",
2327 ipr_format_res_path(ioa_cfg
,
2328 array_entry
->expected_res_path
,
2329 buffer
, sizeof(buffer
)));
2336 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2337 * @ioa_cfg: ioa config struct
2338 * @hostrcb: hostrcb struct
2343 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg
*ioa_cfg
,
2344 struct ipr_hostrcb
*hostrcb
)
2346 struct ipr_hostrcb_type_30_error
*error
;
2347 struct ipr_hostrcb64_fabric_desc
*fabric
;
2348 struct ipr_hostrcb64_config_element
*cfg
;
2351 error
= &hostrcb
->hcam
.u
.error64
.u
.type_30_error
;
2353 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
2354 ipr_hcam_err(hostrcb
, "%s\n", error
->failure_reason
);
2356 add_len
= be32_to_cpu(hostrcb
->hcam
.length
) -
2357 (offsetof(struct ipr_hostrcb64_error
, u
) +
2358 offsetof(struct ipr_hostrcb_type_30_error
, desc
));
2360 for (i
= 0, fabric
= error
->desc
; i
< error
->num_entries
; i
++) {
2361 ipr_log64_fabric_path(hostrcb
, fabric
);
2362 for_each_fabric_cfg(fabric
, cfg
)
2363 ipr_log64_path_elem(hostrcb
, cfg
);
2365 add_len
-= be16_to_cpu(fabric
->length
);
2366 fabric
= (struct ipr_hostrcb64_fabric_desc
*)
2367 ((unsigned long)fabric
+ be16_to_cpu(fabric
->length
));
2370 ipr_log_hex_data(ioa_cfg
, (__be32
*)fabric
, add_len
);
2374 * ipr_log_generic_error - Log an adapter error.
2375 * @ioa_cfg: ioa config struct
2376 * @hostrcb: hostrcb struct
2381 static void ipr_log_generic_error(struct ipr_ioa_cfg
*ioa_cfg
,
2382 struct ipr_hostrcb
*hostrcb
)
2384 ipr_log_hex_data(ioa_cfg
, hostrcb
->hcam
.u
.raw
.data
,
2385 be32_to_cpu(hostrcb
->hcam
.length
));
2389 * ipr_log_sis64_device_error - Log a cache error.
2390 * @ioa_cfg: ioa config struct
2391 * @hostrcb: hostrcb struct
2396 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg
*ioa_cfg
,
2397 struct ipr_hostrcb
*hostrcb
)
2399 struct ipr_hostrcb_type_21_error
*error
;
2400 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2402 error
= &hostrcb
->hcam
.u
.error64
.u
.type_21_error
;
2404 ipr_err("-----Failing Device Information-----\n");
2405 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2406 be32_to_cpu(error
->wwn
[0]), be32_to_cpu(error
->wwn
[1]),
2407 be32_to_cpu(error
->wwn
[2]), be32_to_cpu(error
->wwn
[3]));
2408 ipr_err("Device Resource Path: %s\n",
2409 __ipr_format_res_path(error
->res_path
,
2410 buffer
, sizeof(buffer
)));
2411 error
->primary_problem_desc
[sizeof(error
->primary_problem_desc
) - 1] = '\0';
2412 error
->second_problem_desc
[sizeof(error
->second_problem_desc
) - 1] = '\0';
2413 ipr_err("Primary Problem Description: %s\n", error
->primary_problem_desc
);
2414 ipr_err("Secondary Problem Description: %s\n", error
->second_problem_desc
);
2415 ipr_err("SCSI Sense Data:\n");
2416 ipr_log_hex_data(ioa_cfg
, error
->sense_data
, sizeof(error
->sense_data
));
2417 ipr_err("SCSI Command Descriptor Block: \n");
2418 ipr_log_hex_data(ioa_cfg
, error
->cdb
, sizeof(error
->cdb
));
2420 ipr_err("Additional IOA Data:\n");
2421 ipr_log_hex_data(ioa_cfg
, error
->ioa_data
, be32_to_cpu(error
->length_of_error
));

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}
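
/*
 * Usage sketch (illustrative): because the comparison above masks the IOASC
 * with IPR_IOASC_IOASC_MASK, qualifier bits outside the mask do not affect
 * the lookup, and index 0 (the "unknown error" entry) is always a safe
 * fallback. This mirrors how ipr_handle_log_data() later in this file
 * consumes the result:
 *
 *	error_index = ipr_get_error(ioasc);
 *	if (ipr_error_table[error_index].log_hcam)
 *		ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 */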
2447 * ipr_handle_log_data - Log an adapter error.
2448 * @ioa_cfg: ioa config struct
2449 * @hostrcb: hostrcb struct
2451 * This function logs an adapter error to the system.
2456 static void ipr_handle_log_data(struct ipr_ioa_cfg
*ioa_cfg
,
2457 struct ipr_hostrcb
*hostrcb
)
2461 struct ipr_hostrcb_type_21_error
*error
;
2463 if (hostrcb
->hcam
.notify_type
!= IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY
)
2466 if (hostrcb
->hcam
.notifications_lost
== IPR_HOST_RCB_NOTIFICATIONS_LOST
)
2467 dev_err(&ioa_cfg
->pdev
->dev
, "Error notifications lost\n");
2470 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error64
.fd_ioasc
);
2472 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
2474 if (!ioa_cfg
->sis64
&& (ioasc
== IPR_IOASC_BUS_WAS_RESET
||
2475 ioasc
== IPR_IOASC_BUS_WAS_RESET_BY_OTHER
)) {
2476 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2477 scsi_report_bus_reset(ioa_cfg
->host
,
2478 hostrcb
->hcam
.u
.error
.fd_res_addr
.bus
);
2481 error_index
= ipr_get_error(ioasc
);
2483 if (!ipr_error_table
[error_index
].log_hcam
)
2486 if (ioasc
== IPR_IOASC_HW_CMD_FAILED
&&
2487 hostrcb
->hcam
.overlay_id
== IPR_HOST_RCB_OVERLAY_ID_21
) {
2488 error
= &hostrcb
->hcam
.u
.error64
.u
.type_21_error
;
2490 if (((be32_to_cpu(error
->sense_data
[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST
&&
2491 ioa_cfg
->log_level
<= IPR_DEFAULT_LOG_LEVEL
)
2495 ipr_hcam_err(hostrcb
, "%s\n", ipr_error_table
[error_index
].error
);
2497 /* Set indication we have logged an error */
2498 ioa_cfg
->errors_logged
++;
2500 if (ioa_cfg
->log_level
< ipr_error_table
[error_index
].log_hcam
)
2502 if (be32_to_cpu(hostrcb
->hcam
.length
) > sizeof(hostrcb
->hcam
.u
.raw
))
2503 hostrcb
->hcam
.length
= cpu_to_be32(sizeof(hostrcb
->hcam
.u
.raw
));
2505 switch (hostrcb
->hcam
.overlay_id
) {
2506 case IPR_HOST_RCB_OVERLAY_ID_2
:
2507 ipr_log_cache_error(ioa_cfg
, hostrcb
);
2509 case IPR_HOST_RCB_OVERLAY_ID_3
:
2510 ipr_log_config_error(ioa_cfg
, hostrcb
);
2512 case IPR_HOST_RCB_OVERLAY_ID_4
:
2513 case IPR_HOST_RCB_OVERLAY_ID_6
:
2514 ipr_log_array_error(ioa_cfg
, hostrcb
);
2516 case IPR_HOST_RCB_OVERLAY_ID_7
:
2517 ipr_log_dual_ioa_error(ioa_cfg
, hostrcb
);
2519 case IPR_HOST_RCB_OVERLAY_ID_12
:
2520 ipr_log_enhanced_cache_error(ioa_cfg
, hostrcb
);
2522 case IPR_HOST_RCB_OVERLAY_ID_13
:
2523 ipr_log_enhanced_config_error(ioa_cfg
, hostrcb
);
2525 case IPR_HOST_RCB_OVERLAY_ID_14
:
2526 case IPR_HOST_RCB_OVERLAY_ID_16
:
2527 ipr_log_enhanced_array_error(ioa_cfg
, hostrcb
);
2529 case IPR_HOST_RCB_OVERLAY_ID_17
:
2530 ipr_log_enhanced_dual_ioa_error(ioa_cfg
, hostrcb
);
2532 case IPR_HOST_RCB_OVERLAY_ID_20
:
2533 ipr_log_fabric_error(ioa_cfg
, hostrcb
);
2535 case IPR_HOST_RCB_OVERLAY_ID_21
:
2536 ipr_log_sis64_device_error(ioa_cfg
, hostrcb
);
2538 case IPR_HOST_RCB_OVERLAY_ID_23
:
2539 ipr_log_sis64_config_error(ioa_cfg
, hostrcb
);
2541 case IPR_HOST_RCB_OVERLAY_ID_24
:
2542 case IPR_HOST_RCB_OVERLAY_ID_26
:
2543 ipr_log_sis64_array_error(ioa_cfg
, hostrcb
);
2545 case IPR_HOST_RCB_OVERLAY_ID_30
:
2546 ipr_log_sis64_fabric_error(ioa_cfg
, hostrcb
);
2548 case IPR_HOST_RCB_OVERLAY_ID_1
:
2549 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT
:
2551 ipr_log_generic_error(ioa_cfg
, hostrcb
);
2557 * ipr_process_error - Op done function for an adapter error log.
2558 * @ipr_cmd: ipr command struct
2560 * This function is the op done function for an error log host
2561 * controlled async from the adapter. It will log the error and
2562 * send the HCAM back to the adapter.
2567 static void ipr_process_error(struct ipr_cmnd
*ipr_cmd
)
2569 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
2570 struct ipr_hostrcb
*hostrcb
= ipr_cmd
->u
.hostrcb
;
2571 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
2575 fd_ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error64
.fd_ioasc
);
2577 fd_ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
2579 list_del(&hostrcb
->queue
);
2580 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
2583 ipr_handle_log_data(ioa_cfg
, hostrcb
);
2584 if (fd_ioasc
== IPR_IOASC_NR_IOA_RESET_REQUIRED
)
2585 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_ABBREV
);
2586 } else if (ioasc
!= IPR_IOASC_IOA_WAS_RESET
&&
2587 ioasc
!= IPR_IOASC_ABORTED_CMD_TERM_BY_HOST
) {
2588 dev_err(&ioa_cfg
->pdev
->dev
,
2589 "Host RCB failed with IOASC: 0x%08X\n", ioasc
);
2592 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_LOG_DATA
, hostrcb
);

/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
2663 * ipr_find_ses_entry - Find matching SES in SES table
2664 * @res: resource entry struct of SES
2667 * pointer to SES table entry / NULL on failure
2669 static const struct ipr_ses_table_entry
*
2670 ipr_find_ses_entry(struct ipr_resource_entry
*res
)
2673 struct ipr_std_inq_vpids
*vpids
;
2674 const struct ipr_ses_table_entry
*ste
= ipr_ses_table
;
2676 for (i
= 0; i
< ARRAY_SIZE(ipr_ses_table
); i
++, ste
++) {
2677 for (j
= 0, matches
= 0; j
< IPR_PROD_ID_LEN
; j
++) {
2678 if (ste
->compare_product_id_byte
[j
] == 'X') {
2679 vpids
= &res
->std_inq_data
.vpids
;
2680 if (vpids
->product_id
[j
] == ste
->product_id
[j
])
2688 if (matches
== IPR_PROD_ID_LEN
)

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
			continue;

		if (bus != res->bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
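
/*
 * Worked example (illustrative only): if the matching SES table entry
 * reports max_bus_speed_limit = 320 (MB/sec appears to be the unit implied
 * by the formula) on a 16-bit wide bus (bus_width = 16), then
 *
 *	max_xfer_rate = (320 * 10) / (16 / 8) = 3200 / 2 = 1600
 *
 * which is 160 MHz in the 100KHz units described above, matching the
 * "max 160MHz = max 320MB/sec" note in the function header. With no
 * limiting SES entry the value stays at IPR_MAX_SCSI_RATE(bus_width).
 */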

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}

/**
 * ipr_get_sis64_dump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:			destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
					   u32 start_addr,
					   __be32 *dest, u32 length_in_words)
{
	int i;

	for (i = 0; i < length_in_words; i++) {
		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
		dest++;
	}

	return 0;
}
2789 * ipr_get_ldump_data_section - Dump IOA memory
2790 * @ioa_cfg: ioa config struct
2791 * @start_addr: adapter address to dump
2792 * @dest: destination kernel buffer
2793 * @length_in_words: length to dump in 4 byte words
2796 * 0 on success / -EIO on failure
2798 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg
*ioa_cfg
,
2800 __be32
*dest
, u32 length_in_words
)
2802 volatile u32 temp_pcii_reg
;
2806 return ipr_get_sis64_dump_data_section(ioa_cfg
, start_addr
,
2807 dest
, length_in_words
);
2809 /* Write IOA interrupt reg starting LDUMP state */
2810 writel((IPR_UPROCI_RESET_ALERT
| IPR_UPROCI_IO_DEBUG_ALERT
),
2811 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2813 /* Wait for IO debug acknowledge */
2814 if (ipr_wait_iodbg_ack(ioa_cfg
,
2815 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC
)) {
2816 dev_err(&ioa_cfg
->pdev
->dev
,
2817 "IOA dump long data transfer timeout\n");
2821 /* Signal LDUMP interlocked - clear IO debug ack */
2822 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2823 ioa_cfg
->regs
.clr_interrupt_reg
);
2825 /* Write Mailbox with starting address */
2826 writel(start_addr
, ioa_cfg
->ioa_mailbox
);
2828 /* Signal address valid - clear IOA Reset alert */
2829 writel(IPR_UPROCI_RESET_ALERT
,
2830 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2832 for (i
= 0; i
< length_in_words
; i
++) {
2833 /* Wait for IO debug acknowledge */
2834 if (ipr_wait_iodbg_ack(ioa_cfg
,
2835 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
)) {
2836 dev_err(&ioa_cfg
->pdev
->dev
,
2837 "IOA dump short data transfer timeout\n");
2841 /* Read data from mailbox and increment destination pointer */
2842 *dest
= cpu_to_be32(readl(ioa_cfg
->ioa_mailbox
));
2845 /* For all but the last word of data, signal data received */
2846 if (i
< (length_in_words
- 1)) {
2847 /* Signal dump data received - Clear IO debug Ack */
2848 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2849 ioa_cfg
->regs
.clr_interrupt_reg
);
2853 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2854 writel(IPR_UPROCI_RESET_ALERT
,
2855 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2857 writel(IPR_UPROCI_IO_DEBUG_ALERT
,
2858 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2860 /* Signal dump data received - Clear IO debug Ack */
2861 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2862 ioa_cfg
->regs
.clr_interrupt_reg
);
2864 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2865 while (delay
< IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
) {
2867 readl(ioa_cfg
->regs
.sense_uproc_interrupt_reg32
);
2869 if (!(temp_pcii_reg
& IPR_UPROCI_RESET_ALERT
))
2879 #ifdef CONFIG_SCSI_IPR_DUMP
2881 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2882 * @ioa_cfg: ioa config struct
2883 * @pci_address: adapter address
2884 * @length: length of data to copy
2886 * Copy data from PCI adapter to kernel buffer.
2887 * Note: length MUST be a 4 byte multiple
2889 * 0 on success / other on failure
2891 static int ipr_sdt_copy(struct ipr_ioa_cfg
*ioa_cfg
,
2892 unsigned long pci_address
, u32 length
)
2894 int bytes_copied
= 0;
2895 int cur_len
, rc
, rem_len
, rem_page_len
, max_dump_size
;
2897 unsigned long lock_flags
= 0;
2898 struct ipr_ioa_dump
*ioa_dump
= &ioa_cfg
->dump
->ioa_dump
;
2901 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
2903 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
2905 while (bytes_copied
< length
&&
2906 (ioa_dump
->hdr
.len
+ bytes_copied
) < max_dump_size
) {
2907 if (ioa_dump
->page_offset
>= PAGE_SIZE
||
2908 ioa_dump
->page_offset
== 0) {
2909 page
= (__be32
*)__get_free_page(GFP_ATOMIC
);
2913 return bytes_copied
;
2916 ioa_dump
->page_offset
= 0;
2917 ioa_dump
->ioa_data
[ioa_dump
->next_page_index
] = page
;
2918 ioa_dump
->next_page_index
++;
2920 page
= ioa_dump
->ioa_data
[ioa_dump
->next_page_index
- 1];
2922 rem_len
= length
- bytes_copied
;
2923 rem_page_len
= PAGE_SIZE
- ioa_dump
->page_offset
;
2924 cur_len
= min(rem_len
, rem_page_len
);
2926 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
2927 if (ioa_cfg
->sdt_state
== ABORT_DUMP
) {
2930 rc
= ipr_get_ldump_data_section(ioa_cfg
,
2931 pci_address
+ bytes_copied
,
2932 &page
[ioa_dump
->page_offset
/ 4],
2933 (cur_len
/ sizeof(u32
)));
2935 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
2938 ioa_dump
->page_offset
+= cur_len
;
2939 bytes_copied
+= cur_len
;
2947 return bytes_copied
;

/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}
2966 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2967 * @ioa_cfg: ioa config struct
2968 * @driver_dump: driver dump struct
2973 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg
*ioa_cfg
,
2974 struct ipr_driver_dump
*driver_dump
)
2976 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
2978 ipr_init_dump_entry_hdr(&driver_dump
->ioa_type_entry
.hdr
);
2979 driver_dump
->ioa_type_entry
.hdr
.len
=
2980 sizeof(struct ipr_dump_ioa_type_entry
) -
2981 sizeof(struct ipr_dump_entry_header
);
2982 driver_dump
->ioa_type_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
2983 driver_dump
->ioa_type_entry
.hdr
.id
= IPR_DUMP_DRIVER_TYPE_ID
;
2984 driver_dump
->ioa_type_entry
.type
= ioa_cfg
->type
;
2985 driver_dump
->ioa_type_entry
.fw_version
= (ucode_vpd
->major_release
<< 24) |
2986 (ucode_vpd
->card_type
<< 16) | (ucode_vpd
->minor_release
[0] << 8) |
2987 ucode_vpd
->minor_release
[1];
2988 driver_dump
->hdr
.num_entries
++;
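
/*
 * Illustrative note (not part of the driver): fw_version above packs the
 * page 3 inquiry VPD fields into a single 32-bit word, most significant
 * byte first:
 *
 *	fw_version = (major_release << 24) | (card_type << 16) |
 *		     (minor_release[0] << 8) | minor_release[1];
 *
 * For example (hypothetical values), major_release 0x02, card_type 0x10
 * and minor_release {0x03, 0x5E} yield 0x0210035E, the same byte order the
 * fw_version sysfs attribute prints with "%02X%02X%02X%02X" later in this
 * file.
 */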

/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}
3013 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3014 * @ioa_cfg: ioa config struct
3015 * @driver_dump: driver dump struct
3020 static void ipr_dump_trace_data(struct ipr_ioa_cfg
*ioa_cfg
,
3021 struct ipr_driver_dump
*driver_dump
)
3023 ipr_init_dump_entry_hdr(&driver_dump
->trace_entry
.hdr
);
3024 driver_dump
->trace_entry
.hdr
.len
=
3025 sizeof(struct ipr_dump_trace_entry
) -
3026 sizeof(struct ipr_dump_entry_header
);
3027 driver_dump
->trace_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3028 driver_dump
->trace_entry
.hdr
.id
= IPR_DUMP_TRACE_ID
;
3029 memcpy(driver_dump
->trace_entry
.trace
, ioa_cfg
->trace
, IPR_TRACE_SIZE
);
3030 driver_dump
->hdr
.num_entries
++;
3034 * ipr_dump_location_data - Fill in the IOA location in the dump.
3035 * @ioa_cfg: ioa config struct
3036 * @driver_dump: driver dump struct
3041 static void ipr_dump_location_data(struct ipr_ioa_cfg
*ioa_cfg
,
3042 struct ipr_driver_dump
*driver_dump
)
3044 ipr_init_dump_entry_hdr(&driver_dump
->location_entry
.hdr
);
3045 driver_dump
->location_entry
.hdr
.len
=
3046 sizeof(struct ipr_dump_location_entry
) -
3047 sizeof(struct ipr_dump_entry_header
);
3048 driver_dump
->location_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_ASCII
;
3049 driver_dump
->location_entry
.hdr
.id
= IPR_DUMP_LOCATION_ID
;
3050 strcpy(driver_dump
->location_entry
.location
, dev_name(&ioa_cfg
->pdev
->dev
));
3051 driver_dump
->hdr
.num_entries
++;
3055 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3056 * @ioa_cfg: ioa config struct
3057 * @dump: dump struct
3062 static void ipr_get_ioa_dump(struct ipr_ioa_cfg
*ioa_cfg
, struct ipr_dump
*dump
)
3064 unsigned long start_addr
, sdt_word
;
3065 unsigned long lock_flags
= 0;
3066 struct ipr_driver_dump
*driver_dump
= &dump
->driver_dump
;
3067 struct ipr_ioa_dump
*ioa_dump
= &dump
->ioa_dump
;
3068 u32 num_entries
, max_num_entries
, start_off
, end_off
;
3069 u32 max_dump_size
, bytes_to_copy
, bytes_copied
, rc
;
3070 struct ipr_sdt
*sdt
;
3076 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3078 if (ioa_cfg
->sdt_state
!= READ_DUMP
) {
3079 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3083 if (ioa_cfg
->sis64
) {
3084 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3085 ssleep(IPR_DUMP_DELAY_SECONDS
);
3086 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3089 start_addr
= readl(ioa_cfg
->ioa_mailbox
);
3091 if (!ioa_cfg
->sis64
&& !ipr_sdt_is_fmt2(start_addr
)) {
3092 dev_err(&ioa_cfg
->pdev
->dev
,
3093 "Invalid dump table format: %lx\n", start_addr
);
3094 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3098 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA initiated\n");
3100 driver_dump
->hdr
.eye_catcher
= IPR_DUMP_EYE_CATCHER
;
3102 /* Initialize the overall dump header */
3103 driver_dump
->hdr
.len
= sizeof(struct ipr_driver_dump
);
3104 driver_dump
->hdr
.num_entries
= 1;
3105 driver_dump
->hdr
.first_entry_offset
= sizeof(struct ipr_dump_header
);
3106 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_SUCCESS
;
3107 driver_dump
->hdr
.os
= IPR_DUMP_OS_LINUX
;
3108 driver_dump
->hdr
.driver_name
= IPR_DUMP_DRIVER_NAME
;
3110 ipr_dump_version_data(ioa_cfg
, driver_dump
);
3111 ipr_dump_location_data(ioa_cfg
, driver_dump
);
3112 ipr_dump_ioa_type_data(ioa_cfg
, driver_dump
);
3113 ipr_dump_trace_data(ioa_cfg
, driver_dump
);
3115 /* Update dump_header */
3116 driver_dump
->hdr
.len
+= sizeof(struct ipr_dump_entry_header
);
3118 /* IOA Dump entry */
3119 ipr_init_dump_entry_hdr(&ioa_dump
->hdr
);
3120 ioa_dump
->hdr
.len
= 0;
3121 ioa_dump
->hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3122 ioa_dump
->hdr
.id
= IPR_DUMP_IOA_DUMP_ID
;
3124 /* First entries in sdt are actually a list of dump addresses and
3125 lengths to gather the real dump data. sdt represents the pointer
3126 to the ioa generated dump table. Dump data will be extracted based
3127 on entries in this table */
3128 sdt
= &ioa_dump
->sdt
;
3130 if (ioa_cfg
->sis64
) {
3131 max_num_entries
= IPR_FMT3_NUM_SDT_ENTRIES
;
3132 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
3134 max_num_entries
= IPR_FMT2_NUM_SDT_ENTRIES
;
3135 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
3138 bytes_to_copy
= offsetof(struct ipr_sdt
, entry
) +
3139 (max_num_entries
* sizeof(struct ipr_sdt_entry
));
3140 rc
= ipr_get_ldump_data_section(ioa_cfg
, start_addr
, (__be32
*)sdt
,
3141 bytes_to_copy
/ sizeof(__be32
));
3143 /* Smart Dump table is ready to use and the first entry is valid */
3144 if (rc
|| ((be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT3_SDT_READY_TO_USE
) &&
3145 (be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT2_SDT_READY_TO_USE
))) {
3146 dev_err(&ioa_cfg
->pdev
->dev
,
3147 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3148 rc
, be32_to_cpu(sdt
->hdr
.state
));
3149 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_FAILED
;
3150 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
3151 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3155 num_entries
= be32_to_cpu(sdt
->hdr
.num_entries_used
);
3157 if (num_entries
> max_num_entries
)
3158 num_entries
= max_num_entries
;
3160 /* Update dump length to the actual data to be copied */
3161 dump
->driver_dump
.hdr
.len
+= sizeof(struct ipr_sdt_header
);
3163 dump
->driver_dump
.hdr
.len
+= num_entries
* sizeof(struct ipr_sdt_entry
);
3165 dump
->driver_dump
.hdr
.len
+= max_num_entries
* sizeof(struct ipr_sdt_entry
);
3167 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3169 for (i
= 0; i
< num_entries
; i
++) {
3170 if (ioa_dump
->hdr
.len
> max_dump_size
) {
3171 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3175 if (sdt
->entry
[i
].flags
& IPR_SDT_VALID_ENTRY
) {
3176 sdt_word
= be32_to_cpu(sdt
->entry
[i
].start_token
);
3178 bytes_to_copy
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3180 start_off
= sdt_word
& IPR_FMT2_MBX_ADDR_MASK
;
3181 end_off
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3183 if (ipr_sdt_is_fmt2(sdt_word
) && sdt_word
)
3184 bytes_to_copy
= end_off
- start_off
;
3189 if (bytes_to_copy
> max_dump_size
) {
3190 sdt
->entry
[i
].flags
&= ~IPR_SDT_VALID_ENTRY
;
3194 /* Copy data from adapter to driver buffers */
3195 bytes_copied
= ipr_sdt_copy(ioa_cfg
, sdt_word
,
3198 ioa_dump
->hdr
.len
+= bytes_copied
;
3200 if (bytes_copied
!= bytes_to_copy
) {
3201 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3208 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA completed.\n");
3210 /* Update dump_header */
3211 driver_dump
->hdr
.len
+= ioa_dump
->hdr
.len
;
3213 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
3218 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3222 * ipr_release_dump - Free adapter dump memory
3223 * @kref: kref struct
3228 static void ipr_release_dump(struct kref
*kref
)
3230 struct ipr_dump
*dump
= container_of(kref
, struct ipr_dump
, kref
);
3231 struct ipr_ioa_cfg
*ioa_cfg
= dump
->ioa_cfg
;
3232 unsigned long lock_flags
= 0;
3236 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3237 ioa_cfg
->dump
= NULL
;
3238 ioa_cfg
->sdt_state
= INACTIVE
;
3239 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3241 for (i
= 0; i
< dump
->ioa_dump
.next_page_index
; i
++)
3242 free_page((unsigned long) dump
->ioa_dump
.ioa_data
[i
]);
3244 vfree(dump
->ioa_dump
.ioa_data
);
3250 * ipr_worker_thread - Worker thread
3251 * @work: ioa config struct
3253 * Called at task level from a work thread. This function takes care
3254 * of adding and removing device from the mid-layer as configuration
3255 * changes are detected by the adapter.
3260 static void ipr_worker_thread(struct work_struct
*work
)
3262 unsigned long lock_flags
;
3263 struct ipr_resource_entry
*res
;
3264 struct scsi_device
*sdev
;
3265 struct ipr_dump
*dump
;
3266 struct ipr_ioa_cfg
*ioa_cfg
=
3267 container_of(work
, struct ipr_ioa_cfg
, work_q
);
3268 u8 bus
, target
, lun
;
3272 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3274 if (ioa_cfg
->sdt_state
== READ_DUMP
) {
3275 dump
= ioa_cfg
->dump
;
3277 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3280 kref_get(&dump
->kref
);
3281 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3282 ipr_get_ioa_dump(ioa_cfg
, dump
);
3283 kref_put(&dump
->kref
, ipr_release_dump
);
3285 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3286 if (ioa_cfg
->sdt_state
== DUMP_OBTAINED
&& !ioa_cfg
->dump_timeout
)
3287 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3288 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3295 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
) {
3296 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3300 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3301 if (res
->del_from_ml
&& res
->sdev
) {
3304 if (!scsi_device_get(sdev
)) {
3305 if (!res
->add_to_ml
)
3306 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
3308 res
->del_from_ml
= 0;
3309 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3310 scsi_remove_device(sdev
);
3311 scsi_device_put(sdev
);
3312 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3319 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3320 if (res
->add_to_ml
) {
3322 target
= res
->target
;
3325 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3326 scsi_add_device(ioa_cfg
->host
, bus
, target
, lun
);
3327 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3332 ioa_cfg
->scan_done
= 1;
3333 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3334 kobject_uevent(&ioa_cfg
->host
->shost_dev
.kobj
, KOBJ_CHANGE
);
3338 #ifdef CONFIG_SCSI_IPR_TRACE
3340 * ipr_read_trace - Dump the adapter trace
3341 * @filp: open sysfs file
3342 * @kobj: kobject struct
3343 * @bin_attr: bin_attribute struct
3346 * @count: buffer size
3349 * number of bytes printed to buffer
3351 static ssize_t
ipr_read_trace(struct file
*filp
, struct kobject
*kobj
,
3352 struct bin_attribute
*bin_attr
,
3353 char *buf
, loff_t off
, size_t count
)
3355 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
3356 struct Scsi_Host
*shost
= class_to_shost(dev
);
3357 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3358 unsigned long lock_flags
= 0;
3361 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3362 ret
= memory_read_from_buffer(buf
, count
, &off
, ioa_cfg
->trace
,
3364 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3369 static struct bin_attribute ipr_trace_attr
= {
3375 .read
= ipr_read_trace
,
3380 * ipr_show_fw_version - Show the firmware version
3381 * @dev: class device struct
3385 * number of bytes printed to buffer
3387 static ssize_t
ipr_show_fw_version(struct device
*dev
,
3388 struct device_attribute
*attr
, char *buf
)
3390 struct Scsi_Host
*shost
= class_to_shost(dev
);
3391 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3392 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
3393 unsigned long lock_flags
= 0;
3396 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3397 len
= snprintf(buf
, PAGE_SIZE
, "%02X%02X%02X%02X\n",
3398 ucode_vpd
->major_release
, ucode_vpd
->card_type
,
3399 ucode_vpd
->minor_release
[0],
3400 ucode_vpd
->minor_release
[1]);
3401 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3405 static struct device_attribute ipr_fw_version_attr
= {
3407 .name
= "fw_version",
3410 .show
= ipr_show_fw_version
,
3414 * ipr_show_log_level - Show the adapter's error logging level
3415 * @dev: class device struct
3419 * number of bytes printed to buffer
3421 static ssize_t
ipr_show_log_level(struct device
*dev
,
3422 struct device_attribute
*attr
, char *buf
)
3424 struct Scsi_Host
*shost
= class_to_shost(dev
);
3425 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3426 unsigned long lock_flags
= 0;
3429 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3430 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->log_level
);
3431 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3436 * ipr_store_log_level - Change the adapter's error logging level
3437 * @dev: class device struct
3441 * number of bytes printed to buffer
3443 static ssize_t
ipr_store_log_level(struct device
*dev
,
3444 struct device_attribute
*attr
,
3445 const char *buf
, size_t count
)
3447 struct Scsi_Host
*shost
= class_to_shost(dev
);
3448 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3449 unsigned long lock_flags
= 0;
3451 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3452 ioa_cfg
->log_level
= simple_strtoul(buf
, NULL
, 10);
3453 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3457 static struct device_attribute ipr_log_level_attr
= {
3459 .name
= "log_level",
3460 .mode
= S_IRUGO
| S_IWUSR
,
3462 .show
= ipr_show_log_level
,
3463 .store
= ipr_store_log_level
3467 * ipr_store_diagnostics - IOA Diagnostics interface
3468 * @dev: device struct
3470 * @count: buffer size
3472 * This function will reset the adapter and wait a reasonable
3473 * amount of time for any errors that the adapter might log.
3476 * count on success / other on failure
3478 static ssize_t
ipr_store_diagnostics(struct device
*dev
,
3479 struct device_attribute
*attr
,
3480 const char *buf
, size_t count
)
3482 struct Scsi_Host
*shost
= class_to_shost(dev
);
3483 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3484 unsigned long lock_flags
= 0;
3487 if (!capable(CAP_SYS_ADMIN
))
3490 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3491 while (ioa_cfg
->in_reset_reload
) {
3492 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3493 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3494 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3497 ioa_cfg
->errors_logged
= 0;
3498 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3500 if (ioa_cfg
->in_reset_reload
) {
3501 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3502 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3504 /* Wait for a second for any errors to be logged */
3507 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3511 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3512 if (ioa_cfg
->in_reset_reload
|| ioa_cfg
->errors_logged
)
3514 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3519 static struct device_attribute ipr_diagnostics_attr
= {
3521 .name
= "run_diagnostics",
3524 .store
= ipr_store_diagnostics
3528 * ipr_show_adapter_state - Show the adapter's state
3529 * @class_dev: device struct
3533 * number of bytes printed to buffer
3535 static ssize_t
ipr_show_adapter_state(struct device
*dev
,
3536 struct device_attribute
*attr
, char *buf
)
3538 struct Scsi_Host
*shost
= class_to_shost(dev
);
3539 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3540 unsigned long lock_flags
= 0;
3543 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3544 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
3545 len
= snprintf(buf
, PAGE_SIZE
, "offline\n");
3547 len
= snprintf(buf
, PAGE_SIZE
, "online\n");
3548 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3553 * ipr_store_adapter_state - Change adapter state
3554 * @dev: device struct
3556 * @count: buffer size
3558 * This function will change the adapter's state.
3561 * count on success / other on failure
3563 static ssize_t
ipr_store_adapter_state(struct device
*dev
,
3564 struct device_attribute
*attr
,
3565 const char *buf
, size_t count
)
3567 struct Scsi_Host
*shost
= class_to_shost(dev
);
3568 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3569 unsigned long lock_flags
;
3570 int result
= count
, i
;
3572 if (!capable(CAP_SYS_ADMIN
))
3575 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3576 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
&&
3577 !strncmp(buf
, "online", 6)) {
3578 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
3579 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
3580 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 0;
3581 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
3584 ioa_cfg
->reset_retries
= 0;
3585 ioa_cfg
->in_ioa_bringdown
= 0;
3586 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3588 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3589 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3594 static struct device_attribute ipr_ioa_state_attr
= {
3596 .name
= "online_state",
3597 .mode
= S_IRUGO
| S_IWUSR
,
3599 .show
= ipr_show_adapter_state
,
3600 .store
= ipr_store_adapter_state
3604 * ipr_store_reset_adapter - Reset the adapter
3605 * @dev: device struct
3607 * @count: buffer size
3609 * This function will reset the adapter.
3612 * count on success / other on failure
3614 static ssize_t
ipr_store_reset_adapter(struct device
*dev
,
3615 struct device_attribute
*attr
,
3616 const char *buf
, size_t count
)
3618 struct Scsi_Host
*shost
= class_to_shost(dev
);
3619 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3620 unsigned long lock_flags
;
3623 if (!capable(CAP_SYS_ADMIN
))
3626 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3627 if (!ioa_cfg
->in_reset_reload
)
3628 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3629 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3630 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3635 static struct device_attribute ipr_ioa_reset_attr
= {
3637 .name
= "reset_host",
3640 .store
= ipr_store_reset_adapter
3643 static int ipr_iopoll(struct blk_iopoll
*iop
, int budget
);
3645 * ipr_show_iopoll_weight - Show ipr polling mode
3646 * @dev: class device struct
3650 * number of bytes printed to buffer
3652 static ssize_t
ipr_show_iopoll_weight(struct device
*dev
,
3653 struct device_attribute
*attr
, char *buf
)
3655 struct Scsi_Host
*shost
= class_to_shost(dev
);
3656 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3657 unsigned long lock_flags
= 0;
3660 spin_lock_irqsave(shost
->host_lock
, lock_flags
);
3661 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->iopoll_weight
);
3662 spin_unlock_irqrestore(shost
->host_lock
, lock_flags
);
3668 * ipr_store_iopoll_weight - Change the adapter's polling mode
3669 * @dev: class device struct
3673 * number of bytes printed to buffer
3675 static ssize_t
ipr_store_iopoll_weight(struct device
*dev
,
3676 struct device_attribute
*attr
,
3677 const char *buf
, size_t count
)
3679 struct Scsi_Host
*shost
= class_to_shost(dev
);
3680 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3681 unsigned long user_iopoll_weight
;
3682 unsigned long lock_flags
= 0;
3685 if (!ioa_cfg
->sis64
) {
3686 dev_info(&ioa_cfg
->pdev
->dev
, "blk-iopoll not supported on this adapter\n");
3689 if (kstrtoul(buf
, 10, &user_iopoll_weight
))
3692 if (user_iopoll_weight
> 256) {
3693 dev_info(&ioa_cfg
->pdev
->dev
, "Invalid blk-iopoll weight. It must be less than 256\n");
3697 if (user_iopoll_weight
== ioa_cfg
->iopoll_weight
) {
3698 dev_info(&ioa_cfg
->pdev
->dev
, "Current blk-iopoll weight has the same weight\n");
3702 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
3703 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++)
3704 blk_iopoll_disable(&ioa_cfg
->hrrq
[i
].iopoll
);
3707 spin_lock_irqsave(shost
->host_lock
, lock_flags
);
3708 ioa_cfg
->iopoll_weight
= user_iopoll_weight
;
3709 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
3710 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++) {
3711 blk_iopoll_init(&ioa_cfg
->hrrq
[i
].iopoll
,
3712 ioa_cfg
->iopoll_weight
, ipr_iopoll
);
3713 blk_iopoll_enable(&ioa_cfg
->hrrq
[i
].iopoll
);
3716 spin_unlock_irqrestore(shost
->host_lock
, lock_flags
);
3721 static struct device_attribute ipr_iopoll_weight_attr
= {
3723 .name
= "iopoll_weight",
3724 .mode
= S_IRUGO
| S_IWUSR
,
3726 .show
= ipr_show_iopoll_weight
,
3727 .store
= ipr_store_iopoll_weight
3731 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3732 * @buf_len: buffer length
3734 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3735 * list to use for microcode download
3738 * pointer to sglist / NULL on failure
3740 static struct ipr_sglist
*ipr_alloc_ucode_buffer(int buf_len
)
3742 int sg_size
, order
, bsize_elem
, num_elem
, i
, j
;
3743 struct ipr_sglist
*sglist
;
3744 struct scatterlist
*scatterlist
;
3747 /* Get the minimum size per scatter/gather element */
3748 sg_size
= buf_len
/ (IPR_MAX_SGLIST
- 1);
3750 /* Get the actual size per element */
3751 order
= get_order(sg_size
);
3753 /* Determine the actual number of bytes per element */
3754 bsize_elem
= PAGE_SIZE
* (1 << order
);
3756 /* Determine the actual number of sg entries needed */
3757 if (buf_len
% bsize_elem
)
3758 num_elem
= (buf_len
/ bsize_elem
) + 1;
3760 num_elem
= buf_len
/ bsize_elem
;
3762 /* Allocate a scatter/gather list for the DMA */
3763 sglist
= kzalloc(sizeof(struct ipr_sglist
) +
3764 (sizeof(struct scatterlist
) * (num_elem
- 1)),
3767 if (sglist
== NULL
) {
3772 scatterlist
= sglist
->scatterlist
;
3773 sg_init_table(scatterlist
, num_elem
);
3775 sglist
->order
= order
;
3776 sglist
->num_sg
= num_elem
;
3778 /* Allocate a bunch of sg elements */
3779 for (i
= 0; i
< num_elem
; i
++) {
3780 page
= alloc_pages(GFP_KERNEL
, order
);
3784 /* Free up what we already allocated */
3785 for (j
= i
- 1; j
>= 0; j
--)
3786 __free_pages(sg_page(&scatterlist
[j
]), order
);
3791 sg_set_page(&scatterlist
[i
], page
, 0, 0);
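
/*
 * Sizing sketch (illustrative only; assumes IPR_MAX_SGLIST == 64 and 4KB
 * pages): for a hypothetical 1MB microcode image the calculations above
 * work out to roughly
 *
 *	sg_size    = 1048576 / (64 - 1)       = 16644 bytes
 *	order      = get_order(16644)         = 3   (8 pages)
 *	bsize_elem = PAGE_SIZE * (1 << order) = 32768 bytes
 *	num_elem   = 1048576 / 32768          = 32 elements
 *
 * so the image is carved into 32 scatter/gather elements, each backed by
 * an order-3 page allocation of 32KB.
 */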

/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:		scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	int i;

	for (i = 0; i < sglist->num_sg; i++)
		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);

	kfree(sglist);
}
3818 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3819 * @sglist: scatter/gather list pointer
3820 * @buffer: buffer pointer
3821 * @len: buffer length
3823 * Copy a microcode image from a user buffer into a buffer allocated by
3824 * ipr_alloc_ucode_buffer
3827 * 0 on success / other on failure
3829 static int ipr_copy_ucode_buffer(struct ipr_sglist
*sglist
,
3830 u8
*buffer
, u32 len
)
3832 int bsize_elem
, i
, result
= 0;
3833 struct scatterlist
*scatterlist
;
3836 /* Determine the actual number of bytes per element */
3837 bsize_elem
= PAGE_SIZE
* (1 << sglist
->order
);
3839 scatterlist
= sglist
->scatterlist
;
3841 for (i
= 0; i
< (len
/ bsize_elem
); i
++, buffer
+= bsize_elem
) {
3842 struct page
*page
= sg_page(&scatterlist
[i
]);
3845 memcpy(kaddr
, buffer
, bsize_elem
);
3848 scatterlist
[i
].length
= bsize_elem
;
3856 if (len
% bsize_elem
) {
3857 struct page
*page
= sg_page(&scatterlist
[i
]);
3860 memcpy(kaddr
, buffer
, len
% bsize_elem
);
3863 scatterlist
[i
].length
= len
% bsize_elem
;
3866 sglist
->buffer_len
= len
;
/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:		scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:		scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
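/*
 * The two builders above differ only in descriptor format: SIS64 adapters
 * take ipr_ioadl64_desc entries with separate length and 64-bit address
 * fields, while older SIS32 adapters take ipr_ioadl_desc entries that pack
 * the flags and length into one 32-bit word with a 32-bit address.  Both
 * mark the final element with IPR_IOADL_FLAGS_LAST.
 */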
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:		scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
					sglist->scatterlist, sglist->num_sg,
					DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
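/*
 * The microcode is not written to the adapter directly by this path;
 * setting ioa_cfg->ucode_sglist and initiating an IPR_SHUTDOWN_NORMAL reset
 * lets the reset job stream the mapped buffer to the IOA, while the caller
 * sleeps on reset_wait_q until in_reset_reload clears.
 */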
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	char *endline;
	u8 *src;
	int result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	snprintf(fname, sizeof(fname), "%s", buf);

	endline = strchr(fname, '\n');
	if (endline)
		*endline = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}
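/*
 * Typical usage (hypothetical host number shown): the firmware image is
 * placed where request_firmware() can find it (e.g. /lib/firmware) and the
 * update is triggered with something like
 *	echo <image name> > /sys/class/scsi_host/host0/update_fw
 */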
static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:	class device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return len;
}
static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name =		"fw_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_type
};
static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	&ipr_iopoll_weight_attr,
	NULL,
};
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	u8 *src;
	int len, sdt_end;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
	else
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sdt_end;

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}
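/*
 * The dump image read back above is laid out as: the driver dump header
 * structures, then the IOA dump SDT (whose length depends on sis64), then
 * the raw IOA data pages referenced by ioa_dump.ioa_data[], copied out one
 * page fragment at a time.
 */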
/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	__be32 **ioa_data;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	if (ioa_cfg->sis64)
		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
	else
		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));

	if (!ioa_data) {
		ipr_err("Dump memory allocation failed\n");
		kfree(dump);
		return -ENOMEM;
	}

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	return 0;
}
/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
#endif
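/*
 * With CONFIG_SCSI_IPR_DUMP enabled, writing '1' to the "dump" attribute
 * allocates dump memory and arms WAIT_FOR_DUMP, writing '0' releases it,
 * and reading the attribute returns data only once sdt_state reaches
 * DUMP_OBTAINED.
 */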
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}
/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name =		"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};
/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       __ipr_format_res_path(res->res_path, buffer,
						     sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name =		"resource_path",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_path
};
/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name =		"device_id",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_device_id
};
/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name =		"resource_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_type
};
/**
 * ipr_show_raw_mode - Show the device's raw mode setting
 * @dev:	class device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_raw_mode(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
	else
		len = -ENXIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}
/**
 * ipr_store_raw_mode - Change the device's raw mode setting
 * @dev:	class device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes consumed from buffer
 **/
static ssize_t ipr_store_raw_mode(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res)) {
			res->raw_mode = simple_strtoul(buf, NULL, 10);
			len = count;
			if (res->sdev)
				sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
					    res->raw_mode ? "enabled" : "disabled");
		} else
			len = -EINVAL;
	} else
		len = -ENXIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_raw_mode_attr = {
	.attr = {
		.name =		"raw_mode",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_raw_mode,
	.store = ipr_store_raw_mode
};
static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	&ipr_raw_mode_attr,
	NULL,
};
/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:			scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:			Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
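/*
 * 128 heads * 32 sectors = 4096 sectors (2 MB) per cylinder, so partitions
 * that fdisk places on cylinder boundaries also start on 4K boundaries,
 * which is the alignment the IOA prefers.
 */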
/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;
/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (!ipr_find_starget(starget)) {
			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->array_ids);
			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->vset_ids);
			else if (starget->channel == 0)
				clear_bit(starget->id, ioa_cfg->target_ids);
		}
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}
/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}
/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			sdev->scsi_level = SCSI_SPC_3;
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		}

		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_res_path(ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port) {
		rc = ata_sas_port_init(sata_port->ap);
		if (rc == 0)
			rc = ata_sas_sync_probe(sata_port->ap);
	}

	if (rc)
		ipr_slave_destroy(sdev);

	return rc;
}
/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
/**
 * ipr_match_lun - Match function for specified LUN
 * @ipr_cmd:	ipr command struct
 * @device:		device to match (sdev)
 *
 * Returns:
 *	1 if command matches sdev / 0 if command does not match sdev
 **/
static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
{
	if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
		return 1;
	return 0;
}
/**
 * ipr_wait_for_ops - Wait for matching commands to complete
 * @ioa_cfg:	ioa config struct
 * @device:		device to match (sdev)
 * @match:		match function to use
 *
 * Returns:
 *	SUCCESS / FAILED
 **/
static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
			    int (*match)(struct ipr_cmnd *, void *))
{
	struct ipr_cmnd *ipr_cmd;
	int wait;
	unsigned long flags;
	struct ipr_hrr_queue *hrrq;
	signed long timeout = IPR_ABORT_TASK_TIMEOUT;
	DECLARE_COMPLETION_ONSTACK(comp);

	do {
		wait = 0;

		for_each_hrrq(hrrq, ioa_cfg) {
			spin_lock_irqsave(hrrq->lock, flags);
			list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
				if (match(ipr_cmd, device)) {
					ipr_cmd->eh_comp = &comp;
					wait++;
				}
			}
			spin_unlock_irqrestore(hrrq->lock, flags);
		}

		if (wait) {
			timeout = wait_for_completion_timeout(&comp, timeout);

			if (!timeout) {
				wait = 0;

				for_each_hrrq(hrrq, ioa_cfg) {
					spin_lock_irqsave(hrrq->lock, flags);
					list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
						if (match(ipr_cmd, device)) {
							ipr_cmd->eh_comp = NULL;
							wait++;
						}
					}
					spin_unlock_irqrestore(hrrq->lock, flags);
				}

				if (wait)
					dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
				return wait ? FAILED : SUCCESS;
			}
		}
	} while (wait);

	return SUCCESS;
}
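/*
 * ipr_wait_for_ops() relies on the command completion paths (for example
 * ipr_erp_done() below) calling complete() on ipr_cmd->eh_comp; if the wait
 * times out, the pointer is cleared again under the hrrq lock so a late
 * completion cannot touch the stack-allocated completion.
 */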
static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;
	int rc = SUCCESS;

	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter being reset as a result of error recovery.\n");

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason, and the reset failed. */
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		rc = FAILED;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:		resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
		if (ipr_cmd->ioa_cfg->sis64)
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
			       sizeof(struct ipr_ioasa_gata));
		else
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
			       sizeof(struct ipr_ioasa_gata));
	}

	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}
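/*
 * For SATA (GATA) resources the IPR_RESET_DEVICE command above is turned
 * into a PHY reset by setting cdb[2] = IPR_ATA_PHY_RESET and passing the
 * ATA register block, and the returned GATA status is copied back to the
 * libata port; for SCSI devices the plain reset is sent and the IOA handles
 * escalation internally.
 */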
/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 * @deadline:	deadline for the reset
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;
	struct ipr_hrr_queue *hrrq;

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				if (ipr_cmd->scsi_cmd)
					ipr_cmd->done = ipr_scsi_eh_done;
				if (ipr_cmd->qc)
					ipr_cmd->done = ipr_sata_eh_done;
				if (ipr_cmd->qc &&
				    !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
					ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
					ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
				}
			}
		}
		spin_unlock(&hrrq->_lock);
	}
	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		for_each_hrrq(hrrq, ioa_cfg) {
			spin_lock(&hrrq->_lock);
			list_for_each_entry(ipr_cmd,
					    &hrrq->hrrq_pending_q, queue) {
				if (ipr_cmd->ioarcb.res_handle ==
				    res->res_handle) {
					rc = -EIO;
					break;
				}
			}
			spin_unlock(&hrrq->_lock);
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;
	res->reset_occurred = 1;

	return rc ? FAILED : SUCCESS;
}
static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;
	struct ipr_ioa_cfg *ioa_cfg;

	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	if (rc == SUCCESS)
		rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);

	return rc;
}
/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc, int_reg;
	int op_found = 0;
	struct ipr_hrr_queue *hrrq;

	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload ||
	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;
	if (!res)
		return FAILED;

	/*
	 * If we are aborting a timed out op, chances are that the timeout was caused
	 * by a still not detected EEH error. In such cases, reading a register will
	 * trigger the EEH recovery infrastructure.
	 */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	if (!ipr_is_gscsi(res))
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
			if (ipr_cmd->scsi_cmd == scsi_cmd) {
				ipr_cmd->done = ipr_scsi_eh_done;
				op_found = 1;
				break;
			}
		}
		spin_unlock(&hrrq->_lock);
	}

	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED)
		ioasc = 0;

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}
/**
 * ipr_scan_finished - Report whether the initial device scan is done
 * @shost:	scsi host struct
 * @elapsed_time:	elapsed time of the scan in jiffies
 *
 * Return value:
 *	0 if scan in progress / 1 if scan is complete
 **/
static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	int rc = 0;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
		rc = 1;
	if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
		rc = 1;
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;
	struct ipr_ioa_cfg *ioa_cfg;

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	if (rc == SUCCESS)
		rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);

	return rc;
}
/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;
	u32 int_mask_reg;

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg &= ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				return IRQ_HANDLED;
			}
		}

		return IRQ_NONE;
	}

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
		if (ioa_cfg->clear_isr) {
			if (ipr_debug && printk_ratelimit())
				dev_err(&ioa_cfg->pdev->dev,
					"Spurious interrupt detected. 0x%08X\n", int_reg);
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
		}
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
			dev_err(&ioa_cfg->pdev->dev,
				"No Host RRQ. 0x%08X\n", int_reg);
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}
/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 * @number:	number to log with the message
 *
 * Return value:
 *	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}
static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
			    struct list_head *doneq)
{
	u32 ioasc;
	u16 cmd_index;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
	int num_hrrq = 0;

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrr_queue->allow_interrupts)
		return 0;

	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       hrr_queue->toggle_bit) {

		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
			     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
			     IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
			     cmd_index < hrr_queue->min_cmd_id)) {
			ipr_isr_eh(ioa_cfg,
				"Invalid response handle from IOA: ",
				cmd_index);
			break;
		}

		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

		list_move_tail(&ipr_cmd->queue, doneq);

		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
			hrr_queue->hrrq_curr++;
		} else {
			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
			hrr_queue->toggle_bit ^= 1u;
		}
		num_hrrq++;
		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	return num_hrrq;
}
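/*
 * HRRQ entries are consumed only while the toggle bit in the response word
 * matches hrr_queue->toggle_bit; when the cursor wraps from hrrq_end back
 * to hrrq_start the expected toggle value is flipped, which is how the
 * driver tells new responses apart from stale ones in the ring.
 */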
static int ipr_iopoll(struct blk_iopoll *iop, int budget)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_hrr_queue *hrrq;
	struct ipr_cmnd *ipr_cmd, *temp;
	unsigned long hrrq_flags;
	int completed_ops;
	LIST_HEAD(doneq);

	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
	ioa_cfg = hrrq->ioa_cfg;

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);

	if (completed_ops < budget)
		blk_iopoll_complete(iop);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}

	return completed_ops;
}
/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	u32 int_reg = 0;
	int num_hrrq = 0;
	int irq_none = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	while (1) {
		if (ipr_process_hrrq(hrrq, -1, &doneq)) {
			rc = IRQ_HANDLED;

			if (!ioa_cfg->clear_isr)
				break;

			/* Clear the PCI interrupt */
			num_hrrq = 0;
			do {
				writel(IPR_PCII_HRRQ_UPDATED,
				       ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

		} else if (rc == IRQ_NONE && irq_none == 0) {
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			irq_none++;
		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
			   int_reg & IPR_PCII_HRRQ_UPDATED) {
			ipr_isr_eh(ioa_cfg,
				"Error clearing HRRQ: ", num_hrrq);
			rc = IRQ_HANDLED;
			break;
		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}
/**
 * ipr_isr_mhrrq - Interrupt service routine for secondary HRRQ vectors
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       hrrq->toggle_bit) {
			if (!blk_iopoll_sched_prep(&hrrq->iopoll))
				blk_iopoll_sched(&hrrq->iopoll);
			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
			return IRQ_HANDLED;
		}
	} else {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
			hrrq->toggle_bit)

			if (ipr_process_hrrq(hrrq, -1, &doneq))
				rc = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}
/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		if (printk_ratelimit())
			dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	ioarcb->data_transfer_length = cpu_to_be32(length);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
				    offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
	}

	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
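/*
 * On SIS32, if the S/G list is short enough to fit in the IOARCB's add_data
 * area, the IOADL is embedded directly in the IOARCB (saving the adapter a
 * separate DMA fetch); otherwise the preallocated ioadl array in the
 * command block is used.
 */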
/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:		ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}
}
/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}
/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_cmd->device->simple_tags) {
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:		resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 *	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->hdr.ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
		data_len = sizeof(struct ipr_ioasa64);
	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
		data_len = sizeof(struct ipr_ioasa);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}
/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
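/*
 * Note on ipr_gen_sense(): two sense formats are produced.  When a volume
 * set device reports an unrecoverable media error whose failing LBA does
 * not fit in 32 bits, descriptor-format sense (response code 0x72) is
 * built with an Information descriptor carrying the 64-bit LBA.  All other
 * cases use fixed-format sense (response code 0x70): bytes 3-6 carry a
 * 32-bit failing LBA with the Valid bit set in byte 0, and for illegal
 * requests bytes 15-17 hold the sense-key-specific field pointer taken
 * from the IOASC specific data.
 */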
/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;

	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
			     SCSI_SENSE_BUFFERSIZE));
	else
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
			     SCSI_SENSE_BUFFERSIZE));
	return 1;
}
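/*
 * Note: SIS-64 adapters return autosense in the larger ipr_ioasa64 layout
 * and SIS-32 adapters in ipr_ioasa; in both cases the copy into
 * scsi_cmd->sense_buffer is clamped to SCSI_SENSE_BUFFERSIZE so a long
 * autosense buffer can never overrun the mid-layer's sense buffer.
 */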
/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	case IPR_IOASC_IR_NON_OPTIMIZED:
		if (res->raw_mode) {
			res->raw_mode = 0;
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		} else
			scsi_cmd->result |= (DID_ERROR << 16);
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	unsigned long lock_flags;

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(scsi_cmd);

		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
		scsi_cmd->scsi_done(scsi_cmd);
		if (ipr_cmd->eh_comp)
			complete(ipr_cmd->eh_comp);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
	} else {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		spin_lock(&ipr_cmd->hrrq->_lock);
		ipr_erp_start(ioa_cfg, ipr_cmd);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	}
}
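/*
 * Note on locking in ipr_scsi_done(): the common successful-completion
 * path takes only the per-HRRQ lock before completing the command and
 * returning the ipr_cmnd to the free queue.  Only when the IOASC carries
 * a sense key does the code additionally take the SCSI host lock, since
 * ipr_erp_start() may kick off error recovery that touches host-wide state.
 */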
6306 * ipr_queuecommand - Queue a mid-layer request
6307 * @shost: scsi host struct
6308 * @scsi_cmd: scsi command struct
6310 * This function queues a request generated by the mid-layer.
6314 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6315 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6317 static int ipr_queuecommand(struct Scsi_Host
*shost
,
6318 struct scsi_cmnd
*scsi_cmd
)
6320 struct ipr_ioa_cfg
*ioa_cfg
;
6321 struct ipr_resource_entry
*res
;
6322 struct ipr_ioarcb
*ioarcb
;
6323 struct ipr_cmnd
*ipr_cmd
;
6324 unsigned long hrrq_flags
, lock_flags
;
6326 struct ipr_hrr_queue
*hrrq
;
6329 ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
6331 scsi_cmd
->result
= (DID_OK
<< 16);
6332 res
= scsi_cmd
->device
->hostdata
;
6334 if (ipr_is_gata(res
) && res
->sata_port
) {
6335 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
6336 rc
= ata_sas_queuecmd(scsi_cmd
, res
->sata_port
->ap
);
6337 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
6341 hrrq_id
= ipr_get_hrrq_index(ioa_cfg
);
6342 hrrq
= &ioa_cfg
->hrrq
[hrrq_id
];
6344 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
6346 * We are currently blocking all devices due to a host reset
6347 * We have told the host to stop giving us new requests, but
6348 * ERP ops don't count. FIXME
6350 if (unlikely(!hrrq
->allow_cmds
&& !hrrq
->ioa_is_dead
&& !hrrq
->removing_ioa
)) {
6351 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6352 return SCSI_MLQUEUE_HOST_BUSY
;
6356 * FIXME - Create scsi_set_host_offline interface
6357 * and the ioa_is_dead check can be removed
6359 if (unlikely(hrrq
->ioa_is_dead
|| hrrq
->removing_ioa
|| !res
)) {
6360 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6364 ipr_cmd
= __ipr_get_free_ipr_cmnd(hrrq
);
6365 if (ipr_cmd
== NULL
) {
6366 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6367 return SCSI_MLQUEUE_HOST_BUSY
;
6369 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6371 ipr_init_ipr_cmnd(ipr_cmd
, ipr_scsi_done
);
6372 ioarcb
= &ipr_cmd
->ioarcb
;
6374 memcpy(ioarcb
->cmd_pkt
.cdb
, scsi_cmd
->cmnd
, scsi_cmd
->cmd_len
);
6375 ipr_cmd
->scsi_cmd
= scsi_cmd
;
6376 ipr_cmd
->done
= ipr_scsi_eh_done
;
6378 if (ipr_is_gscsi(res
)) {
6379 if (scsi_cmd
->underflow
== 0)
6380 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_ULEN_CHK
;
6382 if (res
->reset_occurred
) {
6383 res
->reset_occurred
= 0;
6384 ioarcb
->cmd_pkt
.flags_lo
|= IPR_FLAGS_LO_DELAY_AFTER_RST
;
6388 if (ipr_is_gscsi(res
) || ipr_is_vset_device(res
)) {
6389 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_LINK_DESC
;
6391 ioarcb
->cmd_pkt
.flags_lo
|= IPR_FLAGS_LO_ALIGNED_BFR
;
6392 if (scsi_cmd
->flags
& SCMD_TAGGED
)
6393 ioarcb
->cmd_pkt
.flags_lo
|= IPR_FLAGS_LO_SIMPLE_TASK
;
6395 ioarcb
->cmd_pkt
.flags_lo
|= IPR_FLAGS_LO_UNTAGGED_TASK
;
6398 if (scsi_cmd
->cmnd
[0] >= 0xC0 &&
6399 (!ipr_is_gscsi(res
) || scsi_cmd
->cmnd
[0] == IPR_QUERY_RSRC_STATE
)) {
6400 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
6402 if (res
->raw_mode
&& ipr_is_af_dasd_device(res
)) {
6403 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_PIPE
;
6405 if (scsi_cmd
->underflow
== 0)
6406 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_ULEN_CHK
;
6410 rc
= ipr_build_ioadl64(ioa_cfg
, ipr_cmd
);
6412 rc
= ipr_build_ioadl(ioa_cfg
, ipr_cmd
);
6414 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
6415 if (unlikely(rc
|| (!hrrq
->allow_cmds
&& !hrrq
->ioa_is_dead
))) {
6416 list_add_tail(&ipr_cmd
->queue
, &hrrq
->hrrq_free_q
);
6417 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6419 scsi_dma_unmap(scsi_cmd
);
6420 return SCSI_MLQUEUE_HOST_BUSY
;
6423 if (unlikely(hrrq
->ioa_is_dead
)) {
6424 list_add_tail(&ipr_cmd
->queue
, &hrrq
->hrrq_free_q
);
6425 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6426 scsi_dma_unmap(scsi_cmd
);
6430 ioarcb
->res_handle
= res
->res_handle
;
6431 if (res
->needs_sync_complete
) {
6432 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_SYNC_COMPLETE
;
6433 res
->needs_sync_complete
= 0;
6435 list_add_tail(&ipr_cmd
->queue
, &hrrq
->hrrq_pending_q
);
6436 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, IPR_GET_RES_PHYS_LOC(res
));
6437 ipr_send_command(ipr_cmd
);
6438 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6442 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
6443 memset(scsi_cmd
->sense_buffer
, 0, SCSI_SENSE_BUFFERSIZE
);
6444 scsi_cmd
->result
= (DID_NO_CONNECT
<< 16);
6445 scsi_cmd
->scsi_done(scsi_cmd
);
6446 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res)) {
		if (cmd == HDIO_GET_IDENTITY)
			return -ENOTTY;
		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
	}

	return -EINVAL;
}

/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 *	pointer to buffer with description string
 **/
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.scan_finished = ipr_scan_finished,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME,
};
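/*
 * The template above is handed to the SCSI mid-layer when a host is
 * allocated during adapter probe.  As a rough sketch (the actual probe
 * path is outside this excerpt and does considerably more setup):
 *
 *	struct Scsi_Host *host;
 *
 *	host = scsi_host_alloc(&driver_template, sizeof(struct ipr_ioa_cfg));
 *	if (!host)
 *		return -ENOMEM;
 *	// ...initialize ioa_cfg, map registers, request IRQs...
 *	rc = scsi_add_host(host, &pdev->dev);
 */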
6524 * ipr_ata_phy_reset - libata phy_reset handler
6525 * @ap: ata port to reset
6528 static void ipr_ata_phy_reset(struct ata_port
*ap
)
6530 unsigned long flags
;
6531 struct ipr_sata_port
*sata_port
= ap
->private_data
;
6532 struct ipr_resource_entry
*res
= sata_port
->res
;
6533 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6537 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6538 while (ioa_cfg
->in_reset_reload
) {
6539 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6540 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
6541 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6544 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
)
6547 rc
= ipr_device_reset(ioa_cfg
, res
);
6550 ap
->link
.device
[0].class = ATA_DEV_NONE
;
6554 ap
->link
.device
[0].class = res
->ata_class
;
6555 if (ap
->link
.device
[0].class == ATA_DEV_UNKNOWN
)
6556 ap
->link
.device
[0].class = ATA_DEV_NONE
;
6559 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6564 * ipr_ata_post_internal - Cleanup after an internal command
6565 * @qc: ATA queued command
6570 static void ipr_ata_post_internal(struct ata_queued_cmd
*qc
)
6572 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
6573 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6574 struct ipr_cmnd
*ipr_cmd
;
6575 struct ipr_hrr_queue
*hrrq
;
6576 unsigned long flags
;
6578 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6579 while (ioa_cfg
->in_reset_reload
) {
6580 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6581 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
6582 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6585 for_each_hrrq(hrrq
, ioa_cfg
) {
6586 spin_lock(&hrrq
->_lock
);
6587 list_for_each_entry(ipr_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
6588 if (ipr_cmd
->qc
== qc
) {
6589 ipr_device_reset(ioa_cfg
, sata_port
->res
);
6593 spin_unlock(&hrrq
->_lock
);
6595 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
/**
 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
 * @regs:	destination
 * @tf:	source ATA taskfile
 **/
static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
			     struct ata_taskfile *tf)
{
	regs->feature = tf->feature;
	regs->nsect = tf->nsect;
	regs->lbal = tf->lbal;
	regs->lbam = tf->lbam;
	regs->lbah = tf->lbah;
	regs->device = tf->device;
	regs->command = tf->command;
	regs->hob_feature = tf->hob_feature;
	regs->hob_nsect = tf->hob_nsect;
	regs->hob_lbal = tf->hob_lbal;
	regs->hob_lbam = tf->hob_lbam;
	regs->hob_lbah = tf->hob_lbah;
	regs->ctl = tf->ctl;
}
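/*
 * Note: the copy above covers both the primary taskfile registers and the
 * HOB (high order byte) set, so 48-bit LBA commands reach the adapter
 * intact.
 */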
6625 * ipr_sata_done - done function for SATA commands
6626 * @ipr_cmd: ipr command struct
6628 * This function is invoked by the interrupt handler for
6629 * ops generated by the SCSI mid-layer to SATA devices
6634 static void ipr_sata_done(struct ipr_cmnd
*ipr_cmd
)
6636 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
6637 struct ata_queued_cmd
*qc
= ipr_cmd
->qc
;
6638 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
6639 struct ipr_resource_entry
*res
= sata_port
->res
;
6640 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
6642 spin_lock(&ipr_cmd
->hrrq
->_lock
);
6643 if (ipr_cmd
->ioa_cfg
->sis64
)
6644 memcpy(&sata_port
->ioasa
, &ipr_cmd
->s
.ioasa64
.u
.gata
,
6645 sizeof(struct ipr_ioasa_gata
));
6647 memcpy(&sata_port
->ioasa
, &ipr_cmd
->s
.ioasa
.u
.gata
,
6648 sizeof(struct ipr_ioasa_gata
));
6649 ipr_dump_ioasa(ioa_cfg
, ipr_cmd
, res
);
6651 if (be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc_specific
) & IPR_ATA_DEVICE_WAS_RESET
)
6652 scsi_report_device_reset(ioa_cfg
->host
, res
->bus
, res
->target
);
6654 if (IPR_IOASC_SENSE_KEY(ioasc
) > RECOVERED_ERROR
)
6655 qc
->err_mask
|= __ac_err_mask(sata_port
->ioasa
.status
);
6657 qc
->err_mask
|= ac_err_mask(sata_port
->ioasa
.status
);
6658 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6659 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
6660 ata_qc_complete(qc
);
6664 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6665 * @ipr_cmd: ipr command struct
6666 * @qc: ATA queued command
6669 static void ipr_build_ata_ioadl64(struct ipr_cmnd
*ipr_cmd
,
6670 struct ata_queued_cmd
*qc
)
6672 u32 ioadl_flags
= 0;
6673 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6674 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ata_ioadl
.ioadl64
;
6675 struct ipr_ioadl64_desc
*last_ioadl64
= NULL
;
6676 int len
= qc
->nbytes
;
6677 struct scatterlist
*sg
;
6679 dma_addr_t dma_addr
= ipr_cmd
->dma_addr
;
6684 if (qc
->dma_dir
== DMA_TO_DEVICE
) {
6685 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
6686 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6687 } else if (qc
->dma_dir
== DMA_FROM_DEVICE
)
6688 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
6690 ioarcb
->data_transfer_length
= cpu_to_be32(len
);
6692 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
) * ipr_cmd
->dma_use_sg
);
6693 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
6694 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ata_ioadl
.ioadl64
));
6696 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
6697 ioadl64
->flags
= cpu_to_be32(ioadl_flags
);
6698 ioadl64
->data_len
= cpu_to_be32(sg_dma_len(sg
));
6699 ioadl64
->address
= cpu_to_be64(sg_dma_address(sg
));
6701 last_ioadl64
= ioadl64
;
6705 if (likely(last_ioadl64
))
6706 last_ioadl64
->flags
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
6710 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6711 * @ipr_cmd: ipr command struct
6712 * @qc: ATA queued command
6715 static void ipr_build_ata_ioadl(struct ipr_cmnd
*ipr_cmd
,
6716 struct ata_queued_cmd
*qc
)
6718 u32 ioadl_flags
= 0;
6719 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6720 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
6721 struct ipr_ioadl_desc
*last_ioadl
= NULL
;
6722 int len
= qc
->nbytes
;
6723 struct scatterlist
*sg
;
6729 if (qc
->dma_dir
== DMA_TO_DEVICE
) {
6730 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
6731 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6732 ioarcb
->data_transfer_length
= cpu_to_be32(len
);
6734 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6735 } else if (qc
->dma_dir
== DMA_FROM_DEVICE
) {
6736 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
6737 ioarcb
->read_data_transfer_length
= cpu_to_be32(len
);
6738 ioarcb
->read_ioadl_len
=
6739 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6742 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
6743 ioadl
->flags_and_data_len
= cpu_to_be32(ioadl_flags
| sg_dma_len(sg
));
6744 ioadl
->address
= cpu_to_be32(sg_dma_address(sg
));
6750 if (likely(last_ioadl
))
6751 last_ioadl
->flags_and_data_len
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
6755 * ipr_qc_defer - Get a free ipr_cmd
6756 * @qc: queued command
6761 static int ipr_qc_defer(struct ata_queued_cmd
*qc
)
6763 struct ata_port
*ap
= qc
->ap
;
6764 struct ipr_sata_port
*sata_port
= ap
->private_data
;
6765 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6766 struct ipr_cmnd
*ipr_cmd
;
6767 struct ipr_hrr_queue
*hrrq
;
6770 hrrq_id
= ipr_get_hrrq_index(ioa_cfg
);
6771 hrrq
= &ioa_cfg
->hrrq
[hrrq_id
];
6773 qc
->lldd_task
= NULL
;
6774 spin_lock(&hrrq
->_lock
);
6775 if (unlikely(hrrq
->ioa_is_dead
)) {
6776 spin_unlock(&hrrq
->_lock
);
6780 if (unlikely(!hrrq
->allow_cmds
)) {
6781 spin_unlock(&hrrq
->_lock
);
6782 return ATA_DEFER_LINK
;
6785 ipr_cmd
= __ipr_get_free_ipr_cmnd(hrrq
);
6786 if (ipr_cmd
== NULL
) {
6787 spin_unlock(&hrrq
->_lock
);
6788 return ATA_DEFER_LINK
;
6791 qc
->lldd_task
= ipr_cmd
;
6792 spin_unlock(&hrrq
->_lock
);
6797 * ipr_qc_issue - Issue a SATA qc to a device
6798 * @qc: queued command
6803 static unsigned int ipr_qc_issue(struct ata_queued_cmd
*qc
)
6805 struct ata_port
*ap
= qc
->ap
;
6806 struct ipr_sata_port
*sata_port
= ap
->private_data
;
6807 struct ipr_resource_entry
*res
= sata_port
->res
;
6808 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6809 struct ipr_cmnd
*ipr_cmd
;
6810 struct ipr_ioarcb
*ioarcb
;
6811 struct ipr_ioarcb_ata_regs
*regs
;
6813 if (qc
->lldd_task
== NULL
)
6816 ipr_cmd
= qc
->lldd_task
;
6817 if (ipr_cmd
== NULL
)
6818 return AC_ERR_SYSTEM
;
6820 qc
->lldd_task
= NULL
;
6821 spin_lock(&ipr_cmd
->hrrq
->_lock
);
6822 if (unlikely(!ipr_cmd
->hrrq
->allow_cmds
||
6823 ipr_cmd
->hrrq
->ioa_is_dead
)) {
6824 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6825 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
6826 return AC_ERR_SYSTEM
;
6829 ipr_init_ipr_cmnd(ipr_cmd
, ipr_lock_and_done
);
6830 ioarcb
= &ipr_cmd
->ioarcb
;
6832 if (ioa_cfg
->sis64
) {
6833 regs
= &ipr_cmd
->i
.ata_ioadl
.regs
;
6834 ioarcb
->add_cmd_parms_offset
= cpu_to_be16(sizeof(*ioarcb
));
6836 regs
= &ioarcb
->u
.add_data
.u
.regs
;
6838 memset(regs
, 0, sizeof(*regs
));
6839 ioarcb
->add_cmd_parms_len
= cpu_to_be16(sizeof(*regs
));
6841 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
6843 ipr_cmd
->done
= ipr_sata_done
;
6844 ipr_cmd
->ioarcb
.res_handle
= res
->res_handle
;
6845 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_ATA_PASSTHRU
;
6846 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_LINK_DESC
;
6847 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_ULEN_CHK
;
6848 ipr_cmd
->dma_use_sg
= qc
->n_elem
;
6851 ipr_build_ata_ioadl64(ipr_cmd
, qc
);
6853 ipr_build_ata_ioadl(ipr_cmd
, qc
);
6855 regs
->flags
|= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION
;
6856 ipr_copy_sata_tf(regs
, &qc
->tf
);
6857 memcpy(ioarcb
->cmd_pkt
.cdb
, qc
->cdb
, IPR_MAX_CDB_LEN
);
6858 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, IPR_GET_RES_PHYS_LOC(res
));
6860 switch (qc
->tf
.protocol
) {
6861 case ATA_PROT_NODATA
:
6866 regs
->flags
|= IPR_ATA_FLAG_XFER_TYPE_DMA
;
6869 case ATAPI_PROT_PIO
:
6870 case ATAPI_PROT_NODATA
:
6871 regs
->flags
|= IPR_ATA_FLAG_PACKET_CMD
;
6874 case ATAPI_PROT_DMA
:
6875 regs
->flags
|= IPR_ATA_FLAG_PACKET_CMD
;
6876 regs
->flags
|= IPR_ATA_FLAG_XFER_TYPE_DMA
;
6881 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
6882 return AC_ERR_INVALID
;
6885 ipr_send_command(ipr_cmd
);
6886 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
/**
 * ipr_qc_fill_rtf - Read result TF
 * @qc: ATA queued command
 *
 * Return value:
 *	true
 **/
static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioasa_gata *g = &sata_port->ioasa;
	struct ata_taskfile *tf = &qc->result_tf;

	tf->feature = g->error;
	tf->nsect = g->nsect;
	tf->lbal = g->lbal;
	tf->lbam = g->lbam;
	tf->lbah = g->lbah;
	tf->device = g->device;
	tf->command = g->status;
	tf->hob_nsect = g->hob_nsect;
	tf->hob_lbal = g->hob_lbal;
	tf->hob_lbam = g->hob_lbam;
	tf->hob_lbah = g->hob_lbah;

	return true;
}
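/*
 * Note: the result taskfile is rebuilt from the ipr_ioasa_gata image that
 * ipr_sata_done() cached in sata_port->ioasa, since the SATA device sits
 * behind the IOA and its shadow registers are not directly readable by
 * libata.
 */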
static struct ata_port_operations ipr_sata_ops = {
	.phy_reset = ipr_ata_phy_reset,
	.hardreset = ipr_sata_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.qc_prep = ata_noop_qc_prep,
	.qc_defer = ipr_qc_defer,
	.qc_issue = ipr_qc_issue,
	.qc_fill_rtf = ipr_qc_fill_rtf,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};

static struct ata_port_info sata_port_info = {
	.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
		 ATA_FLAG_SAS_HOST,
	.pio_mask = ATA_PIO4_ONLY,
	.mwdma_mask = ATA_MWDMA2,
	.udma_mask = ATA_UDMA6,
	.port_ops = &ipr_sata_ops
};
#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
	/* PVR values of the affected pSeries processors (entries not shown here) */
};

/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 *	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
			if (pvr_version_is(ipr_blocked_processors[i]))
				return 1;
		}
	}
	return 0;
}
#else
#define ipr_invalid_adapter(ioa_cfg) 0
#endif
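/*
 * When CONFIG_PPC_PSERIES is not set there is no processor version
 * register to check, so the macro above simply reports every adapter as
 * supported and the blocked-processor table is compiled out.
 */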
/**
 * ipr_ioa_bringdown_done - IOA bring down completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter bring down.
 * It wakes any reset sleepers.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
		spin_unlock_irq(ioa_cfg->host->host_lock);
		scsi_unblock_requests(ioa_cfg->host);
		spin_lock_irq(ioa_cfg->host->host_lock);
	}

	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->reset_retries = 0;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].ioa_is_dead = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	return IPR_RC_JOB_RETURN;
}
7019 * ipr_ioa_reset_done - IOA reset completion.
7020 * @ipr_cmd: ipr command struct
7022 * This function processes the completion of an adapter reset.
7023 * It schedules any necessary mid-layer add/removes and
7024 * wakes any reset sleepers.
7029 static int ipr_ioa_reset_done(struct ipr_cmnd
*ipr_cmd
)
7031 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7032 struct ipr_resource_entry
*res
;
7033 struct ipr_hostrcb
*hostrcb
, *temp
;
7037 ioa_cfg
->in_reset_reload
= 0;
7038 for (j
= 0; j
< ioa_cfg
->hrrq_num
; j
++) {
7039 spin_lock(&ioa_cfg
->hrrq
[j
]._lock
);
7040 ioa_cfg
->hrrq
[j
].allow_cmds
= 1;
7041 spin_unlock(&ioa_cfg
->hrrq
[j
]._lock
);
7044 ioa_cfg
->reset_cmd
= NULL
;
7045 ioa_cfg
->doorbell
|= IPR_RUNTIME_RESET
;
7047 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
7048 if (res
->add_to_ml
|| res
->del_from_ml
) {
7053 schedule_work(&ioa_cfg
->work_q
);
7055 list_for_each_entry_safe(hostrcb
, temp
, &ioa_cfg
->hostrcb_free_q
, queue
) {
7056 list_del(&hostrcb
->queue
);
7057 if (i
++ < IPR_NUM_LOG_HCAMS
)
7058 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_LOG_DATA
, hostrcb
);
7060 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
, hostrcb
);
7063 scsi_report_bus_reset(ioa_cfg
->host
, IPR_VSET_BUS
);
7064 dev_info(&ioa_cfg
->pdev
->dev
, "IOA initialized.\n");
7066 ioa_cfg
->reset_retries
= 0;
7067 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
7068 wake_up_all(&ioa_cfg
->reset_wait_q
);
7070 spin_unlock(ioa_cfg
->host
->host_lock
);
7071 scsi_unblock_requests(ioa_cfg
->host
);
7072 spin_lock(ioa_cfg
->host
->host_lock
);
7074 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
)
7075 scsi_block_requests(ioa_cfg
->host
);
7077 schedule_work(&ioa_cfg
->work_q
);
7079 return IPR_RC_JOB_RETURN
;
7083 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7084 * @supported_dev: supported device struct
7085 * @vpids: vendor product id struct
7090 static void ipr_set_sup_dev_dflt(struct ipr_supported_device
*supported_dev
,
7091 struct ipr_std_inq_vpids
*vpids
)
7093 memset(supported_dev
, 0, sizeof(struct ipr_supported_device
));
7094 memcpy(&supported_dev
->vpids
, vpids
, sizeof(struct ipr_std_inq_vpids
));
7095 supported_dev
->num_records
= 1;
7096 supported_dev
->data_length
=
7097 cpu_to_be16(sizeof(struct ipr_supported_device
));
7098 supported_dev
->reserved
= 0;
7102 * ipr_set_supported_devs - Send Set Supported Devices for a device
7103 * @ipr_cmd: ipr command struct
7105 * This function sends a Set Supported Devices to the adapter
7108 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7110 static int ipr_set_supported_devs(struct ipr_cmnd
*ipr_cmd
)
7112 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7113 struct ipr_supported_device
*supp_dev
= &ioa_cfg
->vpd_cbs
->supp_dev
;
7114 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7115 struct ipr_resource_entry
*res
= ipr_cmd
->u
.res
;
7117 ipr_cmd
->job_step
= ipr_ioa_reset_done
;
7119 list_for_each_entry_continue(res
, &ioa_cfg
->used_res_q
, queue
) {
7120 if (!ipr_is_scsi_disk(res
))
7123 ipr_cmd
->u
.res
= res
;
7124 ipr_set_sup_dev_dflt(supp_dev
, &res
->std_inq_data
.vpids
);
7126 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7127 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
7128 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7130 ioarcb
->cmd_pkt
.cdb
[0] = IPR_SET_SUPPORTED_DEVICES
;
7131 ioarcb
->cmd_pkt
.cdb
[1] = IPR_SET_ALL_SUPPORTED_DEVICES
;
7132 ioarcb
->cmd_pkt
.cdb
[7] = (sizeof(struct ipr_supported_device
) >> 8) & 0xff;
7133 ioarcb
->cmd_pkt
.cdb
[8] = sizeof(struct ipr_supported_device
) & 0xff;
7135 ipr_init_ioadl(ipr_cmd
,
7136 ioa_cfg
->vpd_cbs_dma
+
7137 offsetof(struct ipr_misc_cbs
, supp_dev
),
7138 sizeof(struct ipr_supported_device
),
7139 IPR_IOADL_FLAGS_WRITE_LAST
);
7141 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
7142 IPR_SET_SUP_DEVICE_TIMEOUT
);
7144 if (!ioa_cfg
->sis64
)
7145 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7147 return IPR_RC_JOB_RETURN
;
7151 return IPR_RC_JOB_CONTINUE
;
/**
 * ipr_get_mode_page - Locate specified mode page
 * @mode_pages:	mode page buffer
 * @page_code:	page code to find
 * @len:	minimum required length for mode page
 *
 * Return value:
 *	pointer to mode page / NULL on failure
 **/
static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
			       u32 page_code, u32 len)
{
	struct ipr_mode_page_hdr *mode_hdr;
	u32 page_length;
	u32 length;

	if (!mode_pages || (mode_pages->hdr.length == 0))
		return NULL;

	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
	mode_hdr = (struct ipr_mode_page_hdr *)
		(mode_pages->data + mode_pages->hdr.block_desc_len);

	while (length) {
		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
				return mode_hdr;
			break;
		} else {
			page_length = (sizeof(struct ipr_mode_page_hdr) +
				       mode_hdr->page_length);
			length -= page_length;
			mode_hdr = (struct ipr_mode_page_hdr *)
				((unsigned long)mode_hdr + page_length);
		}
	}

	return NULL;
}
/**
 * ipr_check_term_power - Check for term power errors
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	IOAFP mode pages buffer
 *
 * Check the IOAFP's mode page 28 for term power errors
 **/
static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_mode_pages *mode_pages)
{
	int i;
	int entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	bus = mode_page->bus;

	for (i = 0; i < mode_page->num_entries; i++) {
		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
			dev_err(&ioa_cfg->pdev->dev,
				"Term power is absent on scsi bus %d\n",
				bus->res_addr.bus);
		}

		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
	}
}

/**
 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
 * @ioa_cfg:	ioa config struct
 *
 * Looks through the config table checking for SES devices. If
 * the SES device is in the SES table indicating a maximum SCSI
 * bus speed, the speed is limited for the bus.
 **/
static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
{
	u32 max_xfer_rate;
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
						       ioa_cfg->bus_attr[i].bus_width);

		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
	}
}
7255 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7256 * @ioa_cfg: ioa config struct
7257 * @mode_pages: mode page 28 buffer
7259 * Updates mode page 28 based on driver configuration
7264 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg
*ioa_cfg
,
7265 struct ipr_mode_pages
*mode_pages
)
7267 int i
, entry_length
;
7268 struct ipr_dev_bus_entry
*bus
;
7269 struct ipr_bus_attributes
*bus_attr
;
7270 struct ipr_mode_page28
*mode_page
;
7272 mode_page
= ipr_get_mode_page(mode_pages
, 0x28,
7273 sizeof(struct ipr_mode_page28
));
7275 entry_length
= mode_page
->entry_length
;
7277 /* Loop for each device bus entry */
7278 for (i
= 0, bus
= mode_page
->bus
;
7279 i
< mode_page
->num_entries
;
7280 i
++, bus
= (struct ipr_dev_bus_entry
*)((u8
*)bus
+ entry_length
)) {
7281 if (bus
->res_addr
.bus
> IPR_MAX_NUM_BUSES
) {
7282 dev_err(&ioa_cfg
->pdev
->dev
,
7283 "Invalid resource address reported: 0x%08X\n",
7284 IPR_GET_PHYS_LOC(bus
->res_addr
));
7288 bus_attr
= &ioa_cfg
->bus_attr
[i
];
7289 bus
->extended_reset_delay
= IPR_EXTENDED_RESET_DELAY
;
7290 bus
->bus_width
= bus_attr
->bus_width
;
7291 bus
->max_xfer_rate
= cpu_to_be32(bus_attr
->max_xfer_rate
);
7292 bus
->flags
&= ~IPR_SCSI_ATTR_QAS_MASK
;
7293 if (bus_attr
->qas_enabled
)
7294 bus
->flags
|= IPR_SCSI_ATTR_ENABLE_QAS
;
7296 bus
->flags
|= IPR_SCSI_ATTR_DISABLE_QAS
;
7301 * ipr_build_mode_select - Build a mode select command
7302 * @ipr_cmd: ipr command struct
7303 * @res_handle: resource handle to send command to
7304 * @parm: Byte 2 of Mode Sense command
7305 * @dma_addr: DMA buffer address
7306 * @xfer_len: data transfer length
7311 static void ipr_build_mode_select(struct ipr_cmnd
*ipr_cmd
,
7312 __be32 res_handle
, u8 parm
,
7313 dma_addr_t dma_addr
, u8 xfer_len
)
7315 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7317 ioarcb
->res_handle
= res_handle
;
7318 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
7319 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
7320 ioarcb
->cmd_pkt
.cdb
[0] = MODE_SELECT
;
7321 ioarcb
->cmd_pkt
.cdb
[1] = parm
;
7322 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
7324 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_WRITE_LAST
);
7328 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7329 * @ipr_cmd: ipr command struct
7331 * This function sets up the SCSI bus attributes and sends
7332 * a Mode Select for Page 28 to activate them.
7337 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd
*ipr_cmd
)
7339 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7340 struct ipr_mode_pages
*mode_pages
= &ioa_cfg
->vpd_cbs
->mode_pages
;
7344 ipr_scsi_bus_speed_limit(ioa_cfg
);
7345 ipr_check_term_power(ioa_cfg
, mode_pages
);
7346 ipr_modify_ioafp_mode_page_28(ioa_cfg
, mode_pages
);
7347 length
= mode_pages
->hdr
.length
+ 1;
7348 mode_pages
->hdr
.length
= 0;
7350 ipr_build_mode_select(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
), 0x11,
7351 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, mode_pages
),
7354 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7355 ipr_cmd
->u
.res
= list_entry(ioa_cfg
->used_res_q
.next
,
7356 struct ipr_resource_entry
, queue
);
7357 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7360 return IPR_RC_JOB_RETURN
;
7364 * ipr_build_mode_sense - Builds a mode sense command
7365 * @ipr_cmd: ipr command struct
7366 * @res: resource entry struct
7367 * @parm: Byte 2 of mode sense command
7368 * @dma_addr: DMA address of mode sense buffer
7369 * @xfer_len: Size of DMA buffer
7374 static void ipr_build_mode_sense(struct ipr_cmnd
*ipr_cmd
,
7376 u8 parm
, dma_addr_t dma_addr
, u8 xfer_len
)
7378 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7380 ioarcb
->res_handle
= res_handle
;
7381 ioarcb
->cmd_pkt
.cdb
[0] = MODE_SENSE
;
7382 ioarcb
->cmd_pkt
.cdb
[2] = parm
;
7383 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
7384 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
7386 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_READ_LAST
);
7390 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7391 * @ipr_cmd: ipr command struct
7393 * This function handles the failure of an IOA bringup command.
7398 static int ipr_reset_cmd_failed(struct ipr_cmnd
*ipr_cmd
)
7400 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7401 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7403 dev_err(&ioa_cfg
->pdev
->dev
,
7404 "0x%02X failed with IOASC: 0x%08X\n",
7405 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[0], ioasc
);
7407 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
7408 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
7409 return IPR_RC_JOB_RETURN
;
7413 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7414 * @ipr_cmd: ipr command struct
7416 * This function handles the failure of a Mode Sense to the IOAFP.
7417 * Some adapters do not handle all mode pages.
7420 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7422 static int ipr_reset_mode_sense_failed(struct ipr_cmnd
*ipr_cmd
)
7424 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7425 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7427 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
) {
7428 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7429 ipr_cmd
->u
.res
= list_entry(ioa_cfg
->used_res_q
.next
,
7430 struct ipr_resource_entry
, queue
);
7431 return IPR_RC_JOB_CONTINUE
;
7434 return ipr_reset_cmd_failed(ipr_cmd
);
7438 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7439 * @ipr_cmd: ipr command struct
7441 * This function send a Page 28 mode sense to the IOA to
7442 * retrieve SCSI bus attributes.
7447 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd
*ipr_cmd
)
7449 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7452 ipr_build_mode_sense(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
),
7453 0x28, ioa_cfg
->vpd_cbs_dma
+
7454 offsetof(struct ipr_misc_cbs
, mode_pages
),
7455 sizeof(struct ipr_mode_pages
));
7457 ipr_cmd
->job_step
= ipr_ioafp_mode_select_page28
;
7458 ipr_cmd
->job_step_failed
= ipr_reset_mode_sense_failed
;
7460 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7463 return IPR_RC_JOB_RETURN
;
7467 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7468 * @ipr_cmd: ipr command struct
7470 * This function enables dual IOA RAID support if possible.
7475 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd
*ipr_cmd
)
7477 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7478 struct ipr_mode_pages
*mode_pages
= &ioa_cfg
->vpd_cbs
->mode_pages
;
7479 struct ipr_mode_page24
*mode_page
;
7483 mode_page
= ipr_get_mode_page(mode_pages
, 0x24,
7484 sizeof(struct ipr_mode_page24
));
7487 mode_page
->flags
|= IPR_ENABLE_DUAL_IOA_AF
;
7489 length
= mode_pages
->hdr
.length
+ 1;
7490 mode_pages
->hdr
.length
= 0;
7492 ipr_build_mode_select(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
), 0x11,
7493 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, mode_pages
),
7496 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7497 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7500 return IPR_RC_JOB_RETURN
;
7504 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7505 * @ipr_cmd: ipr command struct
7507 * This function handles the failure of a Mode Sense to the IOAFP.
7508 * Some adapters do not handle all mode pages.
7511 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7513 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd
*ipr_cmd
)
7515 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7517 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
) {
7518 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7519 return IPR_RC_JOB_CONTINUE
;
7522 return ipr_reset_cmd_failed(ipr_cmd
);
7526 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7527 * @ipr_cmd: ipr command struct
7529 * This function send a mode sense to the IOA to retrieve
7530 * the IOA Advanced Function Control mode page.
7535 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd
*ipr_cmd
)
7537 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7540 ipr_build_mode_sense(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
),
7541 0x24, ioa_cfg
->vpd_cbs_dma
+
7542 offsetof(struct ipr_misc_cbs
, mode_pages
),
7543 sizeof(struct ipr_mode_pages
));
7545 ipr_cmd
->job_step
= ipr_ioafp_mode_select_page24
;
7546 ipr_cmd
->job_step_failed
= ipr_reset_mode_sense_page24_failed
;
7548 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7551 return IPR_RC_JOB_RETURN
;
7555 * ipr_init_res_table - Initialize the resource table
7556 * @ipr_cmd: ipr command struct
7558 * This function looks through the existing resource table, comparing
7559 * it with the config table. This function will take care of old/new
7560 * devices and schedule adding/removing them from the mid-layer
7564 * IPR_RC_JOB_CONTINUE
7566 static int ipr_init_res_table(struct ipr_cmnd
*ipr_cmd
)
7568 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7569 struct ipr_resource_entry
*res
, *temp
;
7570 struct ipr_config_table_entry_wrapper cfgtew
;
7571 int entries
, found
, flag
, i
;
7576 flag
= ioa_cfg
->u
.cfg_table64
->hdr64
.flags
;
7578 flag
= ioa_cfg
->u
.cfg_table
->hdr
.flags
;
7580 if (flag
& IPR_UCODE_DOWNLOAD_REQ
)
7581 dev_err(&ioa_cfg
->pdev
->dev
, "Microcode download required\n");
7583 list_for_each_entry_safe(res
, temp
, &ioa_cfg
->used_res_q
, queue
)
7584 list_move_tail(&res
->queue
, &old_res
);
7587 entries
= be16_to_cpu(ioa_cfg
->u
.cfg_table64
->hdr64
.num_entries
);
7589 entries
= ioa_cfg
->u
.cfg_table
->hdr
.num_entries
;
7591 for (i
= 0; i
< entries
; i
++) {
7593 cfgtew
.u
.cfgte64
= &ioa_cfg
->u
.cfg_table64
->dev
[i
];
7595 cfgtew
.u
.cfgte
= &ioa_cfg
->u
.cfg_table
->dev
[i
];
7598 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7599 if (ipr_is_same_device(res
, &cfgtew
)) {
7600 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7607 if (list_empty(&ioa_cfg
->free_res_q
)) {
7608 dev_err(&ioa_cfg
->pdev
->dev
, "Too many devices attached\n");
7613 res
= list_entry(ioa_cfg
->free_res_q
.next
,
7614 struct ipr_resource_entry
, queue
);
7615 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7616 ipr_init_res_entry(res
, &cfgtew
);
7618 } else if (res
->sdev
&& (ipr_is_vset_device(res
) || ipr_is_scsi_disk(res
)))
7619 res
->sdev
->allow_restart
= 1;
7622 ipr_update_res_entry(res
, &cfgtew
);
7625 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7627 res
->del_from_ml
= 1;
7628 res
->res_handle
= IPR_INVALID_RES_HANDLE
;
7629 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7633 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7634 ipr_clear_res_target(res
);
7635 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
7638 if (ioa_cfg
->dual_raid
&& ipr_dual_ioa_raid
)
7639 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page24
;
7641 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7644 return IPR_RC_JOB_CONTINUE
;
/**
 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Query IOA Configuration command
 * to the adapter to retrieve the IOA configuration table.
 **/
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
		ioa_cfg->dual_raid = 1;
	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
		 ucode_vpd->major_release, ucode_vpd->card_type,
		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;

	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
		       IPR_IOADL_FLAGS_READ_LAST);

	ipr_cmd->job_step = ipr_init_res_table;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
7689 static int ipr_ioa_service_action_failed(struct ipr_cmnd
*ipr_cmd
)
7691 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7693 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
)
7694 return IPR_RC_JOB_CONTINUE
;
7696 return ipr_reset_cmd_failed(ipr_cmd
);
7699 static void ipr_build_ioa_service_action(struct ipr_cmnd
*ipr_cmd
,
7700 __be32 res_handle
, u8 sa_code
)
7702 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7704 ioarcb
->res_handle
= res_handle
;
7705 ioarcb
->cmd_pkt
.cdb
[0] = IPR_IOA_SERVICE_ACTION
;
7706 ioarcb
->cmd_pkt
.cdb
[1] = sa_code
;
7707 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7711 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7717 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd
*ipr_cmd
)
7719 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7720 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7721 struct ipr_inquiry_pageC4
*pageC4
= &ioa_cfg
->vpd_cbs
->pageC4_data
;
7725 ipr_cmd
->job_step
= ipr_ioafp_query_ioa_cfg
;
7727 if (pageC4
->cache_cap
[0] & IPR_CAP_SYNC_CACHE
) {
7728 ipr_build_ioa_service_action(ipr_cmd
,
7729 cpu_to_be32(IPR_IOA_RES_HANDLE
),
7730 IPR_IOA_SA_CHANGE_CACHE_PARAMS
);
7732 ioarcb
->cmd_pkt
.cdb
[2] = 0x40;
7734 ipr_cmd
->job_step_failed
= ipr_ioa_service_action_failed
;
7735 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
7736 IPR_SET_SUP_DEVICE_TIMEOUT
);
7739 return IPR_RC_JOB_RETURN
;
7743 return IPR_RC_JOB_CONTINUE
;
/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This utility function sends an inquiry to the adapter.
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
}

/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:	inquiry page 0 buffer
 * @page:	page code to check
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
			return 1;

	return 0;
}
7797 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7798 * @ipr_cmd: ipr command struct
7800 * This function sends a Page 0xC4 inquiry to the adapter
7801 * to retrieve software VPD information.
7804 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7806 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd
*ipr_cmd
)
7808 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7809 struct ipr_inquiry_page0
*page0
= &ioa_cfg
->vpd_cbs
->page0_data
;
7810 struct ipr_inquiry_pageC4
*pageC4
= &ioa_cfg
->vpd_cbs
->pageC4_data
;
7813 ipr_cmd
->job_step
= ipr_ioafp_set_caching_parameters
;
7814 memset(pageC4
, 0, sizeof(*pageC4
));
7816 if (ipr_inquiry_page_supported(page0
, 0xC4)) {
7817 ipr_ioafp_inquiry(ipr_cmd
, 1, 0xC4,
7818 (ioa_cfg
->vpd_cbs_dma
7819 + offsetof(struct ipr_misc_cbs
,
7821 sizeof(struct ipr_inquiry_pageC4
));
7822 return IPR_RC_JOB_RETURN
;
7826 return IPR_RC_JOB_CONTINUE
;
7830 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7831 * @ipr_cmd: ipr command struct
7833 * This function sends a Page 0xD0 inquiry to the adapter
7834 * to retrieve adapter capabilities.
7837 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7839 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd
*ipr_cmd
)
7841 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7842 struct ipr_inquiry_page0
*page0
= &ioa_cfg
->vpd_cbs
->page0_data
;
7843 struct ipr_inquiry_cap
*cap
= &ioa_cfg
->vpd_cbs
->cap
;
7846 ipr_cmd
->job_step
= ipr_ioafp_pageC4_inquiry
;
7847 memset(cap
, 0, sizeof(*cap
));
7849 if (ipr_inquiry_page_supported(page0
, 0xD0)) {
7850 ipr_ioafp_inquiry(ipr_cmd
, 1, 0xD0,
7851 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, cap
),
7852 sizeof(struct ipr_inquiry_cap
));
7853 return IPR_RC_JOB_RETURN
;
7857 return IPR_RC_JOB_CONTINUE
;
7861 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7862 * @ipr_cmd: ipr command struct
7864 * This function sends a Page 3 inquiry to the adapter
7865 * to retrieve software VPD information.
7868 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7870 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd
*ipr_cmd
)
7872 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7876 ipr_cmd
->job_step
= ipr_ioafp_cap_inquiry
;
7878 ipr_ioafp_inquiry(ipr_cmd
, 1, 3,
7879 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, page3_data
),
7880 sizeof(struct ipr_inquiry_page3
));
7883 return IPR_RC_JOB_RETURN
;
7887 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7888 * @ipr_cmd: ipr command struct
7890 * This function sends a Page 0 inquiry to the adapter
7891 * to retrieve supported inquiry pages.
7894 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7896 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd
*ipr_cmd
)
7898 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7903 /* Grab the type out of the VPD and store it away */
7904 memcpy(type
, ioa_cfg
->vpd_cbs
->ioa_vpd
.std_inq_data
.vpids
.product_id
, 4);
7906 ioa_cfg
->type
= simple_strtoul((char *)type
, NULL
, 16);
7908 if (ipr_invalid_adapter(ioa_cfg
)) {
7909 dev_err(&ioa_cfg
->pdev
->dev
,
7910 "Adapter not supported in this hardware configuration.\n");
7912 if (!ipr_testmode
) {
7913 ioa_cfg
->reset_retries
+= IPR_NUM_RESET_RELOAD_RETRIES
;
7914 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
7915 list_add_tail(&ipr_cmd
->queue
,
7916 &ioa_cfg
->hrrq
->hrrq_free_q
);
7917 return IPR_RC_JOB_RETURN
;
7921 ipr_cmd
->job_step
= ipr_ioafp_page3_inquiry
;
7923 ipr_ioafp_inquiry(ipr_cmd
, 1, 0,
7924 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, page0_data
),
7925 sizeof(struct ipr_inquiry_page0
));
7928 return IPR_RC_JOB_RETURN
;
7932 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7933 * @ipr_cmd: ipr command struct
7935 * This function sends a standard inquiry to the adapter.
7940 static int ipr_ioafp_std_inquiry(struct ipr_cmnd
*ipr_cmd
)
7942 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7945 ipr_cmd
->job_step
= ipr_ioafp_page0_inquiry
;
7947 ipr_ioafp_inquiry(ipr_cmd
, 0, 0,
7948 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, ioa_vpd
),
7949 sizeof(struct ipr_ioa_vpd
));
7952 return IPR_RC_JOB_RETURN
;
7956 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7957 * @ipr_cmd: ipr command struct
7959 * This function send an Identify Host Request Response Queue
7960 * command to establish the HRRQ with the adapter.
7965 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd
*ipr_cmd
)
7967 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7968 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7969 struct ipr_hrr_queue
*hrrq
;
7972 ipr_cmd
->job_step
= ipr_ioafp_std_inquiry
;
7973 dev_info(&ioa_cfg
->pdev
->dev
, "Starting IOA initialization sequence.\n");
7975 if (ioa_cfg
->identify_hrrq_index
< ioa_cfg
->hrrq_num
) {
7976 hrrq
= &ioa_cfg
->hrrq
[ioa_cfg
->identify_hrrq_index
];
7978 ioarcb
->cmd_pkt
.cdb
[0] = IPR_ID_HOST_RR_Q
;
7979 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7981 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7983 ioarcb
->cmd_pkt
.cdb
[1] = 0x1;
7985 if (ioa_cfg
->nvectors
== 1)
7986 ioarcb
->cmd_pkt
.cdb
[1] &= ~IPR_ID_HRRQ_SELE_ENABLE
;
7988 ioarcb
->cmd_pkt
.cdb
[1] |= IPR_ID_HRRQ_SELE_ENABLE
;
7990 ioarcb
->cmd_pkt
.cdb
[2] =
7991 ((u64
) hrrq
->host_rrq_dma
>> 24) & 0xff;
7992 ioarcb
->cmd_pkt
.cdb
[3] =
7993 ((u64
) hrrq
->host_rrq_dma
>> 16) & 0xff;
7994 ioarcb
->cmd_pkt
.cdb
[4] =
7995 ((u64
) hrrq
->host_rrq_dma
>> 8) & 0xff;
7996 ioarcb
->cmd_pkt
.cdb
[5] =
7997 ((u64
) hrrq
->host_rrq_dma
) & 0xff;
7998 ioarcb
->cmd_pkt
.cdb
[7] =
7999 ((sizeof(u32
) * hrrq
->size
) >> 8) & 0xff;
8000 ioarcb
->cmd_pkt
.cdb
[8] =
8001 (sizeof(u32
) * hrrq
->size
) & 0xff;
8003 if (ioarcb
->cmd_pkt
.cdb
[1] & IPR_ID_HRRQ_SELE_ENABLE
)
8004 ioarcb
->cmd_pkt
.cdb
[9] =
8005 ioa_cfg
->identify_hrrq_index
;
8007 if (ioa_cfg
->sis64
) {
8008 ioarcb
->cmd_pkt
.cdb
[10] =
8009 ((u64
) hrrq
->host_rrq_dma
>> 56) & 0xff;
8010 ioarcb
->cmd_pkt
.cdb
[11] =
8011 ((u64
) hrrq
->host_rrq_dma
>> 48) & 0xff;
8012 ioarcb
->cmd_pkt
.cdb
[12] =
8013 ((u64
) hrrq
->host_rrq_dma
>> 40) & 0xff;
8014 ioarcb
->cmd_pkt
.cdb
[13] =
8015 ((u64
) hrrq
->host_rrq_dma
>> 32) & 0xff;
8018 if (ioarcb
->cmd_pkt
.cdb
[1] & IPR_ID_HRRQ_SELE_ENABLE
)
8019 ioarcb
->cmd_pkt
.cdb
[14] =
8020 ioa_cfg
->identify_hrrq_index
;
8022 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
8023 IPR_INTERNAL_TIMEOUT
);
8025 if (++ioa_cfg
->identify_hrrq_index
< ioa_cfg
->hrrq_num
)
8026 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
8029 return IPR_RC_JOB_RETURN
;
8033 return IPR_RC_JOB_CONTINUE
;
/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 **/
static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}
/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);

		/* Initialize Host RRQ pointers */
		hrrq->hrrq_start = hrrq->host_rrq;
		hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
		hrrq->hrrq_curr = hrrq->hrrq_start;
		hrrq->toggle_bit = 1;
		spin_unlock(&hrrq->_lock);
	}

	ioa_cfg->identify_hrrq_index = 0;
	if (ioa_cfg->hrrq_num == 1)
		atomic_set(&ioa_cfg->hrrq_index, 0);
	else
		atomic_set(&ioa_cfg->hrrq_index, 1);

	/* Zero out config table */
	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
}
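/*
 * Note: each host RRQ is used as a circular buffer.  hrrq_curr walks from
 * hrrq_start to hrrq_end, and the toggle bit flips on every wrap so the
 * driver can distinguish fresh response entries written by the adapter
 * from stale ones left over from the previous pass.
 */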
/**
 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
{
	unsigned long stage, stage_time;
	u32 feedback;
	volatile u32 int_reg;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u64 maskval = 0;

	feedback = readl(ioa_cfg->regs.init_feedback_reg);
	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;

	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);

	/* sanity check the stage_time value */
	if (stage_time == 0)
		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;

	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		stage_time = ioa_cfg->transop_timeout;
		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
			maskval = IPR_PCII_IPL_STAGE_CHANGE;
			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			return IPR_RC_JOB_CONTINUE;
		}
	}

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;
	volatile u64 maskval;
	int i;

	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg32);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);

	if (ioa_cfg->sis64) {
		maskval = IPR_PCII_IPL_STAGE_CHANGE;
		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
	} else
		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);

	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	if (ioa_cfg->sis64) {
		ipr_cmd->job_step = ipr_reset_next_stage;
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	else if (ioa_cfg->sdt_state == READ_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ioa_cfg->dump_timeout = 1;
	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:	ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}
/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:	ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;
	u32 ioasc;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	     (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
		length = be32_to_cpu(sdt.entry[0].end_token);
	else
		length = (be32_to_cpu(sdt.entry[0].end_token) -
			  be32_to_cpu(sdt.entry[0].start_token)) &
			  IPR_FMT2_MBX_ADDR_MASK;

	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].start_token),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
		    ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	} else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}
/**
 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function will call to get the unit check buffer.
 **/
static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ioa_cfg->ioa_unit_checked = 0;
	ipr_get_unit_check_buffer(ioa_cfg);
	ipr_cmd->job_step = ipr_reset_alert;
	ipr_reset_start_timer(ipr_cmd, 0);

	return IPR_RC_JOB_RETURN;
}
static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state != GET_DUMP)
		return IPR_RC_JOB_RETURN;

	if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
	    (readl(ioa_cfg->regs.sense_interrupt_reg) &
	     IPR_PCII_MAILBOX_STABLE)) {

		if (!ipr_cmd->u.time_left)
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting for Mailbox register.\n");

		ioa_cfg->sdt_state = READ_DUMP;
		ioa_cfg->dump_timeout = 0;
		if (ioa_cfg->sis64)
			ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
		else
			ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
		ipr_cmd->job_step = ipr_reset_wait_for_dump;
		schedule_work(&ioa_cfg->work_q);

	} else {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd,
				      IPR_CHECK_FOR_RESET_TIMEOUT);
	}

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 int_reg;

	ioa_cfg->pdev->state_saved = true;
	pci_restore_state(ioa_cfg->pdev);

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	if (ioa_cfg->ioa_unit_checked) {
		if (ioa_cfg->sis64) {
			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
			return IPR_RC_JOB_RETURN;
		} else {
			ioa_cfg->ioa_unit_checked = 0;
			ipr_get_unit_check_buffer(ioa_cfg);
			ipr_cmd->job_step = ipr_reset_alert;
			ipr_reset_start_timer(ipr_cmd, 0);
			return IPR_RC_JOB_RETURN;
		}
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else if (ioa_cfg->sdt_state == GET_DUMP) {
		ipr_cmd->job_step = ipr_dump_mailbox_wait;
		ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;
	}

	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_bist_done - BIST has completed on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: Unblock config space and resume the reset process.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->cfg_locked)
		pci_cfg_access_unlock(ioa_cfg->pdev);
	ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_restore_cfg_space;

	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = PCIBIOS_SUCCESSFUL;

	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
		writel(IPR_UPROCI_SIS64_START_BIST,
		       ioa_cfg->regs.set_uproc_interrupt_reg32);
	else
		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc == PCIBIOS_SUCCESSFUL) {
		ipr_cmd->job_step = ipr_reset_bist_done;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	} else {
		if (ioa_cfg->cfg_locked)
			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
		ioa_cfg->cfg_locked = 0;
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}
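/*
 * BIST method above: chips whose ipr_chip->bist_method is IPR_MMIO are
 * started through the uproc interrupt register, while the others use the
 * standard PCI_BIST byte in config space; either way the job then waits
 * IPR_WAIT_FOR_BIST_TIMEOUT before continuing in ipr_reset_bist_done().
 */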
/**
 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This clears PCI reset to the adapter and delays two seconds.
 **/
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->job_step = ipr_reset_bist_done;
	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
 * @work:	work struct
 *
 * Description: This pulses warm reset to a slot.
 **/
static void ipr_reset_reset_work(struct work_struct *work)
{
	struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct pci_dev *pdev = ioa_cfg->pdev;
	unsigned long lock_flags = 0;

	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
	msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->reset_cmd == ipr_cmd)
		ipr_reset_ioa_job(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This asserts PCI reset to the adapter.
 **/
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
	queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
	ipr_cmd->job_step = ipr_reset_slot_reset_done;

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_block_config_access_wait - Wait for permission to block config access
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;

	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
		ioa_cfg->cfg_locked = 1;
		ipr_cmd->job_step = ioa_cfg->reset;
	} else {
		if (ipr_cmd->u.time_left) {
			rc = IPR_RC_JOB_RETURN;
			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
			ipr_reset_start_timer(ipr_cmd,
					      IPR_CHECK_FOR_RESET_TIMEOUT);
		} else {
			ipr_cmd->job_step = ioa_cfg->reset;
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting to lock config access. Resetting anyway.\n");
		}
	}

	return rc;
}
/**
 * ipr_reset_block_config_access - Block config access to the IOA
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}
/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}
/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_quiesce_done - Complete IOA disconnect
 * @ipr_cmd:	ipr command struct
 *
 * Description: Freeze the adapter to complete quiesce processing
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_cmd->job_step = ipr_ioa_bringdown_done;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_cancel_hcam_done - Check for outstanding commands
 * @ipr_cmd:	ipr command struct
 *
 * Description: Ensure nothing is outstanding to the IOA and
 * proceed with IOA disconnect. Otherwise reset the IOA.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmnd *loop_cmd;
	struct ipr_hrr_queue *hrrq;
	int rc = IPR_RC_JOB_CONTINUE;

	ipr_cmd->job_step = ipr_reset_quiesce_done;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
			rc = IPR_RC_JOB_RETURN;
			break;
		}
		spin_unlock(&hrrq->_lock);
	}

	return rc;
}
/**
 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
 * @ipr_cmd:	ipr command struct
 *
 * Description: Cancel any outstanding HCAMs to the IOA.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_cmnd *hcam_cmd;
	struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];

	ipr_cmd->job_step = ipr_reset_cancel_hcam_done;

	if (!hrrq->ioa_is_dead) {
		if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
			list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
				if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
					continue;

				ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
				ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
				cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
				cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
				cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
				cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
				cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
				cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
				cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
				cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
				cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
				cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
				cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
				cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;

				ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
					   IPR_CANCEL_TIMEOUT);

				rc = IPR_RC_JOB_RETURN;
				ipr_cmd->job_step = ipr_reset_cancel_hcam;
				break;
			}
		}
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	return rc;
}
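/*
 * The cancel request above carries the 64-bit IOARCB address of the HCAM
 * being cancelled split across the CDB: bytes 2-5 hold the low 32 bits and
 * bytes 10-13 hold the high 32 bits, most significant byte first.
 */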
/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	if (ioa_cfg->sis64)
		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	else
		ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
		ipr_cmd->job_step = ipr_reset_cancel_hcam;
	else if (shutdown_type != IPR_SHUTDOWN_NONE &&
			!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		else
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	return rc;
}
/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue,
				      &ipr_cmd->hrrq->hrrq_free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}
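/*
 * The reset job above is a simple state machine: each job step either
 * returns IPR_RC_JOB_CONTINUE, in which case the next job_step runs
 * immediately in the same loop, or IPR_RC_JOB_RETURN, in which case the
 * step has queued asynchronous work (a command or a timer) whose completion
 * will call ipr_reset_ioa_job() again to resume the sequence.
 */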
/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;
	int i;

	ioa_cfg->in_reset_reload = 1;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_cmds = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
		scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}
/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	int i;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload) {
		if (ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
		else if (ioa_cfg->sdt_state == READ_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
	}

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 1;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
				spin_unlock_irq(ioa_cfg->host->host_lock);
				scsi_unblock_requests(ioa_cfg->host);
				spin_lock_irq(ioa_cfg->host->host_lock);
			}
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
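/*
 * Reset retry policy above: after IPR_NUM_RESET_RELOAD_RETRIES consecutive
 * reset/reload attempts the adapter is declared dead (ioa_is_dead is set on
 * every HRRQ) rather than being reset again; if the adapter was already in a
 * bringdown, outstanding ops are failed and reset_wait_q waiters are woken.
 */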
/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 **/
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

	/* Disallow new interrupts, avoid loop */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the MMIO
 * access to the IOA has been restored
 **/
static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (!ioa_cfg->probe_done)
		pci_save_state(pdev);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 **/
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done)
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 **/
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->needs_warm_reset)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		else
			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
						IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 **/
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
		ioa_cfg->in_ioa_bringdown = 1;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].allow_cmds = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}

		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 *	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 **/
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
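/*
 * EEH flow: a frozen slot parks the driver via ipr_reset_freeze() and
 * reports CAN_RECOVER, a permanently failed slot takes the IOA offline,
 * and any other channel state asks the PCI error recovery core for a slot
 * reset, which arrives through ipr_pci_slot_reset() above.
 */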
/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	ioa_cfg->probe_done = 1;
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	return rc;
}
/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if (ioa_cfg->ipr_cmnd_list) {
		for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
			if (ioa_cfg->ipr_cmnd_list[i])
				dma_pool_free(ioa_cfg->ipr_cmd_pool,
					      ioa_cfg->ipr_cmnd_list[i],
					      ioa_cfg->ipr_cmnd_list_dma[i]);

			ioa_cfg->ipr_cmnd_list[i] = NULL;
		}
	}

	if (ioa_cfg->ipr_cmd_pool)
		dma_pool_destroy(ioa_cfg->ipr_cmd_pool);

	kfree(ioa_cfg->ipr_cmnd_list);
	kfree(ioa_cfg->ipr_cmnd_list_dma);
	ioa_cfg->ipr_cmnd_list = NULL;
	ioa_cfg->ipr_cmnd_list_dma = NULL;
	ioa_cfg->ipr_cmd_pool = NULL;
}
/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++)
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(u32) * ioa_cfg->hrrq[i].size,
				  ioa_cfg->hrrq[i].host_rrq,
				  ioa_cfg->hrrq[i].host_rrq_dma);

	dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(struct ipr_hostrcb),
				  ioa_cfg->hostrcb[i],
				  ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}
/**
 * ipr_free_irqs - Free all allocated IRQs for the adapter.
 * @ioa_cfg:	ipr cfg struct
 *
 * This function frees all allocated IRQs for the
 * specified adapter.
 **/
static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i;

	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		for (i = 0; i < ioa_cfg->nvectors; i++)
			free_irq(ioa_cfg->vectors_info[i].vec,
				 &ioa_cfg->hrrq[i]);
	} else
		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);

	if (ioa_cfg->intr_flag == IPR_USE_MSI) {
		pci_disable_msi(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSI;
	} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
		pci_disable_msix(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
	}
}
/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ipr_free_irqs(ioa_cfg);
	if (ioa_cfg->reset_work_q)
		destroy_workqueue(ioa_cfg->reset_work_q);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
}
/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -ENOMEM on allocation failure
 **/
static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i, entries_each_hrrq, hrrq_id = 0;

	ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
						sizeof(struct ipr_cmnd), 512, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);

	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
		ipr_free_cmd_blks(ioa_cfg);
		return -ENOMEM;
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		if (ioa_cfg->hrrq_num > 1) {
			if (i == 0) {
				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
				ioa_cfg->hrrq[i].min_cmd_id = 0;
				ioa_cfg->hrrq[i].max_cmd_id =
					(entries_each_hrrq - 1);
			} else {
				entries_each_hrrq =
					IPR_NUM_BASE_CMD_BLKS /
					(ioa_cfg->hrrq_num - 1);
				ioa_cfg->hrrq[i].min_cmd_id =
					IPR_NUM_INTERNAL_CMD_BLKS +
					(i - 1) * entries_each_hrrq;
				ioa_cfg->hrrq[i].max_cmd_id =
					(IPR_NUM_INTERNAL_CMD_BLKS +
					 i * entries_each_hrrq - 1);
			}
		} else {
			entries_each_hrrq = IPR_NUM_CMD_BLKS;
			ioa_cfg->hrrq[i].min_cmd_id = 0;
			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
		}
		ioa_cfg->hrrq[i].size = entries_each_hrrq;
	}

	BUG_ON(ioa_cfg->hrrq_num == 0);

	i = IPR_NUM_CMD_BLKS -
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
	if (i > 0) {
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
	}

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
			hrrq_id++;
	}

	return 0;
}
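/*
 * Command block distribution above: when multiple HRRQs are in use, HRRQ 0
 * is sized for internal/reset commands (IPR_NUM_INTERNAL_CMD_BLKS) and the
 * remaining base command blocks are divided evenly among the other queues;
 * any blocks left over by the integer division are given to the last HRRQ.
 */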
/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / non-zero for error
 **/
static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       ioa_cfg->max_devs_supported, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
					      sizeof(struct ipr_misc_cbs),
					      &ioa_cfg->vpd_cbs_dma,
					      GFP_KERNEL);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					&ioa_cfg->hrrq[i].host_rrq_dma,
					GFP_KERNEL);

		if (!ioa_cfg->hrrq[i].host_rrq) {
			while (--i > 0)
				dma_free_coherent(&pdev->dev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					ioa_cfg->hrrq[i].host_rrq,
					ioa_cfg->hrrq[i].host_rrq_dma);
			goto out_ipr_free_cmd_blocks;
		}
		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
						  ioa_cfg->cfg_table_size,
						  &ioa_cfg->cfg_table_dma,
						  GFP_KERNEL);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
							 sizeof(struct ipr_hostrcb),
							 &ioa_cfg->hostrcb_dma[i],
							 GFP_KERNEL);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0)
		dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
				  ioa_cfg->hostrcb[i],
				  ioa_cfg->hostrcb_dma[i]);
	dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		dma_free_coherent(&pdev->dev,
				  sizeof(u32) * ioa_cfg->hrrq[i].size,
				  ioa_cfg->hrrq[i].host_rrq,
				  ioa_cfg->hrrq[i].host_rrq_dma);
	}
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}
/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 **/
static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}
/**
 * ipr_init_regs - Initialize IOA registers
 * @ioa_cfg:	ioa config struct
 **/
static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}
/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 **/
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
			     struct Scsi_Host *host, struct pci_dev *pdev)
{
	int i;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
					   + ((sizeof(struct ipr_config_table_entry64)
					       * ioa_cfg->max_devs_supported)));
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
					   + ((sizeof(struct ipr_config_table_entry)
					       * ioa_cfg->max_devs_supported)));
	}

	host->max_channel = IPR_VSET_BUS;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
		if (i == 0)
			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
		else
			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
	}
}
/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}
/**
 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
 * @ioa_cfg:	ioa config struct
 **/
static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	if (pci_channel_offline(pdev)) {
		wait_event_timeout(ioa_cfg->eeh_wait_q,
				   !pci_channel_offline(pdev),
				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
		pci_restore_state(pdev);
	}
}
static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
{
	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
	int i, vectors;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	vectors = pci_enable_msix_range(ioa_cfg->pdev,
					entries, 1, ipr_number_of_msix);
	if (vectors < 0) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		return vectors;
	}

	for (i = 0; i < vectors; i++)
		ioa_cfg->vectors_info[i].vec = entries[i].vector;
	ioa_cfg->nvectors = vectors;

	return 0;
}
static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, vectors;

	vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
	if (vectors < 0) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		return vectors;
	}

	for (i = 0; i < vectors; i++)
		ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
	ioa_cfg->nvectors = vectors;

	return 0;
}
static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
		ioa_cfg->vectors_info[vec_idx].
			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
	}
}
static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, rc;

	for (i = 1; i < ioa_cfg->nvectors; i++) {
		rc = request_irq(ioa_cfg->vectors_info[i].vec,
				 ipr_isr_mhrrq,
				 0,
				 ioa_cfg->vectors_info[i].desc,
				 &ioa_cfg->hrrq[i]);
		if (rc) {
			while (--i >= 0)
				free_irq(ioa_cfg->vectors_info[i].vec,
					 &ioa_cfg->hrrq[i]);
			return rc;
		}
	}
	return 0;
}
/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:	interrupt number
 * @devp:	ioa config struct pointer
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *	IRQ_HANDLED
 **/
static irqreturn_t ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: The return value from pci_enable_msi_range() can not always be
 * trusted. This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	else
		rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
	else
		free_irq(pdev->irq, ioa_cfg);

	return rc;
}
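/*
 * The MSI/MSI-X test above works by unmasking only the IO debug acknowledge
 * interrupt, writing IPR_PCII_IO_DEBUG_ACKNOWLEDGE to force the adapter to
 * raise it, and then waiting up to one second (HZ jiffies) for
 * ipr_test_intr() to set msi_received; if the interrupt never arrives the
 * caller falls back to legacy (LSI) interrupts.
 */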
9966 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
9967 * @pdev: PCI device struct
9968 * @dev_id: PCI device id struct
9971 * 0 on success / non-zero on failure
9973 static int ipr_probe_ioa(struct pci_dev
*pdev
,
9974 const struct pci_device_id
*dev_id
)
9976 struct ipr_ioa_cfg
*ioa_cfg
;
9977 struct Scsi_Host
*host
;
9978 unsigned long ipr_regs_pci
;
9979 void __iomem
*ipr_regs
;
9980 int rc
= PCIBIOS_SUCCESSFUL
;
9981 volatile u32 mask
, uproc
, interrupts
;
9982 unsigned long lock_flags
, driver_lock_flags
;
9986 dev_info(&pdev
->dev
, "Found IOA with IRQ: %d\n", pdev
->irq
);
9987 host
= scsi_host_alloc(&driver_template
, sizeof(*ioa_cfg
));
9990 dev_err(&pdev
->dev
, "call to scsi_host_alloc failed!\n");
9995 ioa_cfg
= (struct ipr_ioa_cfg
*)host
->hostdata
;
9996 memset(ioa_cfg
, 0, sizeof(struct ipr_ioa_cfg
));
9997 ata_host_init(&ioa_cfg
->ata_host
, &pdev
->dev
, &ipr_sata_ops
);
9999 ioa_cfg
->ipr_chip
= ipr_get_chip_info(dev_id
);
10001 if (!ioa_cfg
->ipr_chip
) {
10002 dev_err(&pdev
->dev
, "Unknown adapter chipset 0x%04X 0x%04X\n",
10003 dev_id
->vendor
, dev_id
->device
);
10004 goto out_scsi_host_put
;
10007 /* set SIS 32 or SIS 64 */
10008 ioa_cfg
->sis64
= ioa_cfg
->ipr_chip
->sis_type
== IPR_SIS64
? 1 : 0;
10009 ioa_cfg
->chip_cfg
= ioa_cfg
->ipr_chip
->cfg
;
10010 ioa_cfg
->clear_isr
= ioa_cfg
->chip_cfg
->clear_isr
;
10011 ioa_cfg
->max_cmds
= ioa_cfg
->chip_cfg
->max_cmds
;
10013 if (ipr_transop_timeout
)
10014 ioa_cfg
->transop_timeout
= ipr_transop_timeout
;
10015 else if (dev_id
->driver_data
& IPR_USE_LONG_TRANSOP_TIMEOUT
)
10016 ioa_cfg
->transop_timeout
= IPR_LONG_OPERATIONAL_TIMEOUT
;
10018 ioa_cfg
->transop_timeout
= IPR_OPERATIONAL_TIMEOUT
;
10020 ioa_cfg
->revid
= pdev
->revision
;
10022 ipr_init_ioa_cfg(ioa_cfg
, host
, pdev
);
10024 ipr_regs_pci
= pci_resource_start(pdev
, 0);
10026 rc
= pci_request_regions(pdev
, IPR_NAME
);
10028 dev_err(&pdev
->dev
,
10029 "Couldn't register memory range of registers\n");
10030 goto out_scsi_host_put
;
10033 rc
= pci_enable_device(pdev
);
10035 if (rc
|| pci_channel_offline(pdev
)) {
10036 if (pci_channel_offline(pdev
)) {
10037 ipr_wait_for_pci_err_recovery(ioa_cfg
);
10038 rc
= pci_enable_device(pdev
);
10042 dev_err(&pdev
->dev
, "Cannot enable adapter\n");
10043 ipr_wait_for_pci_err_recovery(ioa_cfg
);
10044 goto out_release_regions
;
10048 ipr_regs
= pci_ioremap_bar(pdev
, 0);
10051 dev_err(&pdev
->dev
,
10052 "Couldn't map memory range of registers\n");
10057 ioa_cfg
->hdw_dma_regs
= ipr_regs
;
10058 ioa_cfg
->hdw_dma_regs_pci
= ipr_regs_pci
;
10059 ioa_cfg
->ioa_mailbox
= ioa_cfg
->chip_cfg
->mailbox
+ ipr_regs
;
10061 ipr_init_regs(ioa_cfg
);
10063 if (ioa_cfg
->sis64
) {
10064 rc
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64));
10066 dev_dbg(&pdev
->dev
, "Failed to set 64 bit DMA mask\n");
10067 rc
= dma_set_mask_and_coherent(&pdev
->dev
,
10071 rc
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
10074 dev_err(&pdev
->dev
, "Failed to set DMA mask\n");
10075 goto cleanup_nomem
;
10078 rc
= pci_write_config_byte(pdev
, PCI_CACHE_LINE_SIZE
,
10079 ioa_cfg
->chip_cfg
->cache_line_size
);
10081 if (rc
!= PCIBIOS_SUCCESSFUL
) {
10082 dev_err(&pdev
->dev
, "Write of cache line size failed\n");
10083 ipr_wait_for_pci_err_recovery(ioa_cfg
);
10085 goto cleanup_nomem
;
10088 /* Issue MMIO read to ensure card is not in EEH */
10089 interrupts
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
10090 ipr_wait_for_pci_err_recovery(ioa_cfg
);
10092 if (ipr_number_of_msix
> IPR_MAX_MSIX_VECTORS
) {
10093 dev_err(&pdev
->dev
, "The max number of MSIX is %d\n",
10094 IPR_MAX_MSIX_VECTORS
);
10095 ipr_number_of_msix
= IPR_MAX_MSIX_VECTORS
;
10098 if (ioa_cfg
->ipr_chip
->intr_type
== IPR_USE_MSI
&&
10099 ipr_enable_msix(ioa_cfg
) == 0)
10100 ioa_cfg
->intr_flag
= IPR_USE_MSIX
;
10101 else if (ioa_cfg
->ipr_chip
->intr_type
== IPR_USE_MSI
&&
10102 ipr_enable_msi(ioa_cfg
) == 0)
10103 ioa_cfg
->intr_flag
= IPR_USE_MSI
;
10105 ioa_cfg
->intr_flag
= IPR_USE_LSI
;
10106 ioa_cfg
->clear_isr
= 1;
10107 ioa_cfg
->nvectors
= 1;
10108 dev_info(&pdev
->dev
, "Cannot enable MSI.\n");
10111 pci_set_master(pdev
);
10113 if (pci_channel_offline(pdev
)) {
10114 ipr_wait_for_pci_err_recovery(ioa_cfg
);
10115 pci_set_master(pdev
);
10116 if (pci_channel_offline(pdev
)) {
10118 goto out_msi_disable
;
	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			if (ioa_cfg->intr_flag == IPR_USE_MSI) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSI;
				pci_disable_msi(pdev);
			} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
				pci_disable_msix(pdev);
			}

			ioa_cfg->intr_flag = IPR_USE_LSI;
			ioa_cfg->nvectors = 1;
		}
		else if (rc)
			goto out_msi_disable;
	}
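
	/*
	 * Editorial note (not original driver text): ipr_test_msi() above
	 * fires a test interrupt to verify that MSI/MSI-X delivery actually
	 * works for this adapter on this platform.  If the test reports
	 * -EOPNOTSUPP, the vectors are released and the driver falls back to
	 * legacy (LSI) interrupts with a single vector.
	 */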
	if (ioa_cfg->intr_flag == IPR_USE_MSI)
		dev_info(&pdev->dev,
			"Request for %d MSIs succeeded with starting IRQ: %d\n",
			ioa_cfg->nvectors, pdev->irq);
	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		dev_info(&pdev->dev,
			"Request for %d MSIXs succeeded.",
			ioa_cfg->nvectors);

	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				(unsigned int)num_online_cpus(),
				(unsigned int)IPR_MAX_HRRQ_NUM);
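
	/*
	 * Editorial illustration (assumed figures, not driver text): with,
	 * say, 4 interrupt vectors on an 8-CPU system and an
	 * IPR_MAX_HRRQ_NUM of at least 4, hrrq_num works out to 4 -- the
	 * number of host response queues never exceeds the vectors granted,
	 * the online CPUs, or the driver maximum.
	 */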
	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}
	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nolog;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->intr_flag == IPR_USE_MSI
			|| ioa_cfg->intr_flag == IPR_USE_MSIX) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
			0,
			ioa_cfg->vectors_info[0].desc,
			&ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
			 IRQF_SHARED,
			 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}
	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;

		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
								WQ_MEM_RECLAIM, host->host_no);

		if (!ioa_cfg->reset_work_q) {
			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
			rc = -ENOMEM;
			goto out_free_irq;
		}
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
	LEAVE;
out:
	return rc;

out_free_irq:
	ipr_free_irqs(ioa_cfg);
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	ipr_wait_for_pci_err_recovery(ioa_cfg);
	if (ioa_cfg->intr_flag == IPR_USE_MSI)
		pci_disable_msi(pdev);
	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		pci_disable_msix(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
	goto out;
}
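
/*
 * Editorial note on the unwind labels above (not original driver text):
 * each failure point in ipr_probe_ioa() jumps to the label that undoes only
 * what has already been set up, so teardown runs in the reverse order of
 * initialization -- IRQs, driver memory, MSI/MSI-X, the MMIO mapping, the
 * PCI device enable, the BAR regions, and finally the Scsi_Host reference.
 */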
/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
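
/*
 * Illustrative caller pattern (editorial sketch; __ipr_remove() and
 * ipr_shutdown() below are the real users): the bringdown is started under
 * host_lock, and a caller that needs to wait for completion drops the lock
 * and sleeps on reset_wait_q:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */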
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	if (ioa_cfg->reset_work_q)
		flush_workqueue(ioa_cfg->reset_work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
		}
	}

	schedule_work(&ioa_cfg->work_q);
	return 0;
}
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
		shutdown_type = IPR_SHUTDOWN_QUIESCE;

	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
		ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}
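
/*
 * Editorial note (not original driver text): the shutdown_type selection
 * above boils down to:
 *
 *	reboot (SYSTEM_RESTART) + ipr_fast_reboot set + SIS-64 adapter
 *		-> IPR_SHUTDOWN_QUIESCE, then free IRQs and disable the
 *		   PCI device immediately;
 *	anything else
 *		-> IPR_SHUTDOWN_NORMAL, which flushes the write cache before
 *		   the system goes down.
 */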
static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};
static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	event
 * @buf:	buffer
 *
 * Return value:
 *	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};
/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}
/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}
module_init(ipr_init);
module_exit(ipr_exit);
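
/*
 * Usage sketch (editorial note, not part of the driver): when built as a
 * module, loading it registers the reboot notifier and the PCI driver above
 * and prints the version banner from ipr_init(), e.g.:
 *
 *	# modprobe ipr
 *	# dmesg | grep "IBM Power RAID"
 *
 * Module parameters such as the log level or test mode (declared near the
 * top of this file) can be passed on the modprobe command line; the exact
 * parameter names are defined by the module_param declarations earlier in
 * the file.
 */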