/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- SCSI device hot plug
 *
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
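
/*
 * A hypothetical example of using the parameters above (an illustration, not
 * taken from this file): the driver could be loaded with a higher maximum
 * bus speed and verbose logging via
 *
 *	modprobe ipr max_speed=2 log_level=4
 *
 * The parameters registered with S_IRUGO | S_IWUSR (fastfail, debug,
 * fast_reboot) are additionally writable at runtime through
 * /sys/module/ipr/parameters/.
 */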
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	"Logical Unit in process of becoming ready"},
	"Initializing command required"},
	"34FF: Disk device format in progress"},
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	"Synchronization required"},
	"IOA microcode download required"},
	"Device bus connection is prohibited by host"},
	"No ready, IOA shutdown"},
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	"ATA device status error"},
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, commands not allowed to this device"},
	"Illegal request, command not allowed to a secondary adapter"},
	"Illegal request, command not allowed to a non-optimized resource"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"Illegal request, dual adapter support not enabled"},
	"Illegal request, another cable connector was physically disabled"},
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	"Data protect, other volume set problem"},
	"Aborted command, invalid descriptor"},
	"Target operating conditions have changed, dual adapter takeover"},
	"Aborted command, medium removal prevented"},
	"Command terminated by host"},
	"Aborted command, command terminated by host"}
};
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
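
/*
 * A note on the trace hook above: the per-adapter trace array is used as a
 * circular buffer. The index is advanced atomically and then masked with
 * IPR_TRACE_INDEX_MASK, so once the array is full new entries silently
 * overwrite the oldest ones. A minimal sketch of the same indexing idiom
 * (illustrative only, assuming a power-of-two buffer size):
 *
 *	idx = atomic_add_return(1, &next) & (TRACE_ENTRIES - 1);
 *	trace[idx] = entry;
 */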
/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}
/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
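
/*
 * Note on ipr_mask_and_clear_interrupts(): SIS-64 adapters expose a 64-bit
 * interrupt mask register and are masked with writeq(), while the older
 * chips use a 32-bit writel(). The trailing readl() of the sense interrupt
 * register most likely serves to flush the posted MMIO writes before the
 * caller proceeds; the value read back is not inspected here.
 */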
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}
/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
}
/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:  ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
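
/*
 * Note on ipr_send_command(): on SIS-64 adapters the low-order bits of the
 * IOARCB bus address written to the IOARRIN register double as a size code,
 * which relies on the IOARCB allocation being aligned so those bits are
 * otherwise zero. From the code above: 0x1 selects the default 256 byte
 * IOARCB, and 0x4 is additionally set when the scatter list no longer fits,
 * selecting the 512 byte format. A sketch of the encoding (illustrative only):
 *
 *	send_dma_addr = ipr_cmd->dma_addr | 0x1;	/ * 256 byte IOARCB * /
 *	if (sg_bytes > 128)
 *		send_dma_addr |= 0x4;			/ * 512 byte IOARCB * /
 */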
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}
/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * buffer.
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}
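
/*
 * ipr_init_ioadl() above is the single-descriptor fast path: it is meant for
 * internally generated commands that transfer exactly one contiguous DMA
 * buffer, such as the HCAM buffers mapped in ipr_send_hcam() below, rather
 * than for scatter/gather I/O built from the SCSI mid-layer.
 */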
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct timer_list *),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}
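
/*
 * ipr_get_hrrq_index() spreads commands across the host response queues.
 * Queue 0 (IPR_INIT_HRRQ) is used for internal/initialization commands (see
 * ipr_get_free_ipr_cmnd() above), so when more than one queue exists the
 * round-robin above maps a running counter onto queues 1..hrrq_num-1; with a
 * single queue everything goes to queue 0. For example, with hrrq_num = 4
 * the returned index cycles through queues 1, 2 and 3.
 */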
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
					sizeof(cfgtew->u.cfgte64->dev_id)) &&
			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
					sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}
/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}
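
/*
 * For illustration, __ipr_format_res_path() renders each resource path byte
 * as two upper-case hex digits separated by dashes, stopping at the 0xff
 * terminator. A hypothetical path of { 0x00, 0x0A, 0x03, 0xff, ... } would
 * therefore be formatted as "00-0A-03", and ipr_format_res_path() below
 * prefixes that with the SCSI host number, e.g. "2/00-0A-03".
 */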
/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (buffer - p));

	return buffer;
}
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * @res:	resource entry struct
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;
	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	/* Find the resource processed */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del_init(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
		    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:		string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
/**
 * ipr_log_vpd_compact - Log the passed VPD compactly.
 * @prefix:		string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn struct
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}
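
/*
 * For illustration (hypothetical values): with a vendor ID of "IBM     ",
 * a product ID of "VSBPD1H   U3SCSI" and a serial number of "0123456789AB",
 * ipr_log_vpd_compact() strips only the trailing blanks of each field and
 * logs a single line of the form
 *
 *	<prefix> VPID/SN: IBM VSBPD1H   U3SCSI 0123456789AB
 *
 * via ipr_hcam_err().
 */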
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:		string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn/wwn struct
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:		vendor/product id/sn/wwn struct
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_sis64_config_error - Log a device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err("Device %d : %s", i + 1,
			__ipr_format_res_path(dev_entry->res_path,
					      buffer, sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
1832 * ipr_log_enhanced_array_error - Log an array configuration error.
1833 * @ioa_cfg: ioa config struct
1834 * @hostrcb: hostrcb struct
1839 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg
*ioa_cfg
,
1840 struct ipr_hostrcb
*hostrcb
)
1843 struct ipr_hostrcb_type_14_error
*error
;
1844 struct ipr_hostrcb_array_data_entry_enhanced
*array_entry
;
1845 const u8 zero_sn
[IPR_SERIAL_NUM_LEN
] = { [0 ... IPR_SERIAL_NUM_LEN
-1] = '0' };
1847 error
= &hostrcb
->hcam
.u
.error
.u
.type_14_error
;
1851 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1852 error
->protection_level
,
1853 ioa_cfg
->host
->host_no
,
1854 error
->last_func_vset_res_addr
.bus
,
1855 error
->last_func_vset_res_addr
.target
,
1856 error
->last_func_vset_res_addr
.lun
);
1860 array_entry
= error
->array_member
;
1861 num_entries
= min_t(u32
, be32_to_cpu(error
->num_entries
),
1862 ARRAY_SIZE(error
->array_member
));
1864 for (i
= 0; i
< num_entries
; i
++, array_entry
++) {
1865 if (!memcmp(array_entry
->vpd
.vpd
.sn
, zero_sn
, IPR_SERIAL_NUM_LEN
))
1868 if (be32_to_cpu(error
->exposed_mode_adn
) == i
)
1869 ipr_err("Exposed Array Member %d:\n", i
);
1871 ipr_err("Array Member %d:\n", i
);
1873 ipr_log_ext_vpd(&array_entry
->vpd
);
1874 ipr_phys_res_err(ioa_cfg
, array_entry
->dev_res_addr
, "Current Location");
1875 ipr_phys_res_err(ioa_cfg
, array_entry
->expected_dev_res_addr
,
1876 "Expected Location");
1883 * ipr_log_array_error - Log an array configuration error.
1884 * @ioa_cfg: ioa config struct
1885 * @hostrcb: hostrcb struct
1890 static void ipr_log_array_error(struct ipr_ioa_cfg
*ioa_cfg
,
1891 struct ipr_hostrcb
*hostrcb
)
1894 struct ipr_hostrcb_type_04_error
*error
;
1895 struct ipr_hostrcb_array_data_entry
*array_entry
;
1896 const u8 zero_sn
[IPR_SERIAL_NUM_LEN
] = { [0 ... IPR_SERIAL_NUM_LEN
-1] = '0' };
1898 error
= &hostrcb
->hcam
.u
.error
.u
.type_04_error
;
1902 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1903 error
->protection_level
,
1904 ioa_cfg
->host
->host_no
,
1905 error
->last_func_vset_res_addr
.bus
,
1906 error
->last_func_vset_res_addr
.target
,
1907 error
->last_func_vset_res_addr
.lun
);
1911 array_entry
= error
->array_member
;
1913 for (i
= 0; i
< 18; i
++) {
1914 if (!memcmp(array_entry
->vpd
.sn
, zero_sn
, IPR_SERIAL_NUM_LEN
))
1917 if (be32_to_cpu(error
->exposed_mode_adn
) == i
)
1918 ipr_err("Exposed Array Member %d:\n", i
);
1920 ipr_err("Array Member %d:\n", i
);
1922 ipr_log_vpd(&array_entry
->vpd
);
1924 ipr_phys_res_err(ioa_cfg
, array_entry
->dev_res_addr
, "Current Location");
1925 ipr_phys_res_err(ioa_cfg
, array_entry
->expected_dev_res_addr
,
1926 "Expected Location");
1931 array_entry
= error
->array_member2
;
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
{
        int i;

        if (len == 0)
                return;

        if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
                len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

        for (i = 0; i < len / 4; i += 4) {
                ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
                        be32_to_cpu(data[i]),
                        be32_to_cpu(data[i+1]),
                        be32_to_cpu(data[i+2]),
                        be32_to_cpu(data[i+3]));
        }
}
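/*
 * Illustrative only: each line logged by the loop above carries a byte
 * offset followed by four big-endian words, and since i advances four
 * words per line while the label is printed as i*4, consecutive labels
 * differ by 0x10, e.g.
 *
 *	00000000: 00000001 00000002 00000003 00000004
 *	00000010: 00000005 00000006 00000007 00000008
 */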
1966 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1967 * @ioa_cfg: ioa config struct
1968 * @hostrcb: hostrcb struct
1973 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg
*ioa_cfg
,
1974 struct ipr_hostrcb
*hostrcb
)
1976 struct ipr_hostrcb_type_17_error
*error
;
1979 error
= &hostrcb
->hcam
.u
.error64
.u
.type_17_error
;
1981 error
= &hostrcb
->hcam
.u
.error
.u
.type_17_error
;
1983 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
1984 strim(error
->failure_reason
);
1986 ipr_hcam_err(hostrcb
, "%s [PRC: %08X]\n", error
->failure_reason
,
1987 be32_to_cpu(hostrcb
->hcam
.u
.error
.prc
));
1988 ipr_log_ext_vpd_compact("Remote IOA", hostrcb
, &error
->vpd
);
1989 ipr_log_hex_data(ioa_cfg
, error
->data
,
1990 be32_to_cpu(hostrcb
->hcam
.length
) -
1991 (offsetof(struct ipr_hostrcb_error
, u
) +
1992 offsetof(struct ipr_hostrcb_type_17_error
, data
)));
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
                                   struct ipr_hostrcb *hostrcb)
{
        struct ipr_hostrcb_type_07_error *error;

        error = &hostrcb->hcam.u.error.u.type_07_error;
        error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
        strim(error->failure_reason);

        ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
                     be32_to_cpu(hostrcb->hcam.u.error.prc));
        ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
        ipr_log_hex_data(ioa_cfg, error->data,
                         be32_to_cpu(hostrcb->hcam.length) -
                         (offsetof(struct ipr_hostrcb_error, u) +
                          offsetof(struct ipr_hostrcb_type_07_error, data)));
}
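/*
 * Note on the length passed to ipr_log_hex_data() above: hcam.length covers
 * the whole error record, so subtracting the offset of the error union
 * inside struct ipr_hostrcb_error plus the offset of the 'data' member
 * inside the type-specific overlay leaves just the trailing bytes to dump.
 * The enhanced (type 17) variant above uses the same calculation.
 */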
static const struct {
        u8 active;
        char *desc;
} path_active_desc[] = {
        { IPR_PATH_NO_INFO, "Path" },
        { IPR_PATH_ACTIVE, "Active path" },
        { IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
        u8 state;
        char *desc;
} path_state_desc[] = {
        { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
        { IPR_PATH_HEALTHY, "is healthy" },
        { IPR_PATH_DEGRADED, "is degraded" },
        { IPR_PATH_FAILED, "is failed" }
};
2041 * ipr_log_fabric_path - Log a fabric path error
2042 * @hostrcb: hostrcb struct
2043 * @fabric: fabric descriptor
2048 static void ipr_log_fabric_path(struct ipr_hostrcb
*hostrcb
,
2049 struct ipr_hostrcb_fabric_desc
*fabric
)
2052 u8 path_state
= fabric
->path_state
;
2053 u8 active
= path_state
& IPR_PATH_ACTIVE_MASK
;
2054 u8 state
= path_state
& IPR_PATH_STATE_MASK
;
2056 for (i
= 0; i
< ARRAY_SIZE(path_active_desc
); i
++) {
2057 if (path_active_desc
[i
].active
!= active
)
2060 for (j
= 0; j
< ARRAY_SIZE(path_state_desc
); j
++) {
2061 if (path_state_desc
[j
].state
!= state
)
2064 if (fabric
->cascaded_expander
== 0xff && fabric
->phy
== 0xff) {
2065 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d\n",
2066 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2068 } else if (fabric
->cascaded_expander
== 0xff) {
2069 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d, Phy=%d\n",
2070 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2071 fabric
->ioa_port
, fabric
->phy
);
2072 } else if (fabric
->phy
== 0xff) {
2073 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d, Cascade=%d\n",
2074 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2075 fabric
->ioa_port
, fabric
->cascaded_expander
);
2077 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2078 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2079 fabric
->ioa_port
, fabric
->cascaded_expander
, fabric
->phy
);
2085 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state
,
2086 fabric
->ioa_port
, fabric
->cascaded_expander
, fabric
->phy
);
2090 * ipr_log64_fabric_path - Log a fabric path error
2091 * @hostrcb: hostrcb struct
2092 * @fabric: fabric descriptor
2097 static void ipr_log64_fabric_path(struct ipr_hostrcb
*hostrcb
,
2098 struct ipr_hostrcb64_fabric_desc
*fabric
)
2101 u8 path_state
= fabric
->path_state
;
2102 u8 active
= path_state
& IPR_PATH_ACTIVE_MASK
;
2103 u8 state
= path_state
& IPR_PATH_STATE_MASK
;
2104 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2106 for (i
= 0; i
< ARRAY_SIZE(path_active_desc
); i
++) {
2107 if (path_active_desc
[i
].active
!= active
)
2110 for (j
= 0; j
< ARRAY_SIZE(path_state_desc
); j
++) {
2111 if (path_state_desc
[j
].state
!= state
)
2114 ipr_hcam_err(hostrcb
, "%s %s: Resource Path=%s\n",
2115 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2116 ipr_format_res_path(hostrcb
->ioa_cfg
,
2118 buffer
, sizeof(buffer
)));
2123 ipr_err("Path state=%02X Resource Path=%s\n", path_state
,
2124 ipr_format_res_path(hostrcb
->ioa_cfg
, fabric
->res_path
,
2125 buffer
, sizeof(buffer
)));
2128 static const struct {
2131 } path_type_desc
[] = {
2132 { IPR_PATH_CFG_IOA_PORT
, "IOA port" },
2133 { IPR_PATH_CFG_EXP_PORT
, "Expander port" },
2134 { IPR_PATH_CFG_DEVICE_PORT
, "Device port" },
2135 { IPR_PATH_CFG_DEVICE_LUN
, "Device LUN" }
2138 static const struct {
2141 } path_status_desc
[] = {
2142 { IPR_PATH_CFG_NO_PROB
, "Functional" },
2143 { IPR_PATH_CFG_DEGRADED
, "Degraded" },
2144 { IPR_PATH_CFG_FAILED
, "Failed" },
2145 { IPR_PATH_CFG_SUSPECT
, "Suspect" },
2146 { IPR_PATH_NOT_DETECTED
, "Missing" },
2147 { IPR_PATH_INCORRECT_CONN
, "Incorrectly connected" }
2150 static const char *link_rate
[] = {
2153 "phy reset problem",
2170 * ipr_log_path_elem - Log a fabric path element.
2171 * @hostrcb: hostrcb struct
2172 * @cfg: fabric path element struct
2177 static void ipr_log_path_elem(struct ipr_hostrcb
*hostrcb
,
2178 struct ipr_hostrcb_config_element
*cfg
)
2181 u8 type
= cfg
->type_status
& IPR_PATH_CFG_TYPE_MASK
;
2182 u8 status
= cfg
->type_status
& IPR_PATH_CFG_STATUS_MASK
;
2184 if (type
== IPR_PATH_CFG_NOT_EXIST
)
2187 for (i
= 0; i
< ARRAY_SIZE(path_type_desc
); i
++) {
2188 if (path_type_desc
[i
].type
!= type
)
2191 for (j
= 0; j
< ARRAY_SIZE(path_status_desc
); j
++) {
2192 if (path_status_desc
[j
].status
!= status
)
2195 if (type
== IPR_PATH_CFG_IOA_PORT
) {
2196 ipr_hcam_err(hostrcb
, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2197 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2198 cfg
->phy
, link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2199 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2201 if (cfg
->cascaded_expander
== 0xff && cfg
->phy
== 0xff) {
2202 ipr_hcam_err(hostrcb
, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2203 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2204 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2205 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2206 } else if (cfg
->cascaded_expander
== 0xff) {
2207 ipr_hcam_err(hostrcb
, "%s %s: Phy=%d, Link rate=%s, "
2208 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2209 path_type_desc
[i
].desc
, cfg
->phy
,
2210 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2211 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2212 } else if (cfg
->phy
== 0xff) {
2213 ipr_hcam_err(hostrcb
, "%s %s: Cascade=%d, Link rate=%s, "
2214 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2215 path_type_desc
[i
].desc
, cfg
->cascaded_expander
,
2216 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2217 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2219 ipr_hcam_err(hostrcb
, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2220 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2221 path_type_desc
[i
].desc
, cfg
->cascaded_expander
, cfg
->phy
,
2222 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2223 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2230 ipr_hcam_err(hostrcb
, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2231 "WWN=%08X%08X\n", cfg
->type_status
, cfg
->cascaded_expander
, cfg
->phy
,
2232 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2233 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2237 * ipr_log64_path_elem - Log a fabric path element.
2238 * @hostrcb: hostrcb struct
2239 * @cfg: fabric path element struct
2244 static void ipr_log64_path_elem(struct ipr_hostrcb
*hostrcb
,
2245 struct ipr_hostrcb64_config_element
*cfg
)
2248 u8 desc_id
= cfg
->descriptor_id
& IPR_DESCRIPTOR_MASK
;
2249 u8 type
= cfg
->type_status
& IPR_PATH_CFG_TYPE_MASK
;
2250 u8 status
= cfg
->type_status
& IPR_PATH_CFG_STATUS_MASK
;
2251 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2253 if (type
== IPR_PATH_CFG_NOT_EXIST
|| desc_id
!= IPR_DESCRIPTOR_SIS64
)
2256 for (i
= 0; i
< ARRAY_SIZE(path_type_desc
); i
++) {
2257 if (path_type_desc
[i
].type
!= type
)
2260 for (j
= 0; j
< ARRAY_SIZE(path_status_desc
); j
++) {
2261 if (path_status_desc
[j
].status
!= status
)
2264 ipr_hcam_err(hostrcb
, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2265 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2266 ipr_format_res_path(hostrcb
->ioa_cfg
,
2267 cfg
->res_path
, buffer
, sizeof(buffer
)),
2268 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2269 be32_to_cpu(cfg
->wwid
[0]),
2270 be32_to_cpu(cfg
->wwid
[1]));
2274 ipr_hcam_err(hostrcb
, "Path element=%02X: Resource Path=%s, Link rate=%s "
2275 "WWN=%08X%08X\n", cfg
->type_status
,
2276 ipr_format_res_path(hostrcb
->ioa_cfg
,
2277 cfg
->res_path
, buffer
, sizeof(buffer
)),
2278 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2279 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2283 * ipr_log_fabric_error - Log a fabric error.
2284 * @ioa_cfg: ioa config struct
2285 * @hostrcb: hostrcb struct
2290 static void ipr_log_fabric_error(struct ipr_ioa_cfg
*ioa_cfg
,
2291 struct ipr_hostrcb
*hostrcb
)
2293 struct ipr_hostrcb_type_20_error
*error
;
2294 struct ipr_hostrcb_fabric_desc
*fabric
;
2295 struct ipr_hostrcb_config_element
*cfg
;
2298 error
= &hostrcb
->hcam
.u
.error
.u
.type_20_error
;
2299 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
2300 ipr_hcam_err(hostrcb
, "%s\n", error
->failure_reason
);
2302 add_len
= be32_to_cpu(hostrcb
->hcam
.length
) -
2303 (offsetof(struct ipr_hostrcb_error
, u
) +
2304 offsetof(struct ipr_hostrcb_type_20_error
, desc
));
2306 for (i
= 0, fabric
= error
->desc
; i
< error
->num_entries
; i
++) {
2307 ipr_log_fabric_path(hostrcb
, fabric
);
2308 for_each_fabric_cfg(fabric
, cfg
)
2309 ipr_log_path_elem(hostrcb
, cfg
);
2311 add_len
-= be16_to_cpu(fabric
->length
);
2312 fabric
= (struct ipr_hostrcb_fabric_desc
*)
2313 ((unsigned long)fabric
+ be16_to_cpu(fabric
->length
));
2316 ipr_log_hex_data(ioa_cfg
, (__be32
*)fabric
, add_len
);
2320 * ipr_log_sis64_array_error - Log a sis64 array error.
2321 * @ioa_cfg: ioa config struct
2322 * @hostrcb: hostrcb struct
2327 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg
*ioa_cfg
,
2328 struct ipr_hostrcb
*hostrcb
)
2331 struct ipr_hostrcb_type_24_error
*error
;
2332 struct ipr_hostrcb64_array_data_entry
*array_entry
;
2333 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2334 const u8 zero_sn
[IPR_SERIAL_NUM_LEN
] = { [0 ... IPR_SERIAL_NUM_LEN
-1] = '0' };
2336 error
= &hostrcb
->hcam
.u
.error64
.u
.type_24_error
;
2340 ipr_err("RAID %s Array Configuration: %s\n",
2341 error
->protection_level
,
2342 ipr_format_res_path(ioa_cfg
, error
->last_res_path
,
2343 buffer
, sizeof(buffer
)));
2347 array_entry
= error
->array_member
;
2348 num_entries
= min_t(u32
, error
->num_entries
,
2349 ARRAY_SIZE(error
->array_member
));
2351 for (i
= 0; i
< num_entries
; i
++, array_entry
++) {
2353 if (!memcmp(array_entry
->vpd
.vpd
.sn
, zero_sn
, IPR_SERIAL_NUM_LEN
))
2356 if (error
->exposed_mode_adn
== i
)
2357 ipr_err("Exposed Array Member %d:\n", i
);
2359 ipr_err("Array Member %d:\n", i
);
2361 ipr_err("Array Member %d:\n", i
);
2362 ipr_log_ext_vpd(&array_entry
->vpd
);
2363 ipr_err("Current Location: %s\n",
2364 ipr_format_res_path(ioa_cfg
, array_entry
->res_path
,
2365 buffer
, sizeof(buffer
)));
2366 ipr_err("Expected Location: %s\n",
2367 ipr_format_res_path(ioa_cfg
,
2368 array_entry
->expected_res_path
,
2369 buffer
, sizeof(buffer
)));
2376 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2377 * @ioa_cfg: ioa config struct
2378 * @hostrcb: hostrcb struct
2383 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg
*ioa_cfg
,
2384 struct ipr_hostrcb
*hostrcb
)
2386 struct ipr_hostrcb_type_30_error
*error
;
2387 struct ipr_hostrcb64_fabric_desc
*fabric
;
2388 struct ipr_hostrcb64_config_element
*cfg
;
2391 error
= &hostrcb
->hcam
.u
.error64
.u
.type_30_error
;
2393 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
2394 ipr_hcam_err(hostrcb
, "%s\n", error
->failure_reason
);
2396 add_len
= be32_to_cpu(hostrcb
->hcam
.length
) -
2397 (offsetof(struct ipr_hostrcb64_error
, u
) +
2398 offsetof(struct ipr_hostrcb_type_30_error
, desc
));
2400 for (i
= 0, fabric
= error
->desc
; i
< error
->num_entries
; i
++) {
2401 ipr_log64_fabric_path(hostrcb
, fabric
);
2402 for_each_fabric_cfg(fabric
, cfg
)
2403 ipr_log64_path_elem(hostrcb
, cfg
);
2405 add_len
-= be16_to_cpu(fabric
->length
);
2406 fabric
= (struct ipr_hostrcb64_fabric_desc
*)
2407 ((unsigned long)fabric
+ be16_to_cpu(fabric
->length
));
2410 ipr_log_hex_data(ioa_cfg
, (__be32
*)fabric
, add_len
);
/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
                                  struct ipr_hostrcb *hostrcb)
{
        ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
                         be32_to_cpu(hostrcb->hcam.length));
}
 * ipr_log_sis64_device_error - Log a device error.
2430 * @ioa_cfg: ioa config struct
2431 * @hostrcb: hostrcb struct
2436 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg
*ioa_cfg
,
2437 struct ipr_hostrcb
*hostrcb
)
2439 struct ipr_hostrcb_type_21_error
*error
;
2440 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2442 error
= &hostrcb
->hcam
.u
.error64
.u
.type_21_error
;
2444 ipr_err("-----Failing Device Information-----\n");
2445 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2446 be32_to_cpu(error
->wwn
[0]), be32_to_cpu(error
->wwn
[1]),
2447 be32_to_cpu(error
->wwn
[2]), be32_to_cpu(error
->wwn
[3]));
2448 ipr_err("Device Resource Path: %s\n",
2449 __ipr_format_res_path(error
->res_path
,
2450 buffer
, sizeof(buffer
)));
2451 error
->primary_problem_desc
[sizeof(error
->primary_problem_desc
) - 1] = '\0';
2452 error
->second_problem_desc
[sizeof(error
->second_problem_desc
) - 1] = '\0';
2453 ipr_err("Primary Problem Description: %s\n", error
->primary_problem_desc
);
2454 ipr_err("Secondary Problem Description: %s\n", error
->second_problem_desc
);
2455 ipr_err("SCSI Sense Data:\n");
2456 ipr_log_hex_data(ioa_cfg
, error
->sense_data
, sizeof(error
->sense_data
));
2457 ipr_err("SCSI Command Descriptor Block: \n");
2458 ipr_log_hex_data(ioa_cfg
, error
->cdb
, sizeof(error
->cdb
));
2460 ipr_err("Additional IOA Data:\n");
2461 ipr_log_hex_data(ioa_cfg
, error
->ioa_data
, be32_to_cpu(error
->length_of_error
));
/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC to look up
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
                if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
                        return i;

        return 0;
}
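/*
 * Roughly how the lookup result is consumed (this mirrors
 * ipr_handle_log_data() below and is shown only for orientation):
 *
 *	error_index = ipr_get_error(ioasc);
 *	if (!ipr_error_table[error_index].log_hcam)
 *		return;
 *	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 *
 * Because the IOASC is masked with IPR_IOASC_IOASC_MASK before the compare,
 * table entries only need to list the base error code.
 */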
2487 * ipr_handle_log_data - Log an adapter error.
2488 * @ioa_cfg: ioa config struct
2489 * @hostrcb: hostrcb struct
2491 * This function logs an adapter error to the system.
2496 static void ipr_handle_log_data(struct ipr_ioa_cfg
*ioa_cfg
,
2497 struct ipr_hostrcb
*hostrcb
)
2501 struct ipr_hostrcb_type_21_error
*error
;
2503 if (hostrcb
->hcam
.notify_type
!= IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY
)
2506 if (hostrcb
->hcam
.notifications_lost
== IPR_HOST_RCB_NOTIFICATIONS_LOST
)
2507 dev_err(&ioa_cfg
->pdev
->dev
, "Error notifications lost\n");
2510 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error64
.fd_ioasc
);
2512 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
2514 if (!ioa_cfg
->sis64
&& (ioasc
== IPR_IOASC_BUS_WAS_RESET
||
2515 ioasc
== IPR_IOASC_BUS_WAS_RESET_BY_OTHER
)) {
2516 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2517 scsi_report_bus_reset(ioa_cfg
->host
,
2518 hostrcb
->hcam
.u
.error
.fd_res_addr
.bus
);
2521 error_index
= ipr_get_error(ioasc
);
2523 if (!ipr_error_table
[error_index
].log_hcam
)
2526 if (ioasc
== IPR_IOASC_HW_CMD_FAILED
&&
2527 hostrcb
->hcam
.overlay_id
== IPR_HOST_RCB_OVERLAY_ID_21
) {
2528 error
= &hostrcb
->hcam
.u
.error64
.u
.type_21_error
;
2530 if (((be32_to_cpu(error
->sense_data
[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST
&&
2531 ioa_cfg
->log_level
<= IPR_DEFAULT_LOG_LEVEL
)
2535 ipr_hcam_err(hostrcb
, "%s\n", ipr_error_table
[error_index
].error
);
2537 /* Set indication we have logged an error */
2538 ioa_cfg
->errors_logged
++;
2540 if (ioa_cfg
->log_level
< ipr_error_table
[error_index
].log_hcam
)
2542 if (be32_to_cpu(hostrcb
->hcam
.length
) > sizeof(hostrcb
->hcam
.u
.raw
))
2543 hostrcb
->hcam
.length
= cpu_to_be32(sizeof(hostrcb
->hcam
.u
.raw
));
2545 switch (hostrcb
->hcam
.overlay_id
) {
2546 case IPR_HOST_RCB_OVERLAY_ID_2
:
2547 ipr_log_cache_error(ioa_cfg
, hostrcb
);
2549 case IPR_HOST_RCB_OVERLAY_ID_3
:
2550 ipr_log_config_error(ioa_cfg
, hostrcb
);
2552 case IPR_HOST_RCB_OVERLAY_ID_4
:
2553 case IPR_HOST_RCB_OVERLAY_ID_6
:
2554 ipr_log_array_error(ioa_cfg
, hostrcb
);
2556 case IPR_HOST_RCB_OVERLAY_ID_7
:
2557 ipr_log_dual_ioa_error(ioa_cfg
, hostrcb
);
2559 case IPR_HOST_RCB_OVERLAY_ID_12
:
2560 ipr_log_enhanced_cache_error(ioa_cfg
, hostrcb
);
2562 case IPR_HOST_RCB_OVERLAY_ID_13
:
2563 ipr_log_enhanced_config_error(ioa_cfg
, hostrcb
);
2565 case IPR_HOST_RCB_OVERLAY_ID_14
:
2566 case IPR_HOST_RCB_OVERLAY_ID_16
:
2567 ipr_log_enhanced_array_error(ioa_cfg
, hostrcb
);
2569 case IPR_HOST_RCB_OVERLAY_ID_17
:
2570 ipr_log_enhanced_dual_ioa_error(ioa_cfg
, hostrcb
);
2572 case IPR_HOST_RCB_OVERLAY_ID_20
:
2573 ipr_log_fabric_error(ioa_cfg
, hostrcb
);
2575 case IPR_HOST_RCB_OVERLAY_ID_21
:
2576 ipr_log_sis64_device_error(ioa_cfg
, hostrcb
);
2578 case IPR_HOST_RCB_OVERLAY_ID_23
:
2579 ipr_log_sis64_config_error(ioa_cfg
, hostrcb
);
2581 case IPR_HOST_RCB_OVERLAY_ID_24
:
2582 case IPR_HOST_RCB_OVERLAY_ID_26
:
2583 ipr_log_sis64_array_error(ioa_cfg
, hostrcb
);
2585 case IPR_HOST_RCB_OVERLAY_ID_30
:
2586 ipr_log_sis64_fabric_error(ioa_cfg
, hostrcb
);
2588 case IPR_HOST_RCB_OVERLAY_ID_1
:
2589 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT
:
2591 ipr_log_generic_error(ioa_cfg
, hostrcb
);
static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
{
        struct ipr_hostrcb *hostrcb;

        hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
                                           struct ipr_hostrcb, queue);

        if (unlikely(!hostrcb)) {
                dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
                hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
                                                   struct ipr_hostrcb, queue);
        }

        list_del_init(&hostrcb->queue);
        return hostrcb;
}
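/*
 * If the free queue is empty, the routine above reclaims the oldest buffer
 * from hostrcb_report_q, i.e. an error record still queued for reporting to
 * user space is sacrificed so that a buffer is always available for the
 * next HCAM from the adapter.
 */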
2614 * ipr_process_error - Op done function for an adapter error log.
2615 * @ipr_cmd: ipr command struct
2617 * This function is the op done function for an error log host
2618 * controlled async from the adapter. It will log the error and
2619 * send the HCAM back to the adapter.
2624 static void ipr_process_error(struct ipr_cmnd
*ipr_cmd
)
2626 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
2627 struct ipr_hostrcb
*hostrcb
= ipr_cmd
->u
.hostrcb
;
2628 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
2632 fd_ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error64
.fd_ioasc
);
2634 fd_ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
2636 list_del_init(&hostrcb
->queue
);
2637 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
2640 ipr_handle_log_data(ioa_cfg
, hostrcb
);
2641 if (fd_ioasc
== IPR_IOASC_NR_IOA_RESET_REQUIRED
)
2642 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_ABBREV
);
2643 } else if (ioasc
!= IPR_IOASC_IOA_WAS_RESET
&&
2644 ioasc
!= IPR_IOASC_ABORTED_CMD_TERM_BY_HOST
) {
2645 dev_err(&ioa_cfg
->pdev
->dev
,
2646 "Host RCB failed with IOASC: 0x%08X\n", ioasc
);
2649 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_report_q
);
2650 schedule_work(&ioa_cfg
->work_q
);
2651 hostrcb
= ipr_get_free_hostrcb(ioa_cfg
);
2653 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_LOG_DATA
, hostrcb
);
/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct timer_list *t)
{
        struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
        unsigned long lock_flags = 0;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

        ioa_cfg->errors_logged++;
        dev_err(&ioa_cfg->pdev->dev,
                "Adapter being reset due to command timeout.\n");

        if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
                ioa_cfg->sdt_state = GET_DUMP;

        if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
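/*
 * The reset_cmd comparison above keeps a timed out command from starting a
 * second, nested adapter reset: while a reset/reload is already in
 * progress, only the command that owns the current reset may initiate a
 * new one.
 */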
2690 * ipr_oper_timeout - Adapter timed out transitioning to operational
2691 * @ipr_cmd: ipr command struct
2693 * This function blocks host requests and initiates an
2699 static void ipr_oper_timeout(struct timer_list
*t
)
2701 struct ipr_cmnd
*ipr_cmd
= from_timer(ipr_cmd
, t
, timer
);
2702 unsigned long lock_flags
= 0;
2703 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
2706 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
2708 ioa_cfg
->errors_logged
++;
2709 dev_err(&ioa_cfg
->pdev
->dev
,
2710 "Adapter timed out transitioning to operational.\n");
2712 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
2713 ioa_cfg
->sdt_state
= GET_DUMP
;
2715 if (!ioa_cfg
->in_reset_reload
|| ioa_cfg
->reset_cmd
== ipr_cmd
) {
2717 ioa_cfg
->reset_retries
+= IPR_NUM_RESET_RELOAD_RETRIES
;
2718 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
2721 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
        int i, j, matches;
        struct ipr_std_inq_vpids *vpids;
        const struct ipr_ses_table_entry *ste = ipr_ses_table;

        for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
                for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
                        if (ste->compare_product_id_byte[j] == 'X') {
                                vpids = &res->std_inq_data.vpids;
                                if (vpids->product_id[j] == ste->product_id[j])
                                        matches++;
                                else
                                        break;
                        } else
                                matches++;
                }

                if (matches == IPR_PROD_ID_LEN)
                        return ste;
        }

        return NULL;
}
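/*
 * In ipr_ses_table, compare_product_id_byte selects which product ID bytes
 * participate in the match: a position marked 'X' must equal the table
 * entry's product_id byte, while any other position is effectively a
 * "don't care" and counts toward the match unconditionally.  Only when all
 * IPR_PROD_ID_LEN positions match is the entry returned.
 */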
/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
        struct ipr_resource_entry *res;
        const struct ipr_ses_table_entry *ste;
        u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

        /* Loop through each config table entry in the config table buffer */
        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
                if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
                        continue;

                if (bus != res->bus)
                        continue;

                if (!(ste = ipr_find_ses_entry(res)))
                        continue;

                max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
        }

        return max_xfer_rate;
}
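/*
 * Worked example of the units (numbers illustrative): a return value of
 * 1600 means 160 MHz, per the 100KHz units described above; on a 2-byte
 * wide bus that corresponds to 320 MB/sec.  The rescale
 * (max_bus_speed_limit * 10) / (bus_width / 8) implies the SES table limit
 * is a MB/sec figure, e.g. a 320 MB/sec limit on a 16-bit bus yields
 * 3200 / 2 = 1600.
 */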
2794 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2795 * @ioa_cfg: ioa config struct
2796 * @max_delay: max delay in micro-seconds to wait
2798 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2801 * 0 on success / other on failure
2803 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg
*ioa_cfg
, int max_delay
)
2805 volatile u32 pcii_reg
;
2808 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2809 while (delay
< max_delay
) {
2810 pcii_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
2812 if (pcii_reg
& IPR_PCII_IO_DEBUG_ACKNOWLEDGE
)
2815 /* udelay cannot be used if delay is more than a few milliseconds */
2816 if ((delay
/ 1000) > MAX_UDELAY_MS
)
2817 mdelay(delay
/ 1000);
/**
 * ipr_get_sis64_dump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:			destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
                                           u32 start_addr,
                                           __be32 *dest, u32 length_in_words)
{
        int i;

        for (i = 0; i < length_in_words; i++) {
                writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
                *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
                dest++;
        }

        return 0;
}
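/*
 * The SIS-64 dump path above is a simple indirect register read loop: the
 * adapter address of each word is written to dump_addr_reg and the word is
 * then read back through dump_data_reg.  Contrast this with the legacy
 * path in ipr_get_ldump_data_section() below, which must handshake every
 * word through the mailbox and the IO debug acknowledge bit.
 */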
2852 * ipr_get_ldump_data_section - Dump IOA memory
2853 * @ioa_cfg: ioa config struct
2854 * @start_addr: adapter address to dump
2855 * @dest: destination kernel buffer
2856 * @length_in_words: length to dump in 4 byte words
2859 * 0 on success / -EIO on failure
2861 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg
*ioa_cfg
,
2863 __be32
*dest
, u32 length_in_words
)
2865 volatile u32 temp_pcii_reg
;
2869 return ipr_get_sis64_dump_data_section(ioa_cfg
, start_addr
,
2870 dest
, length_in_words
);
2872 /* Write IOA interrupt reg starting LDUMP state */
2873 writel((IPR_UPROCI_RESET_ALERT
| IPR_UPROCI_IO_DEBUG_ALERT
),
2874 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2876 /* Wait for IO debug acknowledge */
2877 if (ipr_wait_iodbg_ack(ioa_cfg
,
2878 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC
)) {
2879 dev_err(&ioa_cfg
->pdev
->dev
,
2880 "IOA dump long data transfer timeout\n");
2884 /* Signal LDUMP interlocked - clear IO debug ack */
2885 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2886 ioa_cfg
->regs
.clr_interrupt_reg
);
2888 /* Write Mailbox with starting address */
2889 writel(start_addr
, ioa_cfg
->ioa_mailbox
);
2891 /* Signal address valid - clear IOA Reset alert */
2892 writel(IPR_UPROCI_RESET_ALERT
,
2893 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2895 for (i
= 0; i
< length_in_words
; i
++) {
2896 /* Wait for IO debug acknowledge */
2897 if (ipr_wait_iodbg_ack(ioa_cfg
,
2898 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
)) {
2899 dev_err(&ioa_cfg
->pdev
->dev
,
2900 "IOA dump short data transfer timeout\n");
2904 /* Read data from mailbox and increment destination pointer */
2905 *dest
= cpu_to_be32(readl(ioa_cfg
->ioa_mailbox
));
2908 /* For all but the last word of data, signal data received */
2909 if (i
< (length_in_words
- 1)) {
2910 /* Signal dump data received - Clear IO debug Ack */
2911 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2912 ioa_cfg
->regs
.clr_interrupt_reg
);
2916 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2917 writel(IPR_UPROCI_RESET_ALERT
,
2918 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2920 writel(IPR_UPROCI_IO_DEBUG_ALERT
,
2921 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2923 /* Signal dump data received - Clear IO debug Ack */
2924 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2925 ioa_cfg
->regs
.clr_interrupt_reg
);
2927 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2928 while (delay
< IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
) {
2930 readl(ioa_cfg
->regs
.sense_uproc_interrupt_reg32
);
2932 if (!(temp_pcii_reg
& IPR_UPROCI_RESET_ALERT
))
2942 #ifdef CONFIG_SCSI_IPR_DUMP
2944 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2945 * @ioa_cfg: ioa config struct
2946 * @pci_address: adapter address
2947 * @length: length of data to copy
2949 * Copy data from PCI adapter to kernel buffer.
2950 * Note: length MUST be a 4 byte multiple
2952 * 0 on success / other on failure
2954 static int ipr_sdt_copy(struct ipr_ioa_cfg
*ioa_cfg
,
2955 unsigned long pci_address
, u32 length
)
2957 int bytes_copied
= 0;
2958 int cur_len
, rc
, rem_len
, rem_page_len
, max_dump_size
;
2960 unsigned long lock_flags
= 0;
2961 struct ipr_ioa_dump
*ioa_dump
= &ioa_cfg
->dump
->ioa_dump
;
2964 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
2966 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
2968 while (bytes_copied
< length
&&
2969 (ioa_dump
->hdr
.len
+ bytes_copied
) < max_dump_size
) {
2970 if (ioa_dump
->page_offset
>= PAGE_SIZE
||
2971 ioa_dump
->page_offset
== 0) {
2972 page
= (__be32
*)__get_free_page(GFP_ATOMIC
);
2976 return bytes_copied
;
2979 ioa_dump
->page_offset
= 0;
2980 ioa_dump
->ioa_data
[ioa_dump
->next_page_index
] = page
;
2981 ioa_dump
->next_page_index
++;
2983 page
= ioa_dump
->ioa_data
[ioa_dump
->next_page_index
- 1];
2985 rem_len
= length
- bytes_copied
;
2986 rem_page_len
= PAGE_SIZE
- ioa_dump
->page_offset
;
2987 cur_len
= min(rem_len
, rem_page_len
);
2989 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
2990 if (ioa_cfg
->sdt_state
== ABORT_DUMP
) {
2993 rc
= ipr_get_ldump_data_section(ioa_cfg
,
2994 pci_address
+ bytes_copied
,
2995 &page
[ioa_dump
->page_offset
/ 4],
2996 (cur_len
/ sizeof(u32
)));
2998 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3001 ioa_dump
->page_offset
+= cur_len
;
3002 bytes_copied
+= cur_len
;
3010 return bytes_copied
;
/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
        hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
        hdr->offset = sizeof(*hdr);
        hdr->status = IPR_DUMP_STATUS_SUCCESS;
}
3030 * @ioa_cfg: ioa config struct
3031 * @driver_dump: driver dump struct
3036 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg
*ioa_cfg
,
3037 struct ipr_driver_dump
*driver_dump
)
3039 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
3041 ipr_init_dump_entry_hdr(&driver_dump
->ioa_type_entry
.hdr
);
3042 driver_dump
->ioa_type_entry
.hdr
.len
=
3043 sizeof(struct ipr_dump_ioa_type_entry
) -
3044 sizeof(struct ipr_dump_entry_header
);
3045 driver_dump
->ioa_type_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3046 driver_dump
->ioa_type_entry
.hdr
.id
= IPR_DUMP_DRIVER_TYPE_ID
;
3047 driver_dump
->ioa_type_entry
.type
= ioa_cfg
->type
;
3048 driver_dump
->ioa_type_entry
.fw_version
= (ucode_vpd
->major_release
<< 24) |
3049 (ucode_vpd
->card_type
<< 16) | (ucode_vpd
->minor_release
[0] << 8) |
3050 ucode_vpd
->minor_release
[1];
3051 driver_dump
->hdr
.num_entries
++;
/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
                                  struct ipr_driver_dump *driver_dump)
{
        ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
        driver_dump->version_entry.hdr.len =
                sizeof(struct ipr_dump_version_entry) -
                sizeof(struct ipr_dump_entry_header);
        driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
        driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
        strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
        driver_dump->hdr.num_entries++;
}
3076 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3077 * @ioa_cfg: ioa config struct
3078 * @driver_dump: driver dump struct
3083 static void ipr_dump_trace_data(struct ipr_ioa_cfg
*ioa_cfg
,
3084 struct ipr_driver_dump
*driver_dump
)
3086 ipr_init_dump_entry_hdr(&driver_dump
->trace_entry
.hdr
);
3087 driver_dump
->trace_entry
.hdr
.len
=
3088 sizeof(struct ipr_dump_trace_entry
) -
3089 sizeof(struct ipr_dump_entry_header
);
3090 driver_dump
->trace_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3091 driver_dump
->trace_entry
.hdr
.id
= IPR_DUMP_TRACE_ID
;
3092 memcpy(driver_dump
->trace_entry
.trace
, ioa_cfg
->trace
, IPR_TRACE_SIZE
);
3093 driver_dump
->hdr
.num_entries
++;
3097 * ipr_dump_location_data - Fill in the IOA location in the dump.
3098 * @ioa_cfg: ioa config struct
3099 * @driver_dump: driver dump struct
3104 static void ipr_dump_location_data(struct ipr_ioa_cfg
*ioa_cfg
,
3105 struct ipr_driver_dump
*driver_dump
)
3107 ipr_init_dump_entry_hdr(&driver_dump
->location_entry
.hdr
);
3108 driver_dump
->location_entry
.hdr
.len
=
3109 sizeof(struct ipr_dump_location_entry
) -
3110 sizeof(struct ipr_dump_entry_header
);
3111 driver_dump
->location_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_ASCII
;
3112 driver_dump
->location_entry
.hdr
.id
= IPR_DUMP_LOCATION_ID
;
3113 strcpy(driver_dump
->location_entry
.location
, dev_name(&ioa_cfg
->pdev
->dev
));
3114 driver_dump
->hdr
.num_entries
++;
3118 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3119 * @ioa_cfg: ioa config struct
3120 * @dump: dump struct
3125 static void ipr_get_ioa_dump(struct ipr_ioa_cfg
*ioa_cfg
, struct ipr_dump
*dump
)
3127 unsigned long start_addr
, sdt_word
;
3128 unsigned long lock_flags
= 0;
3129 struct ipr_driver_dump
*driver_dump
= &dump
->driver_dump
;
3130 struct ipr_ioa_dump
*ioa_dump
= &dump
->ioa_dump
;
3131 u32 num_entries
, max_num_entries
, start_off
, end_off
;
3132 u32 max_dump_size
, bytes_to_copy
, bytes_copied
, rc
;
3133 struct ipr_sdt
*sdt
;
3139 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3141 if (ioa_cfg
->sdt_state
!= READ_DUMP
) {
3142 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3146 if (ioa_cfg
->sis64
) {
3147 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3148 ssleep(IPR_DUMP_DELAY_SECONDS
);
3149 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3152 start_addr
= readl(ioa_cfg
->ioa_mailbox
);
3154 if (!ioa_cfg
->sis64
&& !ipr_sdt_is_fmt2(start_addr
)) {
3155 dev_err(&ioa_cfg
->pdev
->dev
,
3156 "Invalid dump table format: %lx\n", start_addr
);
3157 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3161 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA initiated\n");
3163 driver_dump
->hdr
.eye_catcher
= IPR_DUMP_EYE_CATCHER
;
3165 /* Initialize the overall dump header */
3166 driver_dump
->hdr
.len
= sizeof(struct ipr_driver_dump
);
3167 driver_dump
->hdr
.num_entries
= 1;
3168 driver_dump
->hdr
.first_entry_offset
= sizeof(struct ipr_dump_header
);
3169 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_SUCCESS
;
3170 driver_dump
->hdr
.os
= IPR_DUMP_OS_LINUX
;
3171 driver_dump
->hdr
.driver_name
= IPR_DUMP_DRIVER_NAME
;
3173 ipr_dump_version_data(ioa_cfg
, driver_dump
);
3174 ipr_dump_location_data(ioa_cfg
, driver_dump
);
3175 ipr_dump_ioa_type_data(ioa_cfg
, driver_dump
);
3176 ipr_dump_trace_data(ioa_cfg
, driver_dump
);
3178 /* Update dump_header */
3179 driver_dump
->hdr
.len
+= sizeof(struct ipr_dump_entry_header
);
3181 /* IOA Dump entry */
3182 ipr_init_dump_entry_hdr(&ioa_dump
->hdr
);
3183 ioa_dump
->hdr
.len
= 0;
3184 ioa_dump
->hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3185 ioa_dump
->hdr
.id
= IPR_DUMP_IOA_DUMP_ID
;
3187 /* First entries in sdt are actually a list of dump addresses and
3188 lengths to gather the real dump data. sdt represents the pointer
3189 to the ioa generated dump table. Dump data will be extracted based
3190 on entries in this table */
3191 sdt
= &ioa_dump
->sdt
;
3193 if (ioa_cfg
->sis64
) {
3194 max_num_entries
= IPR_FMT3_NUM_SDT_ENTRIES
;
3195 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
3197 max_num_entries
= IPR_FMT2_NUM_SDT_ENTRIES
;
3198 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
3201 bytes_to_copy
= offsetof(struct ipr_sdt
, entry
) +
3202 (max_num_entries
* sizeof(struct ipr_sdt_entry
));
3203 rc
= ipr_get_ldump_data_section(ioa_cfg
, start_addr
, (__be32
*)sdt
,
3204 bytes_to_copy
/ sizeof(__be32
));
3206 /* Smart Dump table is ready to use and the first entry is valid */
3207 if (rc
|| ((be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT3_SDT_READY_TO_USE
) &&
3208 (be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT2_SDT_READY_TO_USE
))) {
3209 dev_err(&ioa_cfg
->pdev
->dev
,
3210 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3211 rc
, be32_to_cpu(sdt
->hdr
.state
));
3212 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_FAILED
;
3213 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
3214 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3218 num_entries
= be32_to_cpu(sdt
->hdr
.num_entries_used
);
3220 if (num_entries
> max_num_entries
)
3221 num_entries
= max_num_entries
;
3223 /* Update dump length to the actual data to be copied */
3224 dump
->driver_dump
.hdr
.len
+= sizeof(struct ipr_sdt_header
);
3226 dump
->driver_dump
.hdr
.len
+= num_entries
* sizeof(struct ipr_sdt_entry
);
3228 dump
->driver_dump
.hdr
.len
+= max_num_entries
* sizeof(struct ipr_sdt_entry
);
3230 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3232 for (i
= 0; i
< num_entries
; i
++) {
3233 if (ioa_dump
->hdr
.len
> max_dump_size
) {
3234 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3238 if (sdt
->entry
[i
].flags
& IPR_SDT_VALID_ENTRY
) {
3239 sdt_word
= be32_to_cpu(sdt
->entry
[i
].start_token
);
3241 bytes_to_copy
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3243 start_off
= sdt_word
& IPR_FMT2_MBX_ADDR_MASK
;
3244 end_off
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3246 if (ipr_sdt_is_fmt2(sdt_word
) && sdt_word
)
3247 bytes_to_copy
= end_off
- start_off
;
3252 if (bytes_to_copy
> max_dump_size
) {
3253 sdt
->entry
[i
].flags
&= ~IPR_SDT_VALID_ENTRY
;
3257 /* Copy data from adapter to driver buffers */
3258 bytes_copied
= ipr_sdt_copy(ioa_cfg
, sdt_word
,
3261 ioa_dump
->hdr
.len
+= bytes_copied
;
3263 if (bytes_copied
!= bytes_to_copy
) {
3264 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3271 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA completed.\n");
3273 /* Update dump_header */
3274 driver_dump
->hdr
.len
+= ioa_dump
->hdr
.len
;
3276 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
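/*
 * When the driver is built without CONFIG_SCSI_IPR_DUMP, the dump entry
 * point compiles away to the empty statement above, so callers such as the
 * worker thread below can invoke ipr_get_ioa_dump() unconditionally.
 */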
/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_release_dump(struct kref *kref)
{
        struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
        struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
        unsigned long lock_flags = 0;
        int i;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ioa_cfg->dump = NULL;
        ioa_cfg->sdt_state = INACTIVE;
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

        for (i = 0; i < dump->ioa_dump.next_page_index; i++)
                free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

        vfree(dump->ioa_dump.ioa_data);
        kfree(dump);
}
3313 * ipr_worker_thread - Worker thread
3314 * @work: ioa config struct
3316 * Called at task level from a work thread. This function takes care
3317 * of adding and removing device from the mid-layer as configuration
3318 * changes are detected by the adapter.
3323 static void ipr_worker_thread(struct work_struct
*work
)
3325 unsigned long lock_flags
;
3326 struct ipr_resource_entry
*res
;
3327 struct scsi_device
*sdev
;
3328 struct ipr_dump
*dump
;
3329 struct ipr_ioa_cfg
*ioa_cfg
=
3330 container_of(work
, struct ipr_ioa_cfg
, work_q
);
3331 u8 bus
, target
, lun
;
3335 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3337 if (ioa_cfg
->sdt_state
== READ_DUMP
) {
3338 dump
= ioa_cfg
->dump
;
3340 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3343 kref_get(&dump
->kref
);
3344 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3345 ipr_get_ioa_dump(ioa_cfg
, dump
);
3346 kref_put(&dump
->kref
, ipr_release_dump
);
3348 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3349 if (ioa_cfg
->sdt_state
== DUMP_OBTAINED
&& !ioa_cfg
->dump_timeout
)
3350 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3351 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3355 if (ioa_cfg
->scsi_unblock
) {
3356 ioa_cfg
->scsi_unblock
= 0;
3357 ioa_cfg
->scsi_blocked
= 0;
3358 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3359 scsi_unblock_requests(ioa_cfg
->host
);
3360 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3361 if (ioa_cfg
->scsi_blocked
)
3362 scsi_block_requests(ioa_cfg
->host
);
3365 if (!ioa_cfg
->scan_enabled
) {
3366 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3373 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
) {
3374 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3378 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3379 if (res
->del_from_ml
&& res
->sdev
) {
3382 if (!scsi_device_get(sdev
)) {
3383 if (!res
->add_to_ml
)
3384 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
3386 res
->del_from_ml
= 0;
3387 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3388 scsi_remove_device(sdev
);
3389 scsi_device_put(sdev
);
3390 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3397 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3398 if (res
->add_to_ml
) {
3400 target
= res
->target
;
3403 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3404 scsi_add_device(ioa_cfg
->host
, bus
, target
, lun
);
3405 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3410 ioa_cfg
->scan_done
= 1;
3411 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3412 kobject_uevent(&ioa_cfg
->host
->shost_dev
.kobj
, KOBJ_CHANGE
);
3416 #ifdef CONFIG_SCSI_IPR_TRACE
3418 * ipr_read_trace - Dump the adapter trace
3419 * @filp: open sysfs file
3420 * @kobj: kobject struct
3421 * @bin_attr: bin_attribute struct
3424 * @count: buffer size
3427 * number of bytes printed to buffer
3429 static ssize_t
ipr_read_trace(struct file
*filp
, struct kobject
*kobj
,
3430 struct bin_attribute
*bin_attr
,
3431 char *buf
, loff_t off
, size_t count
)
3433 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
3434 struct Scsi_Host
*shost
= class_to_shost(dev
);
3435 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3436 unsigned long lock_flags
= 0;
3439 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3440 ret
= memory_read_from_buffer(buf
, count
, &off
, ioa_cfg
->trace
,
3442 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3447 static struct bin_attribute ipr_trace_attr
= {
3453 .read
= ipr_read_trace
,
/**
 * ipr_show_fw_version - Show the firmware version
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
        struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
        unsigned long lock_flags = 0;
        int len;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
                       ucode_vpd->major_release, ucode_vpd->card_type,
                       ucode_vpd->minor_release[0],
                       ucode_vpd->minor_release[1]);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        return len;
}

static struct device_attribute ipr_fw_version_attr = {
        .attr = {
                .name =         "fw_version",
                .mode =         S_IRUGO,
        },
        .show = ipr_show_fw_version,
};
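/*
 * Like the other host attributes in this file, fw_version is exposed on the
 * Scsi_Host class device, e.g. (illustrative path, host number varies):
 *
 *	cat /sys/class/scsi_host/host0/fw_version
 *
 * The value is the packed inquiry page 3 data: major release, card type,
 * and the two minor release bytes.
 */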
3492 * ipr_show_log_level - Show the adapter's error logging level
3493 * @dev: class device struct
3497 * number of bytes printed to buffer
3499 static ssize_t
ipr_show_log_level(struct device
*dev
,
3500 struct device_attribute
*attr
, char *buf
)
3502 struct Scsi_Host
*shost
= class_to_shost(dev
);
3503 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3504 unsigned long lock_flags
= 0;
3507 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3508 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->log_level
);
3509 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3514 * ipr_store_log_level - Change the adapter's error logging level
3515 * @dev: class device struct
3519 * number of bytes printed to buffer
3521 static ssize_t
ipr_store_log_level(struct device
*dev
,
3522 struct device_attribute
*attr
,
3523 const char *buf
, size_t count
)
3525 struct Scsi_Host
*shost
= class_to_shost(dev
);
3526 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3527 unsigned long lock_flags
= 0;
3529 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3530 ioa_cfg
->log_level
= simple_strtoul(buf
, NULL
, 10);
3531 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3535 static struct device_attribute ipr_log_level_attr
= {
3537 .name
= "log_level",
3538 .mode
= S_IRUGO
| S_IWUSR
,
3540 .show
= ipr_show_log_level
,
3541 .store
= ipr_store_log_level
3545 * ipr_store_diagnostics - IOA Diagnostics interface
3546 * @dev: device struct
3548 * @count: buffer size
3550 * This function will reset the adapter and wait a reasonable
3551 * amount of time for any errors that the adapter might log.
3554 * count on success / other on failure
3556 static ssize_t
ipr_store_diagnostics(struct device
*dev
,
3557 struct device_attribute
*attr
,
3558 const char *buf
, size_t count
)
3560 struct Scsi_Host
*shost
= class_to_shost(dev
);
3561 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3562 unsigned long lock_flags
= 0;
3565 if (!capable(CAP_SYS_ADMIN
))
3568 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3569 while (ioa_cfg
->in_reset_reload
) {
3570 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3571 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3572 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3575 ioa_cfg
->errors_logged
= 0;
3576 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3578 if (ioa_cfg
->in_reset_reload
) {
3579 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3580 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3582 /* Wait for a second for any errors to be logged */
3585 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3589 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3590 if (ioa_cfg
->in_reset_reload
|| ioa_cfg
->errors_logged
)
3592 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3597 static struct device_attribute ipr_diagnostics_attr
= {
3599 .name
= "run_diagnostics",
3602 .store
= ipr_store_diagnostics
3606 * ipr_show_adapter_state - Show the adapter's state
3607 * @class_dev: device struct
3611 * number of bytes printed to buffer
3613 static ssize_t
ipr_show_adapter_state(struct device
*dev
,
3614 struct device_attribute
*attr
, char *buf
)
3616 struct Scsi_Host
*shost
= class_to_shost(dev
);
3617 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3618 unsigned long lock_flags
= 0;
3621 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3622 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
3623 len
= snprintf(buf
, PAGE_SIZE
, "offline\n");
3625 len
= snprintf(buf
, PAGE_SIZE
, "online\n");
3626 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3631 * ipr_store_adapter_state - Change adapter state
3632 * @dev: device struct
3634 * @count: buffer size
3636 * This function will change the adapter's state.
3639 * count on success / other on failure
3641 static ssize_t
ipr_store_adapter_state(struct device
*dev
,
3642 struct device_attribute
*attr
,
3643 const char *buf
, size_t count
)
3645 struct Scsi_Host
*shost
= class_to_shost(dev
);
3646 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3647 unsigned long lock_flags
;
3648 int result
= count
, i
;
3650 if (!capable(CAP_SYS_ADMIN
))
3653 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3654 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
&&
3655 !strncmp(buf
, "online", 6)) {
3656 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
3657 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
3658 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 0;
3659 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
3662 ioa_cfg
->reset_retries
= 0;
3663 ioa_cfg
->in_ioa_bringdown
= 0;
3664 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3666 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3667 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3672 static struct device_attribute ipr_ioa_state_attr
= {
3674 .name
= "online_state",
3675 .mode
= S_IRUGO
| S_IWUSR
,
3677 .show
= ipr_show_adapter_state
,
3678 .store
= ipr_store_adapter_state
/**
 * ipr_store_reset_adapter - Reset the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t count)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
        unsigned long lock_flags;
        int result = count;

        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        if (!ioa_cfg->in_reset_reload)
                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

        return result;
}

static struct device_attribute ipr_ioa_reset_attr = {
        .attr = {
                .name =         "reset_host",
                .mode =         S_IWUSR,
        },
        .store = ipr_store_reset_adapter
};
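/*
 * Writing anything to this attribute (root only) requests a normal shutdown
 * and reset of the adapter and blocks until the reset/reload completes,
 * e.g. (illustrative path):
 *
 *	echo 1 > /sys/class/scsi_host/host0/reset_host
 */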
3721 static int ipr_iopoll(struct irq_poll
*iop
, int budget
);
3723 * ipr_show_iopoll_weight - Show ipr polling mode
3724 * @dev: class device struct
3728 * number of bytes printed to buffer
3730 static ssize_t
ipr_show_iopoll_weight(struct device
*dev
,
3731 struct device_attribute
*attr
, char *buf
)
3733 struct Scsi_Host
*shost
= class_to_shost(dev
);
3734 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3735 unsigned long lock_flags
= 0;
3738 spin_lock_irqsave(shost
->host_lock
, lock_flags
);
3739 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->iopoll_weight
);
3740 spin_unlock_irqrestore(shost
->host_lock
, lock_flags
);
3746 * ipr_store_iopoll_weight - Change the adapter's polling mode
3747 * @dev: class device struct
3751 * number of bytes printed to buffer
3753 static ssize_t
ipr_store_iopoll_weight(struct device
*dev
,
3754 struct device_attribute
*attr
,
3755 const char *buf
, size_t count
)
3757 struct Scsi_Host
*shost
= class_to_shost(dev
);
3758 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3759 unsigned long user_iopoll_weight
;
3760 unsigned long lock_flags
= 0;
3763 if (!ioa_cfg
->sis64
) {
3764 dev_info(&ioa_cfg
->pdev
->dev
, "irq_poll not supported on this adapter\n");
3767 if (kstrtoul(buf
, 10, &user_iopoll_weight
))
3770 if (user_iopoll_weight
> 256) {
3771 dev_info(&ioa_cfg
->pdev
->dev
, "Invalid irq_poll weight. It must be less than 256\n");
3775 if (user_iopoll_weight
== ioa_cfg
->iopoll_weight
) {
3776 dev_info(&ioa_cfg
->pdev
->dev
, "Current irq_poll weight has the same weight\n");
3780 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
3781 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++)
3782 irq_poll_disable(&ioa_cfg
->hrrq
[i
].iopoll
);
3785 spin_lock_irqsave(shost
->host_lock
, lock_flags
);
3786 ioa_cfg
->iopoll_weight
= user_iopoll_weight
;
3787 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
3788 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++) {
3789 irq_poll_init(&ioa_cfg
->hrrq
[i
].iopoll
,
3790 ioa_cfg
->iopoll_weight
, ipr_iopoll
);
3793 spin_unlock_irqrestore(shost
->host_lock
, lock_flags
);
3798 static struct device_attribute ipr_iopoll_weight_attr
= {
3800 .name
= "iopoll_weight",
3801 .mode
= S_IRUGO
| S_IWUSR
,
3803 .show
= ipr_show_iopoll_weight
,
3804 .store
= ipr_store_iopoll_weight
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:		buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 * 	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
        int sg_size, order;
        struct ipr_sglist *sglist;

        /* Get the minimum size per scatter/gather element */
        sg_size = buf_len / (IPR_MAX_SGLIST - 1);

        /* Get the actual size per element */
        order = get_order(sg_size);

        /* Allocate a scatter/gather list for the DMA */
        sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
        if (sglist == NULL)
                return NULL;

        sglist->order = order;
        sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
                                              &sglist->num_sg);
        if (!sglist->scatterlist) {
                kfree(sglist);
                return NULL;
        }

        return sglist;
}

/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:		scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
        sgl_free_order(sglist->scatterlist, sglist->order);
        kfree(sglist);
}
/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:		scatter/gather list pointer
 * @buffer:		buffer pointer
 * @len:		buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;

		if (result != 0) {
			ipr_trace;
			return result;
		}
	}

	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}
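
/*
 * Illustrative sketch (not part of the original driver text): the helpers
 * above are used together by the firmware update path further below, roughly
 * in this order:
 *
 *	sglist = ipr_alloc_ucode_buffer(dnld_size);
 *	rc = ipr_copy_ucode_buffer(sglist, image, dnld_size);
 *	if (!rc)
 *		rc = ipr_update_ioa_ucode(ioa_cfg, sglist);
 *	ipr_free_ucode_buffer(sglist);
 *
 * where "image" points just past the ipr_ucode_image_header of the firmware
 * file (see ipr_store_update_fw()).
 */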
3915 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3916 * @ipr_cmd: ipr command struct
3917 * @sglist: scatter/gather list
3919 * Builds a microcode download IOA data list (IOADL).
3922 static void ipr_build_ucode_ioadl64(struct ipr_cmnd
*ipr_cmd
,
3923 struct ipr_sglist
*sglist
)
3925 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
3926 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ioadl64
;
3927 struct scatterlist
*scatterlist
= sglist
->scatterlist
;
3930 ipr_cmd
->dma_use_sg
= sglist
->num_dma_sg
;
3931 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
3932 ioarcb
->data_transfer_length
= cpu_to_be32(sglist
->buffer_len
);
3935 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
) * ipr_cmd
->dma_use_sg
);
3936 for (i
= 0; i
< ipr_cmd
->dma_use_sg
; i
++) {
3937 ioadl64
[i
].flags
= cpu_to_be32(IPR_IOADL_FLAGS_WRITE
);
3938 ioadl64
[i
].data_len
= cpu_to_be32(sg_dma_len(&scatterlist
[i
]));
3939 ioadl64
[i
].address
= cpu_to_be64(sg_dma_address(&scatterlist
[i
]));
3942 ioadl64
[i
-1].flags
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
3946 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3947 * @ipr_cmd: ipr command struct
3948 * @sglist: scatter/gather list
3950 * Builds a microcode download IOA data list (IOADL).
3953 static void ipr_build_ucode_ioadl(struct ipr_cmnd
*ipr_cmd
,
3954 struct ipr_sglist
*sglist
)
3956 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
3957 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
3958 struct scatterlist
*scatterlist
= sglist
->scatterlist
;
3961 ipr_cmd
->dma_use_sg
= sglist
->num_dma_sg
;
3962 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
3963 ioarcb
->data_transfer_length
= cpu_to_be32(sglist
->buffer_len
);
3966 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
3968 for (i
= 0; i
< ipr_cmd
->dma_use_sg
; i
++) {
3969 ioadl
[i
].flags_and_data_len
=
3970 cpu_to_be32(IPR_IOADL_FLAGS_WRITE
| sg_dma_len(&scatterlist
[i
]));
3972 cpu_to_be32(sg_dma_address(&scatterlist
[i
]));
3975 ioadl
[i
-1].flags_and_data_len
|=
3976 cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
3980 * ipr_update_ioa_ucode - Update IOA's microcode
3981 * @ioa_cfg: ioa config struct
3982 * @sglist: scatter/gather list
3984 * Initiate an adapter reset to update the IOA's microcode
3987 * 0 on success / -EIO on failure
3989 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg
*ioa_cfg
,
3990 struct ipr_sglist
*sglist
)
3992 unsigned long lock_flags
;
3994 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3995 while (ioa_cfg
->in_reset_reload
) {
3996 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3997 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3998 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4001 if (ioa_cfg
->ucode_sglist
) {
4002 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4003 dev_err(&ioa_cfg
->pdev
->dev
,
4004 "Microcode download already in progress\n");
4008 sglist
->num_dma_sg
= dma_map_sg(&ioa_cfg
->pdev
->dev
,
4009 sglist
->scatterlist
, sglist
->num_sg
,
4012 if (!sglist
->num_dma_sg
) {
4013 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4014 dev_err(&ioa_cfg
->pdev
->dev
,
4015 "Failed to map microcode download buffer!\n");
4019 ioa_cfg
->ucode_sglist
= sglist
;
4020 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
4021 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4022 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
4024 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4025 ioa_cfg
->ucode_sglist
= NULL
;
4026 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	u8 *src;
	char *endline;
	int result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	snprintf(fname, sizeof(fname), "%s", buf);

	endline = strchr(fname, '\n');
	if (endline)
		*endline = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
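
/*
 * Illustrative usage (assumption, not from the original source): the
 * update_fw attribute expects the name of a firmware image that
 * request_firmware() can locate (e.g. under /lib/firmware). A hypothetical
 * update could look like:
 *
 *	echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * The write blocks until the adapter reset driven by ipr_update_ioa_ucode()
 * completes, which the driver warns may take up to 30 minutes.
 */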
/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return len;
}

static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name =		"fw_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_type
};
4139 static ssize_t
ipr_read_async_err_log(struct file
*filep
, struct kobject
*kobj
,
4140 struct bin_attribute
*bin_attr
, char *buf
,
4141 loff_t off
, size_t count
)
4143 struct device
*cdev
= container_of(kobj
, struct device
, kobj
);
4144 struct Scsi_Host
*shost
= class_to_shost(cdev
);
4145 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4146 struct ipr_hostrcb
*hostrcb
;
4147 unsigned long lock_flags
= 0;
4150 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4151 hostrcb
= list_first_entry_or_null(&ioa_cfg
->hostrcb_report_q
,
4152 struct ipr_hostrcb
, queue
);
4154 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4157 ret
= memory_read_from_buffer(buf
, count
, &off
, &hostrcb
->hcam
,
4158 sizeof(hostrcb
->hcam
));
4159 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4163 static ssize_t
ipr_next_async_err_log(struct file
*filep
, struct kobject
*kobj
,
4164 struct bin_attribute
*bin_attr
, char *buf
,
4165 loff_t off
, size_t count
)
4167 struct device
*cdev
= container_of(kobj
, struct device
, kobj
);
4168 struct Scsi_Host
*shost
= class_to_shost(cdev
);
4169 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4170 struct ipr_hostrcb
*hostrcb
;
4171 unsigned long lock_flags
= 0;
4173 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4174 hostrcb
= list_first_entry_or_null(&ioa_cfg
->hostrcb_report_q
,
4175 struct ipr_hostrcb
, queue
);
4177 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4181 /* Reclaim hostrcb before exit */
4182 list_move_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_free_q
);
4183 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4187 static struct bin_attribute ipr_ioa_async_err_log
= {
4189 .name
= "async_err_log",
4190 .mode
= S_IRUGO
| S_IWUSR
,
4193 .read
= ipr_read_async_err_log
,
4194 .write
= ipr_next_async_err_log
4197 static struct device_attribute
*ipr_ioa_attrs
[] = {
4198 &ipr_fw_version_attr
,
4199 &ipr_log_level_attr
,
4200 &ipr_diagnostics_attr
,
4201 &ipr_ioa_state_attr
,
4202 &ipr_ioa_reset_attr
,
4203 &ipr_update_fw_attr
,
4204 &ipr_ioa_fw_type_attr
,
4205 &ipr_iopoll_weight_attr
,
4209 #ifdef CONFIG_SCSI_IPR_DUMP
4211 * ipr_read_dump - Dump the adapter
4212 * @filp: open sysfs file
4213 * @kobj: kobject struct
4214 * @bin_attr: bin_attribute struct
4217 * @count: buffer size
4220 * number of bytes printed to buffer
4222 static ssize_t
ipr_read_dump(struct file
*filp
, struct kobject
*kobj
,
4223 struct bin_attribute
*bin_attr
,
4224 char *buf
, loff_t off
, size_t count
)
4226 struct device
*cdev
= container_of(kobj
, struct device
, kobj
);
4227 struct Scsi_Host
*shost
= class_to_shost(cdev
);
4228 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4229 struct ipr_dump
*dump
;
4230 unsigned long lock_flags
= 0;
4235 if (!capable(CAP_SYS_ADMIN
))
4238 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4239 dump
= ioa_cfg
->dump
;
4241 if (ioa_cfg
->sdt_state
!= DUMP_OBTAINED
|| !dump
) {
4242 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4245 kref_get(&dump
->kref
);
4246 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4248 if (off
> dump
->driver_dump
.hdr
.len
) {
4249 kref_put(&dump
->kref
, ipr_release_dump
);
4253 if (off
+ count
> dump
->driver_dump
.hdr
.len
) {
4254 count
= dump
->driver_dump
.hdr
.len
- off
;
4258 if (count
&& off
< sizeof(dump
->driver_dump
)) {
4259 if (off
+ count
> sizeof(dump
->driver_dump
))
4260 len
= sizeof(dump
->driver_dump
) - off
;
4263 src
= (u8
*)&dump
->driver_dump
+ off
;
4264 memcpy(buf
, src
, len
);
4270 off
-= sizeof(dump
->driver_dump
);
4273 sdt_end
= offsetof(struct ipr_ioa_dump
, sdt
.entry
) +
4274 (be32_to_cpu(dump
->ioa_dump
.sdt
.hdr
.num_entries_used
) *
4275 sizeof(struct ipr_sdt_entry
));
4277 sdt_end
= offsetof(struct ipr_ioa_dump
, sdt
.entry
) +
4278 (IPR_FMT2_NUM_SDT_ENTRIES
* sizeof(struct ipr_sdt_entry
));
4280 if (count
&& off
< sdt_end
) {
4281 if (off
+ count
> sdt_end
)
4282 len
= sdt_end
- off
;
4285 src
= (u8
*)&dump
->ioa_dump
+ off
;
4286 memcpy(buf
, src
, len
);
4295 if ((off
& PAGE_MASK
) != ((off
+ count
) & PAGE_MASK
))
4296 len
= PAGE_ALIGN(off
) - off
;
4299 src
= (u8
*)dump
->ioa_dump
.ioa_data
[(off
& PAGE_MASK
) >> PAGE_SHIFT
];
4300 src
+= off
& ~PAGE_MASK
;
4301 memcpy(buf
, src
, len
);
4307 kref_put(&dump
->kref
, ipr_release_dump
);
4312 * ipr_alloc_dump - Prepare for adapter dump
4313 * @ioa_cfg: ioa config struct
4316 * 0 on success / other on failure
4318 static int ipr_alloc_dump(struct ipr_ioa_cfg
*ioa_cfg
)
4320 struct ipr_dump
*dump
;
4322 unsigned long lock_flags
= 0;
4324 dump
= kzalloc(sizeof(struct ipr_dump
), GFP_KERNEL
);
4327 ipr_err("Dump memory allocation failed\n");
4332 ioa_data
= vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES
* sizeof(__be32
*));
4334 ioa_data
= vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES
* sizeof(__be32
*));
4337 ipr_err("Dump memory allocation failed\n");
4342 dump
->ioa_dump
.ioa_data
= ioa_data
;
4344 kref_init(&dump
->kref
);
4345 dump
->ioa_cfg
= ioa_cfg
;
4347 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4349 if (INACTIVE
!= ioa_cfg
->sdt_state
) {
4350 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4351 vfree(dump
->ioa_dump
.ioa_data
);
4356 ioa_cfg
->dump
= dump
;
4357 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
4358 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
&& !ioa_cfg
->dump_taken
) {
4359 ioa_cfg
->dump_taken
= 1;
4360 schedule_work(&ioa_cfg
->work_q
);
4362 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4368 * ipr_free_dump - Free adapter dump memory
4369 * @ioa_cfg: ioa config struct
4372 * 0 on success / other on failure
4374 static int ipr_free_dump(struct ipr_ioa_cfg
*ioa_cfg
)
4376 struct ipr_dump
*dump
;
4377 unsigned long lock_flags
= 0;
4381 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4382 dump
= ioa_cfg
->dump
;
4384 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4388 ioa_cfg
->dump
= NULL
;
4389 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4391 kref_put(&dump
->kref
, ipr_release_dump
);
4398 * ipr_write_dump - Setup dump state of adapter
4399 * @filp: open sysfs file
4400 * @kobj: kobject struct
4401 * @bin_attr: bin_attribute struct
4404 * @count: buffer size
4407 * number of bytes printed to buffer
4409 static ssize_t
ipr_write_dump(struct file
*filp
, struct kobject
*kobj
,
4410 struct bin_attribute
*bin_attr
,
4411 char *buf
, loff_t off
, size_t count
)
4413 struct device
*cdev
= container_of(kobj
, struct device
, kobj
);
4414 struct Scsi_Host
*shost
= class_to_shost(cdev
);
4415 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
4418 if (!capable(CAP_SYS_ADMIN
))
4422 rc
= ipr_alloc_dump(ioa_cfg
);
4423 else if (buf
[0] == '0')
4424 rc
= ipr_free_dump(ioa_cfg
);
static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
#endif
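
/*
 * Illustrative usage (assumption): with CONFIG_SCSI_IPR_DUMP enabled, the
 * "dump" binary attribute can be driven from user space roughly as follows
 * (host number is an example):
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump	# prepare dump memory
 *	cat /sys/class/scsi_host/host0/dump > ipr.dump	# read the dump
 *	echo 0 > /sys/class/scsi_host/host0/dump	# free dump memory
 *
 * ipr_write_dump() maps '1' to ipr_alloc_dump() and '0' to ipr_free_dump().
 */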
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}
/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name =		"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};
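
/*
 * Illustrative note (assumption): the per-device attributes defined here and
 * below appear under the SCSI device's sysfs directory, so the handle for a
 * device at 2:0:3:0 could be read with something like:
 *
 *	cat /sys/bus/scsi/devices/2:0:3:0/adapter_handle
 */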
4507 * ipr_show_resource_path - Show the resource path or the resource address for
4509 * @dev: device struct
4510 * @attr: device attribute structure
4514 * number of bytes printed to buffer
4516 static ssize_t
ipr_show_resource_path(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
4518 struct scsi_device
*sdev
= to_scsi_device(dev
);
4519 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4520 struct ipr_resource_entry
*res
;
4521 unsigned long lock_flags
= 0;
4522 ssize_t len
= -ENXIO
;
4523 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
4525 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4526 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4527 if (res
&& ioa_cfg
->sis64
)
4528 len
= snprintf(buf
, PAGE_SIZE
, "%s\n",
4529 __ipr_format_res_path(res
->res_path
, buffer
,
4532 len
= snprintf(buf
, PAGE_SIZE
, "%d:%d:%d:%d\n", ioa_cfg
->host
->host_no
,
4533 res
->bus
, res
->target
, res
->lun
);
4535 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4539 static struct device_attribute ipr_resource_path_attr
= {
4541 .name
= "resource_path",
4544 .show
= ipr_show_resource_path
4548 * ipr_show_device_id - Show the device_id for this device.
4549 * @dev: device struct
4550 * @attr: device attribute structure
4554 * number of bytes printed to buffer
4556 static ssize_t
ipr_show_device_id(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
4558 struct scsi_device
*sdev
= to_scsi_device(dev
);
4559 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4560 struct ipr_resource_entry
*res
;
4561 unsigned long lock_flags
= 0;
4562 ssize_t len
= -ENXIO
;
4564 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4565 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4566 if (res
&& ioa_cfg
->sis64
)
4567 len
= snprintf(buf
, PAGE_SIZE
, "0x%llx\n", be64_to_cpu(res
->dev_id
));
4569 len
= snprintf(buf
, PAGE_SIZE
, "0x%llx\n", res
->lun_wwn
);
4571 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4575 static struct device_attribute ipr_device_id_attr
= {
4577 .name
= "device_id",
4580 .show
= ipr_show_device_id
4584 * ipr_show_resource_type - Show the resource type for this device.
4585 * @dev: device struct
4586 * @attr: device attribute structure
4590 * number of bytes printed to buffer
4592 static ssize_t
ipr_show_resource_type(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
4594 struct scsi_device
*sdev
= to_scsi_device(dev
);
4595 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4596 struct ipr_resource_entry
*res
;
4597 unsigned long lock_flags
= 0;
4598 ssize_t len
= -ENXIO
;
4600 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4601 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4604 len
= snprintf(buf
, PAGE_SIZE
, "%x\n", res
->type
);
4606 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4610 static struct device_attribute ipr_resource_type_attr
= {
4612 .name
= "resource_type",
4615 .show
= ipr_show_resource_type
4619 * ipr_show_raw_mode - Show the adapter's raw mode
4620 * @dev: class device struct
4624 * number of bytes printed to buffer
4626 static ssize_t
ipr_show_raw_mode(struct device
*dev
,
4627 struct device_attribute
*attr
, char *buf
)
4629 struct scsi_device
*sdev
= to_scsi_device(dev
);
4630 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4631 struct ipr_resource_entry
*res
;
4632 unsigned long lock_flags
= 0;
4635 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4636 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4638 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", res
->raw_mode
);
4641 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4646 * ipr_store_raw_mode - Change the adapter's raw mode
4647 * @dev: class device struct
4651 * number of bytes printed to buffer
4653 static ssize_t
ipr_store_raw_mode(struct device
*dev
,
4654 struct device_attribute
*attr
,
4655 const char *buf
, size_t count
)
4657 struct scsi_device
*sdev
= to_scsi_device(dev
);
4658 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)sdev
->host
->hostdata
;
4659 struct ipr_resource_entry
*res
;
4660 unsigned long lock_flags
= 0;
4663 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4664 res
= (struct ipr_resource_entry
*)sdev
->hostdata
;
4666 if (ipr_is_af_dasd_device(res
)) {
4667 res
->raw_mode
= simple_strtoul(buf
, NULL
, 10);
4670 sdev_printk(KERN_INFO
, res
->sdev
, "raw mode is %s\n",
4671 res
->raw_mode
? "enabled" : "disabled");
4676 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4680 static struct device_attribute ipr_raw_mode_attr
= {
4683 .mode
= S_IRUGO
| S_IWUSR
,
4685 .show
= ipr_show_raw_mode
,
4686 .store
= ipr_store_raw_mode
4689 static struct device_attribute
*ipr_dev_attrs
[] = {
4690 &ipr_adapter_handle_attr
,
4691 &ipr_resource_path_attr
,
4692 &ipr_device_id_attr
,
4693 &ipr_resource_type_attr
,
/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
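
/*
 * Worked example (not in the original source): with 128 heads and 32 sectors
 * per track, one "cylinder" is 128 * 32 = 4096 sectors, i.e. 2 MiB with
 * 512-byte sectors. Partitioning tools that align partitions to cylinder
 * boundaries therefore naturally produce 4k-aligned partitions, which is the
 * intent stated in the comment above. For a device of 100000000 sectors,
 * parm[2] would be 100000000 / 4096 = 24414 cylinders (truncated).
 */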
4734 * ipr_find_starget - Find target based on bus/target.
4735 * @starget: scsi target struct
4738 * resource entry pointer if found / NULL if not found
4740 static struct ipr_resource_entry
*ipr_find_starget(struct scsi_target
*starget
)
4742 struct Scsi_Host
*shost
= dev_to_shost(&starget
->dev
);
4743 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) shost
->hostdata
;
4744 struct ipr_resource_entry
*res
;
4746 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
4747 if ((res
->bus
== starget
->channel
) &&
4748 (res
->target
== starget
->id
)) {
4756 static struct ata_port_info sata_port_info
;
4759 * ipr_target_alloc - Prepare for commands to a SCSI target
4760 * @starget: scsi target struct
4762 * If the device is a SATA device, this function allocates an
4763 * ATA port with libata, else it does nothing.
4766 * 0 on success / non-0 on failure
4768 static int ipr_target_alloc(struct scsi_target
*starget
)
4770 struct Scsi_Host
*shost
= dev_to_shost(&starget
->dev
);
4771 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) shost
->hostdata
;
4772 struct ipr_sata_port
*sata_port
;
4773 struct ata_port
*ap
;
4774 struct ipr_resource_entry
*res
;
4775 unsigned long lock_flags
;
4777 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4778 res
= ipr_find_starget(starget
);
4779 starget
->hostdata
= NULL
;
4781 if (res
&& ipr_is_gata(res
)) {
4782 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4783 sata_port
= kzalloc(sizeof(*sata_port
), GFP_KERNEL
);
4787 ap
= ata_sas_port_alloc(&ioa_cfg
->ata_host
, &sata_port_info
, shost
);
4789 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4790 sata_port
->ioa_cfg
= ioa_cfg
;
4792 sata_port
->res
= res
;
4794 res
->sata_port
= sata_port
;
4795 ap
->private_data
= sata_port
;
4796 starget
->hostdata
= sata_port
;
4802 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4808 * ipr_target_destroy - Destroy a SCSI target
4809 * @starget: scsi target struct
4811 * If the device was a SATA device, this function frees the libata
4812 * ATA port, else it does nothing.
4815 static void ipr_target_destroy(struct scsi_target
*starget
)
4817 struct ipr_sata_port
*sata_port
= starget
->hostdata
;
4818 struct Scsi_Host
*shost
= dev_to_shost(&starget
->dev
);
4819 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) shost
->hostdata
;
4821 if (ioa_cfg
->sis64
) {
4822 if (!ipr_find_starget(starget
)) {
4823 if (starget
->channel
== IPR_ARRAY_VIRTUAL_BUS
)
4824 clear_bit(starget
->id
, ioa_cfg
->array_ids
);
4825 else if (starget
->channel
== IPR_VSET_VIRTUAL_BUS
)
4826 clear_bit(starget
->id
, ioa_cfg
->vset_ids
);
4827 else if (starget
->channel
== 0)
4828 clear_bit(starget
->id
, ioa_cfg
->target_ids
);
4833 starget
->hostdata
= NULL
;
4834 ata_sas_port_destroy(sata_port
->ap
);
4840 * ipr_find_sdev - Find device based on bus/target/lun.
4841 * @sdev: scsi device struct
4844 * resource entry pointer if found / NULL if not found
4846 static struct ipr_resource_entry
*ipr_find_sdev(struct scsi_device
*sdev
)
4848 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4849 struct ipr_resource_entry
*res
;
4851 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
4852 if ((res
->bus
== sdev
->channel
) &&
4853 (res
->target
== sdev
->id
) &&
4854 (res
->lun
== sdev
->lun
))
4862 * ipr_slave_destroy - Unconfigure a SCSI device
4863 * @sdev: scsi device struct
4868 static void ipr_slave_destroy(struct scsi_device
*sdev
)
4870 struct ipr_resource_entry
*res
;
4871 struct ipr_ioa_cfg
*ioa_cfg
;
4872 unsigned long lock_flags
= 0;
4874 ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4876 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4877 res
= (struct ipr_resource_entry
*) sdev
->hostdata
;
4880 res
->sata_port
->ap
->link
.device
[0].class = ATA_DEV_NONE
;
4881 sdev
->hostdata
= NULL
;
4883 res
->sata_port
= NULL
;
4885 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4889 * ipr_slave_configure - Configure a SCSI device
4890 * @sdev: scsi device struct
4892 * This function configures the specified scsi device.
4897 static int ipr_slave_configure(struct scsi_device
*sdev
)
4899 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4900 struct ipr_resource_entry
*res
;
4901 struct ata_port
*ap
= NULL
;
4902 unsigned long lock_flags
= 0;
4903 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
4905 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4906 res
= sdev
->hostdata
;
4908 if (ipr_is_af_dasd_device(res
))
4909 sdev
->type
= TYPE_RAID
;
4910 if (ipr_is_af_dasd_device(res
) || ipr_is_ioa_resource(res
)) {
4911 sdev
->scsi_level
= 4;
4912 sdev
->no_uld_attach
= 1;
4914 if (ipr_is_vset_device(res
)) {
4915 sdev
->scsi_level
= SCSI_SPC_3
;
4916 sdev
->no_report_opcodes
= 1;
4917 blk_queue_rq_timeout(sdev
->request_queue
,
4918 IPR_VSET_RW_TIMEOUT
);
4919 blk_queue_max_hw_sectors(sdev
->request_queue
, IPR_VSET_MAX_SECTORS
);
4921 if (ipr_is_gata(res
) && res
->sata_port
)
4922 ap
= res
->sata_port
->ap
;
4923 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4926 scsi_change_queue_depth(sdev
, IPR_MAX_CMD_PER_ATA_LUN
);
4927 ata_sas_slave_configure(sdev
, ap
);
4931 sdev_printk(KERN_INFO
, sdev
, "Resource path: %s\n",
4932 ipr_format_res_path(ioa_cfg
,
4933 res
->res_path
, buffer
, sizeof(buffer
)));
4936 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4941 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4942 * @sdev: scsi device struct
4944 * This function initializes an ATA port so that future commands
4945 * sent through queuecommand will work.
4950 static int ipr_ata_slave_alloc(struct scsi_device
*sdev
)
4952 struct ipr_sata_port
*sata_port
= NULL
;
4956 if (sdev
->sdev_target
)
4957 sata_port
= sdev
->sdev_target
->hostdata
;
4959 rc
= ata_sas_port_init(sata_port
->ap
);
4961 rc
= ata_sas_sync_probe(sata_port
->ap
);
4965 ipr_slave_destroy(sdev
);
4972 * ipr_slave_alloc - Prepare for commands to a device.
4973 * @sdev: scsi device struct
4975 * This function saves a pointer to the resource entry
4976 * in the scsi device struct if the device exists. We
4977 * can then use this pointer in ipr_queuecommand when
4978 * handling new commands.
4981 * 0 on success / -ENXIO if device does not exist
4983 static int ipr_slave_alloc(struct scsi_device
*sdev
)
4985 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4986 struct ipr_resource_entry
*res
;
4987 unsigned long lock_flags
;
4990 sdev
->hostdata
= NULL
;
4992 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4994 res
= ipr_find_sdev(sdev
);
4999 sdev
->hostdata
= res
;
5000 if (!ipr_is_naca_model(res
))
5001 res
->needs_sync_complete
= 1;
5003 if (ipr_is_gata(res
)) {
5004 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5005 return ipr_ata_slave_alloc(sdev
);
5009 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
/**
 * ipr_match_lun - Match function for specified LUN
 * @ipr_cmd:	ipr command struct
 * @device:	device to match (sdev)
 *
 * Returns:
 *	1 if command matches sdev / 0 if command does not match sdev
 **/
static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
{
	if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
		return 1;
	return 0;
}

/**
 * ipr_cmnd_is_free - Check if a command is free or not
 * @ipr_cmd:	ipr command struct
 *
 * Returns:
 *	true / false
 **/
static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *loop_cmd;

	list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
		if (loop_cmd == ipr_cmd)
			return true;
	}

	return false;
}

/**
 * ipr_match_res - Match function for specified resource entry
 * @ipr_cmd:	ipr command struct
 * @resource:	resource entry to match
 *
 * Returns:
 *	1 if command matches the resource / 0 if it does not
 **/
static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
{
	struct ipr_resource_entry *res = resource;

	if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
		return 1;
	return 0;
}
5066 * ipr_wait_for_ops - Wait for matching commands to complete
5067 * @ipr_cmd: ipr command struct
5068 * @device: device to match (sdev)
5069 * @match: match function to use
5074 static int ipr_wait_for_ops(struct ipr_ioa_cfg
*ioa_cfg
, void *device
,
5075 int (*match
)(struct ipr_cmnd
*, void *))
5077 struct ipr_cmnd
*ipr_cmd
;
5079 unsigned long flags
;
5080 struct ipr_hrr_queue
*hrrq
;
5081 signed long timeout
= IPR_ABORT_TASK_TIMEOUT
;
5082 DECLARE_COMPLETION_ONSTACK(comp
);
5088 for_each_hrrq(hrrq
, ioa_cfg
) {
5089 spin_lock_irqsave(hrrq
->lock
, flags
);
5090 for (i
= hrrq
->min_cmd_id
; i
<= hrrq
->max_cmd_id
; i
++) {
5091 ipr_cmd
= ioa_cfg
->ipr_cmnd_list
[i
];
5092 if (!ipr_cmnd_is_free(ipr_cmd
)) {
5093 if (match(ipr_cmd
, device
)) {
5094 ipr_cmd
->eh_comp
= &comp
;
5099 spin_unlock_irqrestore(hrrq
->lock
, flags
);
5103 timeout
= wait_for_completion_timeout(&comp
, timeout
);
5108 for_each_hrrq(hrrq
, ioa_cfg
) {
5109 spin_lock_irqsave(hrrq
->lock
, flags
);
5110 for (i
= hrrq
->min_cmd_id
; i
<= hrrq
->max_cmd_id
; i
++) {
5111 ipr_cmd
= ioa_cfg
->ipr_cmnd_list
[i
];
5112 if (!ipr_cmnd_is_free(ipr_cmd
)) {
5113 if (match(ipr_cmd
, device
)) {
5114 ipr_cmd
->eh_comp
= NULL
;
5119 spin_unlock_irqrestore(hrrq
->lock
, flags
);
5123 dev_err(&ioa_cfg
->pdev
->dev
, "Timed out waiting for aborted commands\n");
5125 return wait
? FAILED
: SUCCESS
;
5134 static int ipr_eh_host_reset(struct scsi_cmnd
*cmd
)
5136 struct ipr_ioa_cfg
*ioa_cfg
;
5137 unsigned long lock_flags
= 0;
5141 ioa_cfg
= (struct ipr_ioa_cfg
*) cmd
->device
->host
->hostdata
;
5142 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5144 if (!ioa_cfg
->in_reset_reload
&& !ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
5145 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_ABBREV
);
5146 dev_err(&ioa_cfg
->pdev
->dev
,
5147 "Adapter being reset as a result of error recovery.\n");
5149 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
5150 ioa_cfg
->sdt_state
= GET_DUMP
;
5153 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5154 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
5155 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5157 /* If we got hit with a host reset while we were already resetting
5158 the adapter for some reason, and the reset failed. */
5159 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
5164 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5170 * ipr_device_reset - Reset the device
5171 * @ioa_cfg: ioa config struct
5172 * @res: resource entry struct
5174 * This function issues a device reset to the affected device.
5175 * If the device is a SCSI device, a LUN reset will be sent
5176 * to the device first. If that does not work, a target reset
5177 * will be sent. If the device is a SATA device, a PHY reset will
5181 * 0 on success / non-zero on failure
5183 static int ipr_device_reset(struct ipr_ioa_cfg
*ioa_cfg
,
5184 struct ipr_resource_entry
*res
)
5186 struct ipr_cmnd
*ipr_cmd
;
5187 struct ipr_ioarcb
*ioarcb
;
5188 struct ipr_cmd_pkt
*cmd_pkt
;
5189 struct ipr_ioarcb_ata_regs
*regs
;
5193 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
5194 ioarcb
= &ipr_cmd
->ioarcb
;
5195 cmd_pkt
= &ioarcb
->cmd_pkt
;
5197 if (ipr_cmd
->ioa_cfg
->sis64
) {
5198 regs
= &ipr_cmd
->i
.ata_ioadl
.regs
;
5199 ioarcb
->add_cmd_parms_offset
= cpu_to_be16(sizeof(*ioarcb
));
5201 regs
= &ioarcb
->u
.add_data
.u
.regs
;
5203 ioarcb
->res_handle
= res
->res_handle
;
5204 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5205 cmd_pkt
->cdb
[0] = IPR_RESET_DEVICE
;
5206 if (ipr_is_gata(res
)) {
5207 cmd_pkt
->cdb
[2] = IPR_ATA_PHY_RESET
;
5208 ioarcb
->add_cmd_parms_len
= cpu_to_be16(sizeof(regs
->flags
));
5209 regs
->flags
|= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION
;
5212 ipr_send_blocking_cmd(ipr_cmd
, ipr_timeout
, IPR_DEVICE_RESET_TIMEOUT
);
5213 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5214 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5215 if (ipr_is_gata(res
) && res
->sata_port
&& ioasc
!= IPR_IOASC_IOA_WAS_RESET
) {
5216 if (ipr_cmd
->ioa_cfg
->sis64
)
5217 memcpy(&res
->sata_port
->ioasa
, &ipr_cmd
->s
.ioasa64
.u
.gata
,
5218 sizeof(struct ipr_ioasa_gata
));
5220 memcpy(&res
->sata_port
->ioasa
, &ipr_cmd
->s
.ioasa
.u
.gata
,
5221 sizeof(struct ipr_ioasa_gata
));
5225 return IPR_IOASC_SENSE_KEY(ioasc
) ? -EIO
: 0;
5229 * ipr_sata_reset - Reset the SATA port
5230 * @link: SATA link to reset
5231 * @classes: class of the attached device
5233 * This function issues a SATA phy reset to the affected ATA link.
5236 * 0 on success / non-zero on failure
5238 static int ipr_sata_reset(struct ata_link
*link
, unsigned int *classes
,
5239 unsigned long deadline
)
5241 struct ipr_sata_port
*sata_port
= link
->ap
->private_data
;
5242 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
5243 struct ipr_resource_entry
*res
;
5244 unsigned long lock_flags
= 0;
5245 int rc
= -ENXIO
, ret
;
5248 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5249 while (ioa_cfg
->in_reset_reload
) {
5250 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5251 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
5252 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5255 res
= sata_port
->res
;
5257 rc
= ipr_device_reset(ioa_cfg
, res
);
5258 *classes
= res
->ata_class
;
5259 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5261 ret
= ipr_wait_for_ops(ioa_cfg
, res
, ipr_match_res
);
5262 if (ret
!= SUCCESS
) {
5263 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5264 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_ABBREV
);
5265 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5267 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
5270 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5277 * ipr_eh_dev_reset - Reset the device
5278 * @scsi_cmd: scsi command struct
5280 * This function issues a device reset to the affected device.
5281 * A LUN reset will be sent to the device first. If that does
5282 * not work, a target reset will be sent.
5287 static int __ipr_eh_dev_reset(struct scsi_cmnd
*scsi_cmd
)
5289 struct ipr_cmnd
*ipr_cmd
;
5290 struct ipr_ioa_cfg
*ioa_cfg
;
5291 struct ipr_resource_entry
*res
;
5292 struct ata_port
*ap
;
5294 struct ipr_hrr_queue
*hrrq
;
5297 ioa_cfg
= (struct ipr_ioa_cfg
*) scsi_cmd
->device
->host
->hostdata
;
5298 res
= scsi_cmd
->device
->hostdata
;
5301 * If we are currently going through reset/reload, return failed. This will force the
5302 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5305 if (ioa_cfg
->in_reset_reload
)
5307 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
5310 for_each_hrrq(hrrq
, ioa_cfg
) {
5311 spin_lock(&hrrq
->_lock
);
5312 for (i
= hrrq
->min_cmd_id
; i
<= hrrq
->max_cmd_id
; i
++) {
5313 ipr_cmd
= ioa_cfg
->ipr_cmnd_list
[i
];
5315 if (ipr_cmd
->ioarcb
.res_handle
== res
->res_handle
) {
5318 if (ipr_cmnd_is_free(ipr_cmd
))
5321 ipr_cmd
->done
= ipr_sata_eh_done
;
5322 if (!(ipr_cmd
->qc
->flags
& ATA_QCFLAG_FAILED
)) {
5323 ipr_cmd
->qc
->err_mask
|= AC_ERR_TIMEOUT
;
5324 ipr_cmd
->qc
->flags
|= ATA_QCFLAG_FAILED
;
5328 spin_unlock(&hrrq
->_lock
);
5330 res
->resetting_device
= 1;
5331 scmd_printk(KERN_ERR
, scsi_cmd
, "Resetting device\n");
5333 if (ipr_is_gata(res
) && res
->sata_port
) {
5334 ap
= res
->sata_port
->ap
;
5335 spin_unlock_irq(scsi_cmd
->device
->host
->host_lock
);
5336 ata_std_error_handler(ap
);
5337 spin_lock_irq(scsi_cmd
->device
->host
->host_lock
);
5339 rc
= ipr_device_reset(ioa_cfg
, res
);
5340 res
->resetting_device
= 0;
5341 res
->reset_occurred
= 1;
5344 return rc
? FAILED
: SUCCESS
;
5347 static int ipr_eh_dev_reset(struct scsi_cmnd
*cmd
)
5350 struct ipr_ioa_cfg
*ioa_cfg
;
5351 struct ipr_resource_entry
*res
;
5353 ioa_cfg
= (struct ipr_ioa_cfg
*) cmd
->device
->host
->hostdata
;
5354 res
= cmd
->device
->hostdata
;
5359 spin_lock_irq(cmd
->device
->host
->host_lock
);
5360 rc
= __ipr_eh_dev_reset(cmd
);
5361 spin_unlock_irq(cmd
->device
->host
->host_lock
);
5363 if (rc
== SUCCESS
) {
5364 if (ipr_is_gata(res
) && res
->sata_port
)
5365 rc
= ipr_wait_for_ops(ioa_cfg
, res
, ipr_match_res
);
5367 rc
= ipr_wait_for_ops(ioa_cfg
, cmd
->device
, ipr_match_lun
);
5374 * ipr_bus_reset_done - Op done function for bus reset.
5375 * @ipr_cmd: ipr command struct
5377 * This function is the op done function for a bus reset
5382 static void ipr_bus_reset_done(struct ipr_cmnd
*ipr_cmd
)
5384 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
5385 struct ipr_resource_entry
*res
;
5388 if (!ioa_cfg
->sis64
)
5389 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
5390 if (res
->res_handle
== ipr_cmd
->ioarcb
.res_handle
) {
5391 scsi_report_bus_reset(ioa_cfg
->host
, res
->bus
);
5397 * If abort has not completed, indicate the reset has, else call the
5398 * abort's done function to wake the sleeping eh thread
5400 if (ipr_cmd
->sibling
->sibling
)
5401 ipr_cmd
->sibling
->sibling
= NULL
;
5403 ipr_cmd
->sibling
->done(ipr_cmd
->sibling
);
5405 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5410 * ipr_abort_timeout - An abort task has timed out
5411 * @ipr_cmd: ipr command struct
5413 * This function handles when an abort task times out. If this
5414 * happens we issue a bus reset since we have resources tied
5415 * up that must be freed before returning to the midlayer.
5420 static void ipr_abort_timeout(struct timer_list
*t
)
5422 struct ipr_cmnd
*ipr_cmd
= from_timer(ipr_cmd
, t
, timer
);
5423 struct ipr_cmnd
*reset_cmd
;
5424 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
5425 struct ipr_cmd_pkt
*cmd_pkt
;
5426 unsigned long lock_flags
= 0;
5429 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5430 if (ipr_cmd
->completion
.done
|| ioa_cfg
->in_reset_reload
) {
5431 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5435 sdev_printk(KERN_ERR
, ipr_cmd
->u
.sdev
, "Abort timed out. Resetting bus.\n");
5436 reset_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
5437 ipr_cmd
->sibling
= reset_cmd
;
5438 reset_cmd
->sibling
= ipr_cmd
;
5439 reset_cmd
->ioarcb
.res_handle
= ipr_cmd
->ioarcb
.res_handle
;
5440 cmd_pkt
= &reset_cmd
->ioarcb
.cmd_pkt
;
5441 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5442 cmd_pkt
->cdb
[0] = IPR_RESET_DEVICE
;
5443 cmd_pkt
->cdb
[2] = IPR_RESET_TYPE_SELECT
| IPR_BUS_RESET
;
5445 ipr_do_req(reset_cmd
, ipr_bus_reset_done
, ipr_timeout
, IPR_DEVICE_RESET_TIMEOUT
);
5446 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5451 * ipr_cancel_op - Cancel specified op
5452 * @scsi_cmd: scsi command struct
5454 * This function cancels specified op.
5459 static int ipr_cancel_op(struct scsi_cmnd
*scsi_cmd
)
5461 struct ipr_cmnd
*ipr_cmd
;
5462 struct ipr_ioa_cfg
*ioa_cfg
;
5463 struct ipr_resource_entry
*res
;
5464 struct ipr_cmd_pkt
*cmd_pkt
;
5466 int i
, op_found
= 0;
5467 struct ipr_hrr_queue
*hrrq
;
5470 ioa_cfg
= (struct ipr_ioa_cfg
*)scsi_cmd
->device
->host
->hostdata
;
5471 res
= scsi_cmd
->device
->hostdata
;
5473 /* If we are currently going through reset/reload, return failed.
5474 * This will force the mid-layer to call ipr_eh_host_reset,
5475 * which will then go to sleep and wait for the reset to complete
5477 if (ioa_cfg
->in_reset_reload
||
5478 ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
5484 * If we are aborting a timed out op, chances are that the timeout was caused
5485 * by a still not detected EEH error. In such cases, reading a register will
5486 * trigger the EEH recovery infrastructure.
5488 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
5490 if (!ipr_is_gscsi(res
))
5493 for_each_hrrq(hrrq
, ioa_cfg
) {
5494 spin_lock(&hrrq
->_lock
);
5495 for (i
= hrrq
->min_cmd_id
; i
<= hrrq
->max_cmd_id
; i
++) {
5496 if (ioa_cfg
->ipr_cmnd_list
[i
]->scsi_cmd
== scsi_cmd
) {
5497 if (!ipr_cmnd_is_free(ioa_cfg
->ipr_cmnd_list
[i
])) {
5503 spin_unlock(&hrrq
->_lock
);
5509 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
5510 ipr_cmd
->ioarcb
.res_handle
= res
->res_handle
;
5511 cmd_pkt
= &ipr_cmd
->ioarcb
.cmd_pkt
;
5512 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5513 cmd_pkt
->cdb
[0] = IPR_CANCEL_ALL_REQUESTS
;
5514 ipr_cmd
->u
.sdev
= scsi_cmd
->device
;
5516 scmd_printk(KERN_ERR
, scsi_cmd
, "Aborting command: %02X\n",
5518 ipr_send_blocking_cmd(ipr_cmd
, ipr_abort_timeout
, IPR_CANCEL_ALL_TIMEOUT
);
5519 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5522 * If the abort task timed out and we sent a bus reset, we will get
5523 * one the following responses to the abort
5525 if (ioasc
== IPR_IOASC_BUS_WAS_RESET
|| ioasc
== IPR_IOASC_SYNC_REQUIRED
) {
5530 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5531 if (!ipr_is_naca_model(res
))
5532 res
->needs_sync_complete
= 1;
5535 return IPR_IOASC_SENSE_KEY(ioasc
) ? FAILED
: SUCCESS
;
/**
 * ipr_scan_finished - Report whether the device scan is done
 * @shost:		scsi host struct
 * @elapsed_time:	elapsed time of the scan in jiffies
 *
 * Return value:
 *	0 if scan in progress / 1 if scan is complete
 **/
static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	int rc = 0;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
		rc = 1;
	if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
		rc = 1;
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;
	struct ipr_ioa_cfg *ioa_cfg;

	ENTER;

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	if (rc == SUCCESS)
		rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
	LEAVE;
	return rc;
}
5588 * ipr_handle_other_interrupt - Handle "other" interrupts
5589 * @ioa_cfg: ioa config struct
5590 * @int_reg: interrupt register
5593 * IRQ_NONE / IRQ_HANDLED
5595 static irqreturn_t
ipr_handle_other_interrupt(struct ipr_ioa_cfg
*ioa_cfg
,
5598 irqreturn_t rc
= IRQ_HANDLED
;
5601 int_mask_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg32
);
5602 int_reg
&= ~int_mask_reg
;
5604 /* If an interrupt on the adapter did not occur, ignore it.
5605 * Or in the case of SIS 64, check for a stage change interrupt.
5607 if ((int_reg
& IPR_PCII_OPER_INTERRUPTS
) == 0) {
5608 if (ioa_cfg
->sis64
) {
5609 int_mask_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
5610 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
) & ~int_mask_reg
;
5611 if (int_reg
& IPR_PCII_IPL_STAGE_CHANGE
) {
5613 /* clear stage change */
5614 writel(IPR_PCII_IPL_STAGE_CHANGE
, ioa_cfg
->regs
.clr_interrupt_reg
);
5615 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
) & ~int_mask_reg
;
5616 list_del(&ioa_cfg
->reset_cmd
->queue
);
5617 del_timer(&ioa_cfg
->reset_cmd
->timer
);
5618 ipr_reset_ioa_job(ioa_cfg
->reset_cmd
);
5626 if (int_reg
& IPR_PCII_IOA_TRANS_TO_OPER
) {
5627 /* Mask the interrupt */
5628 writel(IPR_PCII_IOA_TRANS_TO_OPER
, ioa_cfg
->regs
.set_interrupt_mask_reg
);
5629 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
5631 list_del(&ioa_cfg
->reset_cmd
->queue
);
5632 del_timer(&ioa_cfg
->reset_cmd
->timer
);
5633 ipr_reset_ioa_job(ioa_cfg
->reset_cmd
);
5634 } else if ((int_reg
& IPR_PCII_HRRQ_UPDATED
) == int_reg
) {
5635 if (ioa_cfg
->clear_isr
) {
5636 if (ipr_debug
&& printk_ratelimit())
5637 dev_err(&ioa_cfg
->pdev
->dev
,
5638 "Spurious interrupt detected. 0x%08X\n", int_reg
);
5639 writel(IPR_PCII_HRRQ_UPDATED
, ioa_cfg
->regs
.clr_interrupt_reg32
);
5640 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
5644 if (int_reg
& IPR_PCII_IOA_UNIT_CHECKED
)
5645 ioa_cfg
->ioa_unit_checked
= 1;
5646 else if (int_reg
& IPR_PCII_NO_HOST_RRQ
)
5647 dev_err(&ioa_cfg
->pdev
->dev
,
5648 "No Host RRQ. 0x%08X\n", int_reg
);
5650 dev_err(&ioa_cfg
->pdev
->dev
,
5651 "Permanent IOA failure. 0x%08X\n", int_reg
);
5653 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
5654 ioa_cfg
->sdt_state
= GET_DUMP
;
5656 ipr_mask_and_clear_interrupts(ioa_cfg
, ~0);
5657 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 * @number:	value logged along with the message
 *
 * Return value:
 *	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}
5682 static int ipr_process_hrrq(struct ipr_hrr_queue
*hrr_queue
, int budget
,
5683 struct list_head
*doneq
)
5687 struct ipr_cmnd
*ipr_cmd
;
5688 struct ipr_ioa_cfg
*ioa_cfg
= hrr_queue
->ioa_cfg
;
5691 /* If interrupts are disabled, ignore the interrupt */
5692 if (!hrr_queue
->allow_interrupts
)
5695 while ((be32_to_cpu(*hrr_queue
->hrrq_curr
) & IPR_HRRQ_TOGGLE_BIT
) ==
5696 hrr_queue
->toggle_bit
) {
5698 cmd_index
= (be32_to_cpu(*hrr_queue
->hrrq_curr
) &
5699 IPR_HRRQ_REQ_RESP_HANDLE_MASK
) >>
5700 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT
;
5702 if (unlikely(cmd_index
> hrr_queue
->max_cmd_id
||
5703 cmd_index
< hrr_queue
->min_cmd_id
)) {
5705 "Invalid response handle from IOA: ",
5710 ipr_cmd
= ioa_cfg
->ipr_cmnd_list
[cmd_index
];
5711 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5713 ipr_trc_hook(ipr_cmd
, IPR_TRACE_FINISH
, ioasc
);
5715 list_move_tail(&ipr_cmd
->queue
, doneq
);
5717 if (hrr_queue
->hrrq_curr
< hrr_queue
->hrrq_end
) {
5718 hrr_queue
->hrrq_curr
++;
5720 hrr_queue
->hrrq_curr
= hrr_queue
->hrrq_start
;
5721 hrr_queue
->toggle_bit
^= 1u;
5724 if (budget
> 0 && num_hrrq
>= budget
)
static int ipr_iopoll(struct irq_poll *iop, int budget)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_hrr_queue *hrrq;
	struct ipr_cmnd *ipr_cmd, *temp;
	unsigned long hrrq_flags;
	int completed_ops;
	LIST_HEAD(doneq);

	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
	ioa_cfg = hrrq->ioa_cfg;

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);

	if (completed_ops < budget)
		irq_poll_complete(iop);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}

	return completed_ops;
}
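
/*
 * Illustrative note (assumption): irq_poll hands ipr_iopoll() a budget per
 * poll invocation. If, say, the configured weight is 64 and only 10 responses
 * are pending on the HRRQ, ipr_process_hrrq() returns 10; since 10 < 64 the
 * driver calls irq_poll_complete() and the queue drops back to
 * interrupt-driven completion until ipr_isr_mhrrq() schedules polling again.
 */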
5760 * ipr_isr - Interrupt service routine
5762 * @devp: pointer to ioa config struct
5765 * IRQ_NONE / IRQ_HANDLED
5767 static irqreturn_t
ipr_isr(int irq
, void *devp
)
5769 struct ipr_hrr_queue
*hrrq
= (struct ipr_hrr_queue
*)devp
;
5770 struct ipr_ioa_cfg
*ioa_cfg
= hrrq
->ioa_cfg
;
5771 unsigned long hrrq_flags
= 0;
5775 struct ipr_cmnd
*ipr_cmd
, *temp
;
5776 irqreturn_t rc
= IRQ_NONE
;
5779 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
5780 /* If interrupts are disabled, ignore the interrupt */
5781 if (!hrrq
->allow_interrupts
) {
5782 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5787 if (ipr_process_hrrq(hrrq
, -1, &doneq
)) {
5790 if (!ioa_cfg
->clear_isr
)
5793 /* Clear the PCI interrupt */
5796 writel(IPR_PCII_HRRQ_UPDATED
,
5797 ioa_cfg
->regs
.clr_interrupt_reg32
);
5798 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
5799 } while (int_reg
& IPR_PCII_HRRQ_UPDATED
&&
5800 num_hrrq
++ < IPR_MAX_HRRQ_RETRIES
);
5802 } else if (rc
== IRQ_NONE
&& irq_none
== 0) {
5803 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
5805 } else if (num_hrrq
== IPR_MAX_HRRQ_RETRIES
&&
5806 int_reg
& IPR_PCII_HRRQ_UPDATED
) {
5808 "Error clearing HRRQ: ", num_hrrq
);
5815 if (unlikely(rc
== IRQ_NONE
))
5816 rc
= ipr_handle_other_interrupt(ioa_cfg
, int_reg
);
5818 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5819 list_for_each_entry_safe(ipr_cmd
, temp
, &doneq
, queue
) {
5820 list_del(&ipr_cmd
->queue
);
5821 del_timer(&ipr_cmd
->timer
);
5822 ipr_cmd
->fast_done(ipr_cmd
);
5828 * ipr_isr_mhrrq - Interrupt service routine
5830 * @devp: pointer to ioa config struct
5833 * IRQ_NONE / IRQ_HANDLED
5835 static irqreturn_t
ipr_isr_mhrrq(int irq
, void *devp
)
5837 struct ipr_hrr_queue
*hrrq
= (struct ipr_hrr_queue
*)devp
;
5838 struct ipr_ioa_cfg
*ioa_cfg
= hrrq
->ioa_cfg
;
5839 unsigned long hrrq_flags
= 0;
5840 struct ipr_cmnd
*ipr_cmd
, *temp
;
5841 irqreturn_t rc
= IRQ_NONE
;
5844 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
5846 /* If interrupts are disabled, ignore the interrupt */
5847 if (!hrrq
->allow_interrupts
) {
5848 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5852 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
5853 if ((be32_to_cpu(*hrrq
->hrrq_curr
) & IPR_HRRQ_TOGGLE_BIT
) ==
5855 irq_poll_sched(&hrrq
->iopoll
);
5856 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5860 if ((be32_to_cpu(*hrrq
->hrrq_curr
) & IPR_HRRQ_TOGGLE_BIT
) ==
5863 if (ipr_process_hrrq(hrrq
, -1, &doneq
))
5867 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5869 list_for_each_entry_safe(ipr_cmd
, temp
, &doneq
, queue
) {
5870 list_del(&ipr_cmd
->queue
);
5871 del_timer(&ipr_cmd
->timer
);
5872 ipr_cmd
->fast_done(ipr_cmd
);
5878 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5879 * @ioa_cfg: ioa config struct
5880 * @ipr_cmd: ipr command struct
5883 * 0 on success / -1 on failure
5885 static int ipr_build_ioadl64(struct ipr_ioa_cfg
*ioa_cfg
,
5886 struct ipr_cmnd
*ipr_cmd
)
5889 struct scatterlist
*sg
;
5891 u32 ioadl_flags
= 0;
5892 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5893 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
5894 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ioadl64
;
5896 length
= scsi_bufflen(scsi_cmd
);
5900 nseg
= scsi_dma_map(scsi_cmd
);
5902 if (printk_ratelimit())
5903 dev_err(&ioa_cfg
->pdev
->dev
, "scsi_dma_map failed!\n");
5907 ipr_cmd
->dma_use_sg
= nseg
;
5909 ioarcb
->data_transfer_length
= cpu_to_be32(length
);
5911 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
) * ipr_cmd
->dma_use_sg
);
5913 if (scsi_cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
5914 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
5915 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
5916 } else if (scsi_cmd
->sc_data_direction
== DMA_FROM_DEVICE
)
5917 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
5919 scsi_for_each_sg(scsi_cmd
, sg
, ipr_cmd
->dma_use_sg
, i
) {
5920 ioadl64
[i
].flags
= cpu_to_be32(ioadl_flags
);
5921 ioadl64
[i
].data_len
= cpu_to_be32(sg_dma_len(sg
));
5922 ioadl64
[i
].address
= cpu_to_be64(sg_dma_address(sg
));
5925 ioadl64
[i
-1].flags
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
5930 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5931 * @ioa_cfg: ioa config struct
5932 * @ipr_cmd: ipr command struct
5935 * 0 on success / -1 on failure
5937 static int ipr_build_ioadl(struct ipr_ioa_cfg
*ioa_cfg
,
5938 struct ipr_cmnd
*ipr_cmd
)
5941 struct scatterlist
*sg
;
5943 u32 ioadl_flags
= 0;
5944 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5945 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
5946 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
5948 length
= scsi_bufflen(scsi_cmd
);
5952 nseg
= scsi_dma_map(scsi_cmd
);
5954 dev_err(&ioa_cfg
->pdev
->dev
, "scsi_dma_map failed!\n");
5958 ipr_cmd
->dma_use_sg
= nseg
;
5960 if (scsi_cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
5961 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
5962 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
5963 ioarcb
->data_transfer_length
= cpu_to_be32(length
);
5965 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
5966 } else if (scsi_cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
5967 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
5968 ioarcb
->read_data_transfer_length
= cpu_to_be32(length
);
5969 ioarcb
->read_ioadl_len
=
5970 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
5973 if (ipr_cmd
->dma_use_sg
<= ARRAY_SIZE(ioarcb
->u
.add_data
.u
.ioadl
)) {
5974 ioadl
= ioarcb
->u
.add_data
.u
.ioadl
;
5975 ioarcb
->write_ioadl_addr
= cpu_to_be32((ipr_cmd
->dma_addr
) +
5976 offsetof(struct ipr_ioarcb
, u
.add_data
));
5977 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
5980 scsi_for_each_sg(scsi_cmd
, sg
, ipr_cmd
->dma_use_sg
, i
) {
5981 ioadl
[i
].flags_and_data_len
=
5982 cpu_to_be32(ioadl_flags
| sg_dma_len(sg
));
5983 ioadl
[i
].address
= cpu_to_be32(sg_dma_address(sg
));
5986 ioadl
[i
-1].flags_and_data_len
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
5991 * __ipr_erp_done - Process completion of ERP for a device
5992 * @ipr_cmd: ipr command struct
5994 * This function copies the sense buffer into the scsi_cmd
5995 * struct and pushes the scsi_done function.
6000 static void __ipr_erp_done(struct ipr_cmnd
*ipr_cmd
)
6002 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
6003 struct ipr_resource_entry
*res
= scsi_cmd
->device
->hostdata
;
6004 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
6006 if (IPR_IOASC_SENSE_KEY(ioasc
) > 0) {
6007 scsi_cmd
->result
|= (DID_ERROR
<< 16);
6008 scmd_printk(KERN_ERR
, scsi_cmd
,
6009 "Request Sense failed with IOASC: 0x%08X\n", ioasc
);
6011 memcpy(scsi_cmd
->sense_buffer
, ipr_cmd
->sense_buffer
,
6012 SCSI_SENSE_BUFFERSIZE
);
6016 if (!ipr_is_naca_model(res
))
6017 res
->needs_sync_complete
= 1;
6020 scsi_dma_unmap(ipr_cmd
->scsi_cmd
);
6021 scsi_cmd
->scsi_done(scsi_cmd
);
6022 if (ipr_cmd
->eh_comp
)
6023 complete(ipr_cmd
->eh_comp
);
6024 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:		ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_erp_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}
}
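
/*
 * The ERP helpers below reuse the command block that just failed rather
 * than allocating a new one, so ipr_reinit_ipr_cmnd_for_erp() must clear
 * the stale command packet, transfer lengths, and IOASA status before the
 * block is reissued as a REQUEST SENSE or CANCEL ALL.
 */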
/**
 * __ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 **/
static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		__ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}
/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_erp_request_sense(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_cmd->device->simple_tags) {
		__ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
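
/*
 * ERP sequencing: for devices using simple tags, CANCEL ALL REQUESTS is
 * sent first (its completion runs ipr_erp_request_sense), then REQUEST
 * SENSE (its completion runs ipr_erp_done), which finally copies the sense
 * data back to the midlayer command.
 */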
/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->hdr.ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
		data_len = sizeof(struct ipr_ioasa64);
	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
		data_len = sizeof(struct ipr_ioasa);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}
/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 * @sense_buf:	sense data buffer
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
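
/*
 * ipr_gen_sense() above emits descriptor-format sense (response code 0x72)
 * only for vset devices whose failing LBA does not fit in 32 bits; all
 * other cases use fixed-format sense (response code 0x70), with the
 * information field carrying the failing LBA when it is valid.
 */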
/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;

	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	else
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	return 1;
}
/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	if (!res) {
		__ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		/*
		 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
		 * so SCSI mid-layer and upper layers handle it accordingly.
		 */
		if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
			scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	case IPR_IOASC_IR_NON_OPTIMIZED:
		if (res->raw_mode) {
			res->raw_mode = 0;
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		} else
			scsi_cmd->result |= (DID_ERROR << 16);
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	unsigned long lock_flags;

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(scsi_cmd);

		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
		scsi_cmd->scsi_done(scsi_cmd);
		if (ipr_cmd->eh_comp)
			complete(ipr_cmd->eh_comp);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
	} else {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		spin_lock(&ipr_cmd->hrrq->_lock);
		ipr_erp_start(ioa_cfg, ipr_cmd);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	}
}
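
/*
 * Fast path vs. error path: commands that complete with no sense key are
 * finished above under only the hrrq lock, while anything with a non-zero
 * sense key is routed through ipr_erp_start() with the host lock and the
 * hrrq lock both held.
 */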
/**
 * ipr_queuecommand - Queue a mid-layer request
 * @shost:	scsi host struct
 * @scsi_cmd:	scsi command struct
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct Scsi_Host *shost,
			    struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	unsigned long hrrq_flags, lock_flags;
	int rc;
	struct ipr_hrr_queue *hrrq;
	int hrrq_id;

	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	scsi_cmd->result = (DID_OK << 16);
	res = scsi_cmd->device->hostdata;

	if (ipr_is_gata(res) && res->sata_port) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return rc;
	}

	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
	hrrq = &ioa_cfg->hrrq[hrrq_id];

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		goto err_nodev;
	}

	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
	if (ipr_cmd == NULL) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
	ioarcb = &ipr_cmd->ioarcb;

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ipr_cmd->done = ipr_scsi_eh_done;

	if (ipr_is_gscsi(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		if (res->reset_occurred) {
			res->reset_occurred = 0;
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		}
	}

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;

		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		if (scsi_cmd->flags & SCMD_TAGGED)
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
		else
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
	}

	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	}
	if (res->raw_mode && ipr_is_af_dasd_device(res)) {
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;

		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	}

	if (ioa_cfg->sis64)
		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
	else
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		if (!rc)
			scsi_dma_unmap(scsi_cmd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	if (unlikely(hrrq->ioa_is_dead)) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		scsi_dma_unmap(scsi_cmd);
		goto err_nodev;
	}

	ioarcb->res_handle = res->res_handle;
	if (res->needs_sync_complete) {
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
		res->needs_sync_complete = 0;
	}
	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
	ipr_send_command(ipr_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;

err_nodev:
	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_cmd->result = (DID_NO_CONNECT << 16);
	scsi_cmd->scsi_done(scsi_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;
}
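
/*
 * Queueing summary: GATA commands are handed straight to libata via
 * ata_sas_queuecmd(); everything else grabs a free ipr_cmnd from the
 * selected HRRQ, builds the CDB and scatter/gather list, and is sent with
 * ipr_send_command(). Resource shortages report SCSI_MLQUEUE_HOST_BUSY so
 * the mid-layer retries, and missing or dead devices complete with
 * DID_NO_CONNECT.
 */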
/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res)) {
		if (cmd == HDIO_GET_IDENTITY)
			return -ENOTTY;
		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
	}

	return -EINVAL;
}
/**
 * ipr_info - Get information about the card/driver
 * @scsi_host:	scsi host struct
 *
 * Return value:
 * 	pointer to buffer with description string
 **/
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.scan_finished = ipr_scan_finished,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME,
};
/**
 * ipr_ata_phy_reset - libata phy_reset handler
 * @ap:		ata port to reset
 **/
static void ipr_ata_phy_reset(struct ata_port *ap)
{
	unsigned long flags;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	int rc;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
		goto out_unlock;

	rc = ipr_device_reset(ioa_cfg, res);

	if (rc) {
		ap->link.device[0].class = ATA_DEV_NONE;
		goto out_unlock;
	}

	ap->link.device[0].class = res->ata_class;
	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
		ap->link.device[0].class = ATA_DEV_NONE;

out_unlock:
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	LEAVE;
}
/**
 * ipr_ata_post_internal - Cleanup after an internal command
 * @qc:	ATA queued command
 **/
static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_hrr_queue *hrrq;
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
			if (ipr_cmd->qc == qc) {
				ipr_device_reset(ioa_cfg, sata_port->res);
				break;
			}
		}
		spin_unlock(&hrrq->_lock);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
 * @regs:	destination
 * @tf:	source ATA taskfile
 **/
static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
			     struct ata_taskfile *tf)
{
	regs->feature = tf->feature;
	regs->nsect = tf->nsect;
	regs->lbal = tf->lbal;
	regs->lbam = tf->lbam;
	regs->lbah = tf->lbah;
	regs->device = tf->device;
	regs->command = tf->command;
	regs->hob_feature = tf->hob_feature;
	regs->hob_nsect = tf->hob_nsect;
	regs->hob_lbal = tf->hob_lbal;
	regs->hob_lbam = tf->hob_lbam;
	regs->hob_lbah = tf->hob_lbah;
	regs->ctl = tf->ctl;
}
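
/* The hob_* fields carry the high-order bytes of 48-bit LBA taskfiles. */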
/**
 * ipr_sata_done - done function for SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer to SATA devices
 **/
static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	spin_lock(&ipr_cmd->hrrq->_lock);
	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	else
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);

	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
	else
		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	spin_unlock(&ipr_cmd->hrrq->_lock);
	ata_qc_complete(qc);
}
/**
 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:		ATA queued command
 **/
static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
				  struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (qc->dma_dir == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	ioarcb->data_transfer_length = cpu_to_be32(len);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	ioarcb->u.sis64_addr_data.data_ioadl_addr =
		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl64->flags = cpu_to_be32(ioadl_flags);
		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64->address = cpu_to_be64(sg_dma_address(sg));

		last_ioadl64 = ioadl64;
		ioadl64++;
	}

	if (likely(last_ioadl64))
		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
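
/*
 * The 64-bit (sis64) descriptor keeps flags, length, and a 64-bit address
 * in separate fields, while the legacy descriptor below packs flags and
 * length into one big-endian word with a 32-bit address.
 */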
/**
 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:		ATA queued command
 **/
static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
				struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl_desc *last_ioadl = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(len);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(len);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl->address = cpu_to_be32(sg_dma_address(sg));

		last_ioadl = ioadl;
		ioadl++;
	}

	if (likely(last_ioadl))
		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_qc_defer - Get a free ipr_cmd
 * @qc:	queued command
 *
 * Return value:
 *	0 if success
 **/
static int ipr_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_hrr_queue *hrrq;
	int hrrq_id;

	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
	hrrq = &ioa_cfg->hrrq[hrrq_id];

	qc->lldd_task = NULL;
	spin_lock(&hrrq->_lock);
	if (unlikely(hrrq->ioa_is_dead)) {
		spin_unlock(&hrrq->_lock);
		return 0;
	}

	if (unlikely(!hrrq->allow_cmds)) {
		spin_unlock(&hrrq->_lock);
		return ATA_DEFER_LINK;
	}

	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
	if (ipr_cmd == NULL) {
		spin_unlock(&hrrq->_lock);
		return ATA_DEFER_LINK;
	}

	qc->lldd_task = ipr_cmd;
	spin_unlock(&hrrq->_lock);
	return 0;
}
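
/*
 * Returning ATA_DEFER_LINK when no command block is available lets libata
 * retry the qc later instead of failing it; the reserved block is stashed
 * in qc->lldd_task for ipr_qc_issue() to pick up.
 */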
/**
 * ipr_qc_issue - Issue a SATA qc to a device
 * @qc:	queued command
 *
 * Return value:
 * 	0 if success
 **/
static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_ioarcb_ata_regs *regs;

	if (qc->lldd_task == NULL)
		ipr_qc_defer(qc);

	ipr_cmd = qc->lldd_task;
	if (ipr_cmd == NULL)
		return AC_ERR_SYSTEM;

	qc->lldd_task = NULL;
	spin_lock(&ipr_cmd->hrrq->_lock);
	if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
			ipr_cmd->hrrq->ioa_is_dead)) {
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		return AC_ERR_SYSTEM;
	}

	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	ioarcb = &ipr_cmd->ioarcb;

	if (ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	memset(regs, 0, sizeof(*regs));
	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->qc = qc;
	ipr_cmd->done = ipr_sata_done;
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	ipr_cmd->dma_use_sg = qc->n_elem;

	if (ioa_cfg->sis64)
		ipr_build_ata_ioadl64(ipr_cmd, qc);
	else
		ipr_build_ata_ioadl(ipr_cmd, qc);

	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	ipr_copy_sata_tf(regs, &qc->tf);
	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
	case ATA_PROT_PIO:
		break;

	case ATA_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		break;

	case ATAPI_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	default:
		spin_unlock(&ipr_cmd->hrrq->_lock);
		return AC_ERR_INVALID;
	}

	ipr_send_command(ipr_cmd);
	spin_unlock(&ipr_cmd->hrrq->_lock);

	return 0;
}
/**
 * ipr_qc_fill_rtf - Read result TF
 * @qc: ATA queued command
 *
 * Return value:
 * 	true
 **/
static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioasa_gata *g = &sata_port->ioasa;
	struct ata_taskfile *tf = &qc->result_tf;

	tf->feature = g->error;
	tf->nsect = g->nsect;
	tf->lbal = g->lbal;
	tf->lbam = g->lbam;
	tf->lbah = g->lbah;
	tf->device = g->device;
	tf->command = g->status;
	tf->hob_nsect = g->hob_nsect;
	tf->hob_lbal = g->hob_lbal;
	tf->hob_lbam = g->hob_lbam;
	tf->hob_lbah = g->hob_lbah;

	return true;
}
static struct ata_port_operations ipr_sata_ops = {
	.phy_reset = ipr_ata_phy_reset,
	.hardreset = ipr_sata_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.qc_prep = ata_noop_qc_prep,
	.qc_defer = ipr_qc_defer,
	.qc_issue = ipr_qc_issue,
	.qc_fill_rtf = ipr_qc_fill_rtf,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};

static struct ata_port_info sata_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
			  ATA_FLAG_SAS_HOST,
	.pio_mask	= ATA_PIO4_ONLY,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ipr_sata_ops
};
#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
	PVR_NORTHSTAR,
	PVR_PULSAR,
	PVR_POWER4,
	PVR_ICESTAR,
	PVR_SSTAR,
	PVR_POWER4p,
	PVR_630,
	PVR_630p
};

/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 * 	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
			if (pvr_version_is(ipr_blocked_processors[i]))
				return 1;
		}
	}
	return 0;
}
#else
#define ipr_invalid_adapter(ioa_cfg) 0
#endif
/**
 * ipr_ioa_bringdown_done - IOA bring down completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter bring down.
 * It wakes any reset sleepers.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

	ENTER;
	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
		ioa_cfg->scsi_unblock = 1;
		schedule_work(&ioa_cfg->work_q);
	}

	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->reset_retries = 0;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].ioa_is_dead = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);
	LEAVE;

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioa_reset_done - IOA reset completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter reset.
 * It schedules any necessary mid-layer add/removes and
 * wakes any reset sleepers.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;
	int j;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	for (j = 0; j < ioa_cfg->hrrq_num; j++) {
		spin_lock(&ioa_cfg->hrrq[j]._lock);
		ioa_cfg->hrrq[j].allow_cmds = 1;
		spin_unlock(&ioa_cfg->hrrq[j]._lock);
	}
	ioa_cfg->reset_cmd = NULL;
	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml || res->del_from_ml)
			break;
	}
	schedule_work(&ioa_cfg->work_q);

	for (j = 0; j < IPR_NUM_HCAMS; j++) {
		list_del_init(&ioa_cfg->hostrcb[j]->queue);
		if (j < IPR_NUM_LOG_HCAMS)
			ipr_send_hcam(ioa_cfg,
				IPR_HCAM_CDB_OP_CODE_LOG_DATA,
				ioa_cfg->hostrcb[j]);
		else
			ipr_send_hcam(ioa_cfg,
				IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				ioa_cfg->hostrcb[j]);
	}

	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");

	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	ioa_cfg->scsi_unblock = 1;
	schedule_work(&ioa_cfg->work_q);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
 * @supported_dev:	supported device struct
 * @vpids:		vendor product id struct
 **/
static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
				 struct ipr_std_inq_vpids *vpids)
{
	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
	supported_dev->num_records = 1;
	supported_dev->data_length =
		cpu_to_be16(sizeof(struct ipr_supported_device));
	supported_dev->reserved = 0;
}
/**
 * ipr_set_supported_devs - Send Set Supported Devices for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Set Supported Devices to the adapter
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_resource_entry *res = ipr_cmd->u.res;

	ipr_cmd->job_step = ipr_ioa_reset_done;

	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
		if (!ipr_is_scsi_disk(res))
			continue;

		ipr_cmd->u.res = res;
		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;

		ipr_init_ioadl(ipr_cmd,
			       ioa_cfg->vpd_cbs_dma +
				 offsetof(struct ipr_misc_cbs, supp_dev),
			       sizeof(struct ipr_supported_device),
			       IPR_IOADL_FLAGS_WRITE_LAST);

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_SET_SUP_DEVICE_TIMEOUT);

		if (!ioa_cfg->sis64)
			ipr_cmd->job_step = ipr_set_supported_devs;
		LEAVE;
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_get_mode_page - Locate specified mode page
 * @mode_pages:	mode page buffer
 * @page_code:	page code to find
 * @len:	minimum required length for mode page
 *
 * Return value:
 * 	pointer to mode page / NULL on failure
 **/
static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
			       u32 page_code, u32 len)
{
	struct ipr_mode_page_hdr *mode_hdr;
	u32 page_length;
	u32 length;

	if (!mode_pages || (mode_pages->hdr.length == 0))
		return NULL;

	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
	mode_hdr = (struct ipr_mode_page_hdr *)
		(mode_pages->data + mode_pages->hdr.block_desc_len);

	while (length) {
		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
				return mode_hdr;
			break;
		} else {
			page_length = (sizeof(struct ipr_mode_page_hdr) +
				       mode_hdr->page_length);
			length -= page_length;
			mode_hdr = (struct ipr_mode_page_hdr *)
				((unsigned long)mode_hdr + page_length);
		}
	}
	return NULL;
}
/**
 * ipr_check_term_power - Check for term power errors
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	IOAFP mode pages buffer
 *
 * Check the IOAFP's mode page 28 for term power errors
 **/
static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_mode_pages *mode_pages)
{
	int i;
	int entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	bus = mode_page->bus;

	for (i = 0; i < mode_page->num_entries; i++) {
		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
			dev_err(&ioa_cfg->pdev->dev,
				"Term power is absent on scsi bus %d\n",
				bus->res_addr.bus);
		}

		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
	}
}
/**
 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
 * @ioa_cfg:	ioa config struct
 *
 * Looks through the config table checking for SES devices. If
 * the SES device is in the SES table indicating a maximum SCSI
 * bus speed, the speed is limited for the bus.
 **/
static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
{
	u32 max_xfer_rate;
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
						       ioa_cfg->bus_attr[i].bus_width);

		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
	}
}
/**
 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	mode page 28 buffer
 *
 * Updates mode page 28 based on driver configuration
 **/
static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_mode_pages *mode_pages)
{
	int i, entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_bus_attributes *bus_attr;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	/* Loop for each device bus entry */
	for (i = 0, bus = mode_page->bus;
	     i < mode_page->num_entries;
	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
			dev_err(&ioa_cfg->pdev->dev,
				"Invalid resource address reported: 0x%08X\n",
				IPR_GET_PHYS_LOC(bus->res_addr));
			continue;
		}

		bus_attr = &ioa_cfg->bus_attr[i];
		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
		bus->bus_width = bus_attr->bus_width;
		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
		if (bus_attr->qas_enabled)
			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
		else
			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
	}
}
/**
 * ipr_build_mode_select - Build a mode select command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 2 of Mode Sense command
 * @dma_addr:	DMA buffer address
 * @xfer_len:	data transfer length
 **/
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
				  __be32 res_handle, u8 parm,
				  dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
	ioarcb->cmd_pkt.cdb[1] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
}
/**
 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the SCSI bus attributes and sends
 * a Mode Select for Page 28 to activate them.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	int length;

	ENTER;
	ipr_scsi_bus_speed_limit(ioa_cfg);
	ipr_check_term_power(ioa_cfg, mode_pages);
	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_set_supported_devs;
	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
				    struct ipr_resource_entry, queue);
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_build_mode_sense - Builds a mode sense command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource entry struct
 * @parm:	Byte 2 of mode sense command
 * @dma_addr:	DMA address of mode sense buffer
 * @xfer_len:	Size of DMA buffer
 **/
static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
				 __be32 res_handle,
				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
	ioarcb->cmd_pkt.cdb[2] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
}
/**
 * ipr_reset_cmd_failed - Handle failure of IOA reset command
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of an IOA bringup command.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	dev_err(&ioa_cfg->pdev->dev,
		"0x%02X failed with IOASC: 0x%08X\n",
		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_set_supported_devs;
		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
					    struct ipr_resource_entry, queue);
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}
/**
 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 28 mode sense to the IOA to
 * retrieve SCSI bus attributes.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x28, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function enables dual IOA RAID support if possible.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	struct ipr_mode_page24 *mode_page;
	int length;

	ENTER;
	mode_page = ipr_get_mode_page(mode_pages, 0x24,
				      sizeof(struct ipr_mode_page24));

	if (mode_page)
		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;

	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
{
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}
/**
 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a mode sense to the IOA to retrieve
 * the IOA Advanced Function Control mode page.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x24, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_init_res_table - Initialize the resource table
 * @ipr_cmd:	ipr command struct
 *
 * This function looks through the existing resource table, comparing
 * it with the config table. This function will take care of old/new
 * devices and schedule adding/removing them from the mid-layer
 * as appropriate.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res, *temp;
	struct ipr_config_table_entry_wrapper cfgtew;
	int entries, found, flag, i;
	LIST_HEAD(old_res);

	ENTER;
	if (ioa_cfg->sis64)
		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
	else
		flag = ioa_cfg->u.cfg_table->hdr.flags;

	if (flag & IPR_UCODE_DOWNLOAD_REQ)
		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");

	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
		list_move_tail(&res->queue, &old_res);

	if (ioa_cfg->sis64)
		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
	else
		entries = ioa_cfg->u.cfg_table->hdr.num_entries;

	for (i = 0; i < entries; i++) {
		if (ioa_cfg->sis64)
			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
		else
			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
		found = 0;

		list_for_each_entry_safe(res, temp, &old_res, queue) {
			if (ipr_is_same_device(res, &cfgtew)) {
				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
				found = 1;
				break;
			}
		}

		if (!found) {
			if (list_empty(&ioa_cfg->free_res_q)) {
				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
				break;
			}

			found = 1;
			res = list_entry(ioa_cfg->free_res_q.next,
					 struct ipr_resource_entry, queue);
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
			ipr_init_res_entry(res, &cfgtew);
			res->add_to_ml = 1;
		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
			res->sdev->allow_restart = 1;

		if (found)
			ipr_update_res_entry(res, &cfgtew);
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		}
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		ipr_clear_res_target(res);
		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	}

	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
	else
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
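
/*
 * Resource table refresh: entries still on old_res after the scan are
 * devices that disappeared; those with an attached sdev are flagged
 * del_from_ml for the worker thread, the rest are returned to the free
 * list immediately.
 */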
/**
 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Query IOA Configuration command
 * to the adapter to retrieve the IOA configuration table.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
		ioa_cfg->dual_raid = 1;
	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
		 ucode_vpd->major_release, ucode_vpd->card_type,
		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;

	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
		       IPR_IOADL_FLAGS_READ_LAST);

	ipr_cmd->job_step = ipr_init_res_table;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
{
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
		return IPR_RC_JOB_CONTINUE;

	return ipr_reset_cmd_failed(ipr_cmd);
}

static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
					 __be32 res_handle, u8 sa_code)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
	ioarcb->cmd_pkt.cdb[1] = sa_code;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
}
/**
 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
 * action
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;

	ENTER;

	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;

	if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
		ipr_build_ioa_service_action(ipr_cmd,
					     cpu_to_be32(IPR_IOA_RES_HANDLE),
					     IPR_IOA_SA_CHANGE_CACHE_PARAMS);

		ioarcb->cmd_pkt.cdb[2] = 0x40;

		ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_SET_SUP_DEVICE_TIMEOUT);

		LEAVE;
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This utility function sends an inquiry to the adapter.
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
	LEAVE;
}
/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:	inquiry page 0 buffer
 * @page:	page code
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
			return 1;

	return 0;
}
/**
 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0xC4 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
	struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
	memset(pageC4, 0, sizeof(*pageC4));

	if (ipr_inquiry_page_supported(page0, 0xC4)) {
		ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
				  (ioa_cfg->vpd_cbs_dma
				   + offsetof(struct ipr_misc_cbs,
					      pageC4_data)),
				  sizeof(struct ipr_inquiry_pageC4));
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0xD0 inquiry to the adapter
 * to retrieve adapter capabilities.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
	memset(cap, 0, sizeof(*cap));

	if (ipr_inquiry_page_supported(page0, 0xD0)) {
		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
				  sizeof(struct ipr_inquiry_cap));
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;

	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5];

	ENTER;

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	if (ipr_invalid_adapter(ioa_cfg)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");

		if (!ipr_testmode) {
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue,
					&ioa_cfg->hrrq->hrrq_free_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	return IPR_RC_JOB_RETURN;
}
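
/*
 * Note on the bring-up sequence implemented by the inquiry steps above and
 * the Identify Host RRQ step below: each step points ipr_cmd->job_step at the
 * next step before issuing its command, so the chain runs
 * identify HRRQ -> standard inquiry -> page 0 (supported pages) ->
 * page 3 (software VPD) -> page 0xD0 (capabilities) -> page 0xC4,
 * with pages 0xD0 and 0xC4 only requested when page 0 reports support.
 */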
/**
 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_hrr_queue *hrrq;

	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
	if (ioa_cfg->identify_hrrq_index == 0)
		dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
		hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];

		ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		if (ioa_cfg->sis64)
			ioarcb->cmd_pkt.cdb[1] = 0x1;

		if (ioa_cfg->nvectors == 1)
			ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
		else
			ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;

		ioarcb->cmd_pkt.cdb[2] =
			((u64) hrrq->host_rrq_dma >> 24) & 0xff;
		ioarcb->cmd_pkt.cdb[3] =
			((u64) hrrq->host_rrq_dma >> 16) & 0xff;
		ioarcb->cmd_pkt.cdb[4] =
			((u64) hrrq->host_rrq_dma >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[5] =
			((u64) hrrq->host_rrq_dma) & 0xff;
		ioarcb->cmd_pkt.cdb[7] =
			((sizeof(u32) * hrrq->size) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] =
			(sizeof(u32) * hrrq->size) & 0xff;

		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
			ioarcb->cmd_pkt.cdb[9] =
				ioa_cfg->identify_hrrq_index;

		if (ioa_cfg->sis64) {
			ioarcb->cmd_pkt.cdb[10] =
				((u64) hrrq->host_rrq_dma >> 56) & 0xff;
			ioarcb->cmd_pkt.cdb[11] =
				((u64) hrrq->host_rrq_dma >> 48) & 0xff;
			ioarcb->cmd_pkt.cdb[12] =
				((u64) hrrq->host_rrq_dma >> 40) & 0xff;
			ioarcb->cmd_pkt.cdb[13] =
				((u64) hrrq->host_rrq_dma >> 32) & 0xff;
		}

		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
			ioarcb->cmd_pkt.cdb[14] =
				ioa_cfg->identify_hrrq_index;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_INTERNAL_TIMEOUT);

		if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;

		return IPR_RC_JOB_RETURN;
	}

	return IPR_RC_JOB_CONTINUE;
}
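
/*
 * Illustrative sketch, not part of the driver: the Identify Host RRQ setup
 * above splits the queue's DMA address into individual CDB bytes by hand,
 * most-significant byte first. A generic helper doing the same packing could
 * look like the one below; the helper name is an assumption made only for
 * this example.
 */
static void __maybe_unused pack_be_bytes(u8 *cdb, int first, u64 value, int nbytes)
{
	int i;

	/* Byte 'first' receives the most significant of the nbytes bytes. */
	for (i = 0; i < nbytes; i++)
		cdb[first + i] = (value >> (8 * (nbytes - 1 - i))) & 0xff;
}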
/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's, we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_timer_done(struct timer_list *t)
{
	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's, we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}
/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);

		/* Initialize Host RRQ pointers */
		hrrq->hrrq_start = hrrq->host_rrq;
		hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
		hrrq->hrrq_curr = hrrq->hrrq_start;
		hrrq->toggle_bit = 1;
		spin_unlock(&hrrq->_lock);
	}

	ioa_cfg->identify_hrrq_index = 0;
	if (ioa_cfg->hrrq_num == 1)
		atomic_set(&ioa_cfg->hrrq_index, 0);
	else
		atomic_set(&ioa_cfg->hrrq_index, 1);

	/* Zero out config table */
	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
}
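
/*
 * Illustrative sketch, not part of the driver: the host RRQ initialized above
 * is a circular buffer of 32-bit response words, and a toggle bit is the
 * usual way such a queue signals new entries without a separate count. The
 * consumer accepts entries whose tag bit matches its expected value and flips
 * that expectation each time the cursor wraps. All names below are
 * hypothetical.
 */
struct example_resp_queue {
	__be32 *start, *end, *curr;	/* buffer bounds and consume cursor */
	u32 toggle_bit;			/* expected value of the tag bit */
};

static int __maybe_unused example_resp_pop(struct example_resp_queue *q, u32 *entry)
{
	u32 word = be32_to_cpu(*q->curr);

	if ((word & 1) != q->toggle_bit)
		return 0;			/* nothing new yet */

	*entry = word;
	if (q->curr == q->end) {		/* wrap and flip the toggle */
		q->curr = q->start;
		q->toggle_bit ^= 1;
	} else
		q->curr++;
	return 1;
}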
/**
 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
{
	unsigned long stage, stage_time;
	u32 feedback;
	volatile u32 int_reg;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u64 maskval = 0;

	feedback = readl(ioa_cfg->regs.init_feedback_reg);
	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;

	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);

	/* sanity check the stage_time value */
	if (stage_time == 0)
		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;

	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		stage_time = ioa_cfg->transop_timeout;
		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
			maskval = IPR_PCII_IPL_STAGE_CHANGE;
			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			return IPR_RC_JOB_CONTINUE;
		}
	}

	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
	ipr_cmd->timer.function = ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;
	volatile u64 maskval;
	int i;

	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg32);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);

	if (ioa_cfg->sis64) {
		maskval = IPR_PCII_IPL_STAGE_CHANGE;
		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
	} else
		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);

	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	if (ioa_cfg->sis64) {
		ipr_cmd->job_step = ipr_reset_next_stage;
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
	ipr_cmd->timer.function = ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	else if (ioa_cfg->sdt_state == READ_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ioa_cfg->dump_timeout = 1;
	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:	ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 *	nothing
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}
/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:	ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 *	nothing
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;
	u32 ioasc;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
		length = be32_to_cpu(sdt.entry[0].end_token);
	else
		length = (be32_to_cpu(sdt.entry[0].end_token) -
			  be32_to_cpu(sdt.entry[0].start_token)) &
			  IPR_FMT2_MBX_ADDR_MASK;

	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del_init(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].start_token),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
		    ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	} else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}
/**
 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function will call to get the unit check buffer.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ioa_cfg->ioa_unit_checked = 0;
	ipr_get_unit_check_buffer(ioa_cfg);
	ipr_cmd->job_step = ipr_reset_alert;
	ipr_reset_start_timer(ipr_cmd, 0);

	return IPR_RC_JOB_RETURN;
}
static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state != GET_DUMP)
		return IPR_RC_JOB_RETURN;

	if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
	    (readl(ioa_cfg->regs.sense_interrupt_reg) &
	     IPR_PCII_MAILBOX_STABLE)) {

		if (!ipr_cmd->u.time_left)
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting for Mailbox register.\n");

		ioa_cfg->sdt_state = READ_DUMP;
		ioa_cfg->dump_timeout = 0;
		if (ioa_cfg->sis64)
			ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
		else
			ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
		ipr_cmd->job_step = ipr_reset_wait_for_dump;
		schedule_work(&ioa_cfg->work_q);
	} else {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd,
				      IPR_CHECK_FOR_RESET_TIMEOUT);
	}

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;

	ioa_cfg->pdev->state_saved = true;
	pci_restore_state(ioa_cfg->pdev);

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	if (ioa_cfg->ioa_unit_checked) {
		if (ioa_cfg->sis64) {
			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
			return IPR_RC_JOB_RETURN;
		} else {
			ioa_cfg->ioa_unit_checked = 0;
			ipr_get_unit_check_buffer(ioa_cfg);
			ipr_cmd->job_step = ipr_reset_alert;
			ipr_reset_start_timer(ipr_cmd, 0);
			return IPR_RC_JOB_RETURN;
		}
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else if (ioa_cfg->sdt_state == GET_DUMP) {
		ipr_cmd->job_step = ipr_dump_mailbox_wait;
		ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;
	}

	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_bist_done - BIST has completed on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: Unblock config space and resume the reset process.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->cfg_locked)
		pci_cfg_access_unlock(ioa_cfg->pdev);
	ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = PCIBIOS_SUCCESSFUL;

	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
		writel(IPR_UPROCI_SIS64_START_BIST,
		       ioa_cfg->regs.set_uproc_interrupt_reg32);
	else
		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc == PCIBIOS_SUCCESSFUL) {
		ipr_cmd->job_step = ipr_reset_bist_done;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	} else {
		if (ioa_cfg->cfg_locked)
			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
		ioa_cfg->cfg_locked = 0;
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}
/**
 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This clears PCI reset to the adapter and delays two seconds.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->job_step = ipr_reset_bist_done;
	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
 * @work:	work struct
 *
 * Description: This pulses warm reset to a slot.
 *
 **/
static void ipr_reset_reset_work(struct work_struct *work)
{
	struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct pci_dev *pdev = ioa_cfg->pdev;
	unsigned long lock_flags = 0;

	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
	msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->reset_cmd == ipr_cmd)
		ipr_reset_ioa_job(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This asserts PCI reset to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
	queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
	ipr_cmd->job_step = ipr_reset_slot_reset_done;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_block_config_access_wait - Wait for permission to block config access
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;

	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
		ioa_cfg->cfg_locked = 1;
		ipr_cmd->job_step = ioa_cfg->reset;
	} else {
		if (ipr_cmd->u.time_left) {
			rc = IPR_RC_JOB_RETURN;
			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
			ipr_reset_start_timer(ipr_cmd,
					      IPR_CHECK_FOR_RESET_TIMEOUT);
		} else {
			ipr_cmd->job_step = ioa_cfg->reset;
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting to lock config access. Resetting anyway.\n");
		}
	}

	return rc;
}
/**
 * ipr_reset_block_config_access - Block config access to the IOA
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}
/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}
/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_quiesce_done - Complete IOA disconnect
 * @ipr_cmd:	ipr command struct
 *
 * Description: Freeze the adapter to complete quiesce processing
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_cmd->job_step = ipr_ioa_bringdown_done;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_cancel_hcam_done - Check for outstanding commands
 * @ipr_cmd:	ipr command struct
 *
 * Description: Ensure nothing is outstanding to the IOA and
 * proceed with IOA disconnect. Otherwise reset the IOA.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmnd *loop_cmd;
	struct ipr_hrr_queue *hrrq;
	int rc = IPR_RC_JOB_CONTINUE;
	int count = 0;

	ipr_cmd->job_step = ipr_reset_quiesce_done;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
			count++;
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
			rc = IPR_RC_JOB_RETURN;
			break;
		}
		spin_unlock(&hrrq->_lock);

		if (count)
			break;
	}

	return rc;
}
/**
 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
 * @ipr_cmd:	ipr command struct
 *
 * Description: Cancel any outstanding HCAMs to the IOA.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_cmnd *hcam_cmd;
	struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];

	ipr_cmd->job_step = ipr_reset_cancel_hcam_done;

	if (!hrrq->ioa_is_dead) {
		if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
			list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
				if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
					continue;

				ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
				ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
				cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
				cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
				cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
				cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
				cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
				cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
				cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
				cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
				cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
				cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
				cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
				cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;

				ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
					   IPR_CANCEL_TIMEOUT);

				rc = IPR_RC_JOB_RETURN;
				ipr_cmd->job_step = ipr_reset_cancel_hcam;
				break;
			}
		}
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	return rc;
}
/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	if (ioa_cfg->sis64)
		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	else
		ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
		ipr_cmd->job_step = ipr_reset_cancel_hcam;
	else if (shutdown_type != IPR_SHUTDOWN_NONE &&
			!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		else
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	return rc;
}
/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue,
				      &ipr_cmd->hrrq->hrrq_free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}
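
/*
 * Illustrative sketch, not part of the driver: ipr_reset_ioa_job() above is a
 * small state-machine driver. Each job step either completes synchronously
 * and returns "continue" (so the router immediately runs the step it chained
 * to), or it kicks off asynchronous work and returns "return" (the router
 * exits and the completion path re-enters it later). Stripped of the hardware
 * specifics, the pattern is the one below; the example_* names are
 * hypothetical.
 */
struct example_job {
	int (*step)(struct example_job *);	/* next step to run */
};

static void __maybe_unused example_run_job(struct example_job *job)
{
	int rc;

	do {
		/* each step may re-point job->step before returning */
		rc = job->step(job);
	} while (rc == IPR_RC_JOB_CONTINUE);	/* stop once a step went async */
}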
/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;
	int i;

	ioa_cfg->in_reset_reload = 1;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_cmds = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
		ioa_cfg->scsi_unblock = 0;
		ioa_cfg->scsi_blocked = 1;
		scsi_block_requests(ioa_cfg->host);
	}

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}
/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	int i;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload) {
		if (ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
		else if (ioa_cfg->sdt_state == READ_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
	}

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 1;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
				ioa_cfg->scsi_unblock = 1;
				schedule_work(&ioa_cfg->work_q);
			}
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 */
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

	/* Disallow new interrupts, avoid loop */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the MMIO
 * access to the IOA has been restored.
 */
static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (!ioa_cfg->probe_done)
		pci_save_state(pdev);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 */
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done)
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->needs_warm_reset)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		else
			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
						IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 */
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
		ioa_cfg->in_ioa_bringdown = 1;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].allow_cmds = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 *	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
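
/*
 * Hedged sketch, not a quote of this driver: error-recovery callbacks like
 * the ones above are normally collected into a struct pci_error_handlers and
 * referenced from the driver's struct pci_driver. The instance name below is
 * hypothetical; the field names are the standard ones from <linux/pci.h>.
 * Note that the permanent-failure path is reached through .error_detected
 * rather than through a field of its own.
 */
static const struct pci_error_handlers example_err_handler = {
	.error_detected	= ipr_pci_error_detected,
	.mmio_enabled	= ipr_pci_mmio_enabled,
	.slot_reset	= ipr_pci_slot_reset,
};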
/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	ioa_cfg->probe_done = 1;
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	return rc;
}
/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if (ioa_cfg->ipr_cmnd_list) {
		for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
			if (ioa_cfg->ipr_cmnd_list[i])
				dma_pool_free(ioa_cfg->ipr_cmd_pool,
					      ioa_cfg->ipr_cmnd_list[i],
					      ioa_cfg->ipr_cmnd_list_dma[i]);

			ioa_cfg->ipr_cmnd_list[i] = NULL;
		}
	}

	if (ioa_cfg->ipr_cmd_pool)
		dma_pool_destroy(ioa_cfg->ipr_cmd_pool);

	kfree(ioa_cfg->ipr_cmnd_list);
	kfree(ioa_cfg->ipr_cmnd_list_dma);
	ioa_cfg->ipr_cmnd_list = NULL;
	ioa_cfg->ipr_cmnd_list_dma = NULL;
	ioa_cfg->ipr_cmd_pool = NULL;
}
/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++)
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(u32) * ioa_cfg->hrrq[i].size,
				  ioa_cfg->hrrq[i].host_rrq,
				  ioa_cfg->hrrq[i].host_rrq_dma);

	dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_MAX_HCAMS; i++) {
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(struct ipr_hostrcb),
				  ioa_cfg->hostrcb[i],
				  ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}
/**
 * ipr_free_irqs - Free all allocated IRQs for the adapter.
 * @ioa_cfg:	ipr cfg struct
 *
 * This function frees all allocated IRQs for the
 * specified adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i;

	for (i = 0; i < ioa_cfg->nvectors; i++)
		free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
	pci_free_irq_vectors(pdev);
}
/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ipr_free_irqs(ioa_cfg);
	if (ioa_cfg->reset_work_q)
		destroy_workqueue(ioa_cfg->reset_work_q);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
}
/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -ENOMEM on allocation failure
 **/
static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i, entries_each_hrrq, hrrq_id = 0;

	ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
						sizeof(struct ipr_cmnd), 512, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);

	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
		ipr_free_cmd_blks(ioa_cfg);
		return -ENOMEM;
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		if (ioa_cfg->hrrq_num > 1) {
			if (i == 0) {
				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
				ioa_cfg->hrrq[i].min_cmd_id = 0;
				ioa_cfg->hrrq[i].max_cmd_id =
					(entries_each_hrrq - 1);
			} else {
				entries_each_hrrq =
					IPR_NUM_BASE_CMD_BLKS /
					(ioa_cfg->hrrq_num - 1);
				ioa_cfg->hrrq[i].min_cmd_id =
					IPR_NUM_INTERNAL_CMD_BLKS +
					(i - 1) * entries_each_hrrq;
				ioa_cfg->hrrq[i].max_cmd_id =
					(IPR_NUM_INTERNAL_CMD_BLKS +
					 i * entries_each_hrrq - 1);
			}
		} else {
			entries_each_hrrq = IPR_NUM_CMD_BLKS;
			ioa_cfg->hrrq[i].min_cmd_id = 0;
			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
		}
		ioa_cfg->hrrq[i].size = entries_each_hrrq;
	}

	BUG_ON(ioa_cfg->hrrq_num == 0);

	i = IPR_NUM_CMD_BLKS -
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
	if (i > 0) {
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
	}

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
					  GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
			hrrq_id++;
	}

	return 0;
}
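
/*
 * Illustrative sketch, not part of the driver: the loop above partitions a
 * fixed pool of command blocks into per-queue [min_cmd_id, max_cmd_id]
 * ranges -- queue 0 keeps a small reserved set for internal commands, the
 * remaining blocks are divided evenly across the other queues, and any
 * division remainder is folded into the last queue. The helper below redoes
 * that arithmetic for a hypothetical pool size and queue count.
 */
static void __maybe_unused example_partition_ids(int total, int reserved,
						 int nqueues, int *min_id, int *max_id)
{
	int per_q = (nqueues > 1) ? (total - reserved) / (nqueues - 1) : total;
	int q, min = 0, max = -1;

	for (q = 0; q < nqueues; q++) {
		if (nqueues > 1 && q == 0) {
			min = 0;
			max = reserved - 1;
		} else {
			min = max + 1;
			max = min + per_q - 1;
		}
		if (q == nqueues - 1)
			max = total - 1;	/* remainder goes to the last queue */
		min_id[q] = min;
		max_id[q] = max;
	}
}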
/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / non-zero for error
 **/
static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       ioa_cfg->max_devs_supported, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
					      sizeof(struct ipr_misc_cbs),
					      &ioa_cfg->vpd_cbs_dma,
					      GFP_KERNEL);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					&ioa_cfg->hrrq[i].host_rrq_dma,
					GFP_KERNEL);

		if (!ioa_cfg->hrrq[i].host_rrq) {
			while (--i > 0)
				dma_free_coherent(&pdev->dev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					ioa_cfg->hrrq[i].host_rrq,
					ioa_cfg->hrrq[i].host_rrq_dma);
			goto out_ipr_free_cmd_blocks;
		}
		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
						  ioa_cfg->cfg_table_size,
						  &ioa_cfg->cfg_table_dma,
						  GFP_KERNEL);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_MAX_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
							 sizeof(struct ipr_hostrcb),
							 &ioa_cfg->hostrcb_dma[i],
							 GFP_KERNEL);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
				  ioa_cfg->hostrcb[i],
				  ioa_cfg->hostrcb_dma[i]);
	}
	dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		dma_free_coherent(&pdev->dev,
				  sizeof(u32) * ioa_cfg->hrrq[i].size,
				  ioa_cfg->hrrq[i].host_rrq,
				  ioa_cfg->hrrq[i].host_rrq_dma);
	}
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}
/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}
/**
 * ipr_init_regs - Initialize IOA registers
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}
/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
			     struct Scsi_Host *host, struct pci_dev *pdev)
{
	int i;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
					   + ((sizeof(struct ipr_config_table_entry64)
					       * ioa_cfg->max_devs_supported)));
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
					   + ((sizeof(struct ipr_config_table_entry)
					       * ioa_cfg->max_devs_supported)));
	}

	host->max_channel = IPR_VSET_BUS;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
		if (i == 0)
			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
		else
			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
	}
}
/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}
/**
 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	if (pci_channel_offline(pdev)) {
		wait_event_timeout(ioa_cfg->eeh_wait_q,
				   !pci_channel_offline(pdev),
				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
		pci_restore_state(pdev);
	}
}
static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
		ioa_cfg->vectors_info[vec_idx].
			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
	}
}
static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
				      struct pci_dev *pdev)
{
	int i, rc;

	for (i = 1; i < ioa_cfg->nvectors; i++) {
		rc = request_irq(pci_irq_vector(pdev, i),
				 ipr_isr_mhrrq,
				 0,
				 ioa_cfg->vectors_info[i].desc,
				 &ioa_cfg->hrrq[i]);
		if (rc) {
			while (--i > 0)
				free_irq(pci_irq_vector(pdev, i),
					 &ioa_cfg->hrrq[i]);
			return rc;
		}
	}
	return 0;
}
/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:	interrupt number
 * @devp:	ioa config struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static irqreturn_t ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;
	int irq = pci_irq_vector(pdev, 0);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(irq, ioa_cfg);

	return rc;
}
/* ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags, driver_lock_flags;
	unsigned int irq_flag;

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	rc = pci_enable_device(pdev);

	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc || pci_channel_offline(pdev)) {
			dev_err(&pdev->dev, "Cannot enable adapter\n");
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			goto out_release_regions;
		}
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_regs(ioa_cfg);

	if (ioa_cfg->sis64) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
			rc = dma_set_mask_and_coherent(&pdev->dev,
						       DMA_BIT_MASK(32));
		}
	} else
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Issue MMIO read to ensure card is not in EEH */
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
	ipr_wait_for_pci_err_recovery(ioa_cfg);

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

	irq_flag = PCI_IRQ_LEGACY;
	if (ioa_cfg->ipr_chip->has_msi)
		irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
	rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
	if (rc < 0) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		goto cleanup_nomem;
	}
	ioa_cfg->nvectors = rc;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		ioa_cfg->clear_isr = 1;

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		pci_set_master(pdev);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

	if (pdev->msi_enabled || pdev->msix_enabled) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		switch (rc) {
		case 0:
			dev_info(&pdev->dev,
				"Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
				pdev->msix_enabled ? "-X" : "");
			break;
		case -EOPNOTSUPP:
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			pci_free_irq_vectors(pdev);

			ioa_cfg->nvectors = 1;
			ioa_cfg->clear_isr = 1;
			break;
		default:
			goto out_msi_disable;
		}
	}

	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				(unsigned int)num_online_cpus(),
				(unsigned int)IPR_MAX_HRRQ_NUM);

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nolog;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (pdev->msi_enabled || pdev->msix_enabled) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
			ioa_cfg->vectors_info[0].desc,
			&ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
			IRQF_SHARED,
			IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;

		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
								WQ_MEM_RECLAIM, host->host_no);

		if (!ioa_cfg->reset_work_q) {
			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
			rc = -ENOMEM;
			goto out_free_irq;
		}
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

out:
	return rc;

out_free_irq:
	ipr_free_irqs(ioa_cfg);
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	ipr_wait_for_pci_err_recovery(ioa_cfg);
	pci_free_irq_vectors(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
	goto out;
}
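
/*
 * Note: the error labels at the end of ipr_probe_ioa() unwind in reverse
 * order of acquisition -- IRQs, driver memory, interrupt vectors, the
 * register mapping, the enabled PCI device and its regions, and finally the
 * Scsi_Host reference -- so a goto to any label releases everything acquired
 * before the failure and nothing after it.
 */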
/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
}
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	if (ioa_cfg->reset_work_q)
		flush_workqueue(ioa_cfg->reset_work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);
}
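
/*
 * Note: __ipr_remove() first waits out any reset/reload already in progress,
 * marks every HRRQ as removing_ioa so no new commands are queued, then
 * performs a normal bringdown and flushes the work queues before dropping
 * the adapter from ipr_ioa_head and freeing its resources.
 */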
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_ioa_async_err_log);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);
}
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device ID
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_ioa_async_err_log);

	if (rc) {
		ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
				     &ipr_dump_attr);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_ioa_async_err_log);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	ioa_cfg->scan_enabled = 1;
	schedule_work(&ioa_cfg->work_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
				      ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}

	scsi_scan_host(ioa_cfg->host);

	return 0;
}
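
/*
 * Note: ipr_probe() is the second-stage initialization: part2 bring-up, SCSI
 * host registration, then the trace, async-error-log, and dump sysfs
 * attributes.  Each failure path unwinds what was set up before it (roughly
 * in reverse order) and finishes with __ipr_remove() to undo ipr_probe_ioa().
 */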
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
		shutdown_type = IPR_SHUTDOWN_QUIESCE;

	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
		ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}
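
/*
 * Note: when a SIS-64 adapter is being restarted with ipr_fast_reboot set,
 * ipr_shutdown() requests IPR_SHUTDOWN_QUIESCE instead of a full normal
 * shutdown, then frees the IRQs and disables the PCI device so the adapter
 * is left quiesced across the reboot.
 */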
static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
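
/*
 * Note: the PCI core matches adapters against ipr_pci_table by the
 * vendor/device and subsystem vendor/device IDs above; the final field of
 * each entry becomes pci_device_id.driver_data, which ipr_probe_ioa() reads
 * for per-board flags such as IPR_USE_LONG_TRANSOP_TIMEOUT and
 * IPR_USE_PCI_WARM_RESET.  MODULE_DEVICE_TABLE exports the same table for
 * module autoloading.
 */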
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};
static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	notifier event
 * @buf:	notifier data (unused)
 *
 * Return value:
 *	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}
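
/*
 * Note: ipr_halt() runs from the reboot notifier chain.  For each registered
 * adapter that is still accepting commands it issues an IPR_IOA_SHUTDOWN /
 * PREPARE_FOR_NORMAL command so the write cache is flushed before the system
 * halts; adapters that are already quiesced, or that the ipr_fast_reboot
 * restart path handles, are skipped.
 */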
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};
/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);
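
/*
 * Note: ipr_init() registers the reboot notifier before the PCI driver, so
 * the shutdown-prepare hook is in place by the time any adapters are probed;
 * ipr_exit() reverses both registrations.
 */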