2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
92 static LIST_HEAD(ipr_ioa_head
);
93 static unsigned int ipr_log_level
= IPR_DEFAULT_LOG_LEVEL
;
94 static unsigned int ipr_max_speed
= 1;
95 static int ipr_testmode
= 0;
96 static unsigned int ipr_fastfail
= 0;
97 static unsigned int ipr_transop_timeout
= 0;
98 static unsigned int ipr_debug
= 0;
99 static unsigned int ipr_max_devs
= IPR_DEFAULT_SIS64_DEVS
;
100 static unsigned int ipr_dual_ioa_raid
= 1;
101 static unsigned int ipr_number_of_msix
= 16;
102 static unsigned int ipr_fast_reboot
;
103 static DEFINE_SPINLOCK(ipr_driver_lock
);
105 /* This table describes the differences between DMA controller chips */
106 static const struct ipr_chip_cfg_t ipr_chip_cfg
[] = {
107 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
110 .cache_line_size
= 0x20,
114 .set_interrupt_mask_reg
= 0x0022C,
115 .clr_interrupt_mask_reg
= 0x00230,
116 .clr_interrupt_mask_reg32
= 0x00230,
117 .sense_interrupt_mask_reg
= 0x0022C,
118 .sense_interrupt_mask_reg32
= 0x0022C,
119 .clr_interrupt_reg
= 0x00228,
120 .clr_interrupt_reg32
= 0x00228,
121 .sense_interrupt_reg
= 0x00224,
122 .sense_interrupt_reg32
= 0x00224,
123 .ioarrin_reg
= 0x00404,
124 .sense_uproc_interrupt_reg
= 0x00214,
125 .sense_uproc_interrupt_reg32
= 0x00214,
126 .set_uproc_interrupt_reg
= 0x00214,
127 .set_uproc_interrupt_reg32
= 0x00214,
128 .clr_uproc_interrupt_reg
= 0x00218,
129 .clr_uproc_interrupt_reg32
= 0x00218
132 { /* Snipe and Scamp */
135 .cache_line_size
= 0x20,
139 .set_interrupt_mask_reg
= 0x00288,
140 .clr_interrupt_mask_reg
= 0x0028C,
141 .clr_interrupt_mask_reg32
= 0x0028C,
142 .sense_interrupt_mask_reg
= 0x00288,
143 .sense_interrupt_mask_reg32
= 0x00288,
144 .clr_interrupt_reg
= 0x00284,
145 .clr_interrupt_reg32
= 0x00284,
146 .sense_interrupt_reg
= 0x00280,
147 .sense_interrupt_reg32
= 0x00280,
148 .ioarrin_reg
= 0x00504,
149 .sense_uproc_interrupt_reg
= 0x00290,
150 .sense_uproc_interrupt_reg32
= 0x00290,
151 .set_uproc_interrupt_reg
= 0x00290,
152 .set_uproc_interrupt_reg32
= 0x00290,
153 .clr_uproc_interrupt_reg
= 0x00294,
154 .clr_uproc_interrupt_reg32
= 0x00294
160 .cache_line_size
= 0x20,
164 .set_interrupt_mask_reg
= 0x00010,
165 .clr_interrupt_mask_reg
= 0x00018,
166 .clr_interrupt_mask_reg32
= 0x0001C,
167 .sense_interrupt_mask_reg
= 0x00010,
168 .sense_interrupt_mask_reg32
= 0x00014,
169 .clr_interrupt_reg
= 0x00008,
170 .clr_interrupt_reg32
= 0x0000C,
171 .sense_interrupt_reg
= 0x00000,
172 .sense_interrupt_reg32
= 0x00004,
173 .ioarrin_reg
= 0x00070,
174 .sense_uproc_interrupt_reg
= 0x00020,
175 .sense_uproc_interrupt_reg32
= 0x00024,
176 .set_uproc_interrupt_reg
= 0x00020,
177 .set_uproc_interrupt_reg32
= 0x00024,
178 .clr_uproc_interrupt_reg
= 0x00028,
179 .clr_uproc_interrupt_reg32
= 0x0002C,
180 .init_feedback_reg
= 0x0005C,
181 .dump_addr_reg
= 0x00064,
182 .dump_data_reg
= 0x00068,
183 .endian_swap_reg
= 0x00084
188 static const struct ipr_chip_t ipr_chip
[] = {
189 { PCI_VENDOR_ID_MYLEX
, PCI_DEVICE_ID_IBM_GEMSTONE
, false, IPR_SIS32
, IPR_PCI_CFG
, &ipr_chip_cfg
[0] },
190 { PCI_VENDOR_ID_IBM
, PCI_DEVICE_ID_IBM_CITRINE
, false, IPR_SIS32
, IPR_PCI_CFG
, &ipr_chip_cfg
[0] },
191 { PCI_VENDOR_ID_ADAPTEC2
, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN
, false, IPR_SIS32
, IPR_PCI_CFG
, &ipr_chip_cfg
[0] },
192 { PCI_VENDOR_ID_IBM
, PCI_DEVICE_ID_IBM_OBSIDIAN
, false, IPR_SIS32
, IPR_PCI_CFG
, &ipr_chip_cfg
[0] },
193 { PCI_VENDOR_ID_IBM
, PCI_DEVICE_ID_IBM_OBSIDIAN_E
, true, IPR_SIS32
, IPR_PCI_CFG
, &ipr_chip_cfg
[0] },
194 { PCI_VENDOR_ID_IBM
, PCI_DEVICE_ID_IBM_SNIPE
, false, IPR_SIS32
, IPR_PCI_CFG
, &ipr_chip_cfg
[1] },
195 { PCI_VENDOR_ID_ADAPTEC2
, PCI_DEVICE_ID_ADAPTEC2_SCAMP
, false, IPR_SIS32
, IPR_PCI_CFG
, &ipr_chip_cfg
[1] },
196 { PCI_VENDOR_ID_IBM
, PCI_DEVICE_ID_IBM_CROC_FPGA_E2
, true, IPR_SIS64
, IPR_MMIO
, &ipr_chip_cfg
[2] },
197 { PCI_VENDOR_ID_IBM
, PCI_DEVICE_ID_IBM_CROCODILE
, true, IPR_SIS64
, IPR_MMIO
, &ipr_chip_cfg
[2] },
198 { PCI_VENDOR_ID_IBM
, PCI_DEVICE_ID_IBM_RATTLESNAKE
, true, IPR_SIS64
, IPR_MMIO
, &ipr_chip_cfg
[2] }
201 static int ipr_max_bus_speeds
[] = {
202 IPR_80MBs_SCSI_RATE
, IPR_U160_SCSI_RATE
, IPR_U320_SCSI_RATE
205 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
206 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
207 module_param_named(max_speed
, ipr_max_speed
, uint
, 0);
208 MODULE_PARM_DESC(max_speed
, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
209 module_param_named(log_level
, ipr_log_level
, uint
, 0);
210 MODULE_PARM_DESC(log_level
, "Set to 0 - 4 for increasing verbosity of device driver");
211 module_param_named(testmode
, ipr_testmode
, int, 0);
212 MODULE_PARM_DESC(testmode
, "DANGEROUS!!! Allows unsupported configurations");
213 module_param_named(fastfail
, ipr_fastfail
, int, S_IRUGO
| S_IWUSR
);
214 MODULE_PARM_DESC(fastfail
, "Reduce timeouts and retries");
215 module_param_named(transop_timeout
, ipr_transop_timeout
, int, 0);
216 MODULE_PARM_DESC(transop_timeout
, "Time in seconds to wait for adapter to come operational (default: 300)");
217 module_param_named(debug
, ipr_debug
, int, S_IRUGO
| S_IWUSR
);
218 MODULE_PARM_DESC(debug
, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
219 module_param_named(dual_ioa_raid
, ipr_dual_ioa_raid
, int, 0);
220 MODULE_PARM_DESC(dual_ioa_raid
, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
221 module_param_named(max_devs
, ipr_max_devs
, int, 0);
222 MODULE_PARM_DESC(max_devs
, "Specify the maximum number of physical devices. "
223 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS
) "]");
224 module_param_named(number_of_msix
, ipr_number_of_msix
, int, 0);
225 MODULE_PARM_DESC(number_of_msix
, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
226 module_param_named(fast_reboot
, ipr_fast_reboot
, int, S_IRUGO
| S_IWUSR
);
227 MODULE_PARM_DESC(fast_reboot
, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
228 MODULE_LICENSE("GPL");
229 MODULE_VERSION(IPR_DRIVER_VERSION
);
231 /* A constant array of IOASCs/URCs/Error Messages */
233 struct ipr_error_table_t ipr_error_table
[] = {
234 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL
,
235 "8155: An unknown error was received"},
237 "Soft underlength error"},
239 "Command to be cancelled not found"},
241 "Qualified success"},
242 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL
,
243 "FFFE: Soft device bus error recovered by the IOA"},
244 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL
,
245 "4101: Soft device bus fabric error"},
246 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL
,
247 "FFFC: Logical block guard error recovered by the device"},
248 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL
,
249 "FFFC: Logical block reference tag error recovered by the device"},
250 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL
,
251 "4171: Recovered scatter list tag / sequence number error"},
252 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL
,
253 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
254 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL
,
255 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
256 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL
,
257 "FFFD: Recovered logical block reference tag error detected by the IOA"},
258 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL
,
259 "FFFD: Logical block guard error recovered by the IOA"},
260 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL
,
261 "FFF9: Device sector reassign successful"},
262 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL
,
263 "FFF7: Media error recovered by device rewrite procedures"},
264 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL
,
265 "7001: IOA sector reassignment successful"},
266 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL
,
267 "FFF9: Soft media error. Sector reassignment recommended"},
268 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL
,
269 "FFF7: Media error recovered by IOA rewrite procedures"},
270 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL
,
271 "FF3D: Soft PCI bus error recovered by the IOA"},
272 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL
,
273 "FFF6: Device hardware error recovered by the IOA"},
274 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL
,
275 "FFF6: Device hardware error recovered by the device"},
276 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL
,
277 "FF3D: Soft IOA error recovered by the IOA"},
278 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL
,
279 "FFFA: Undefined device response recovered by the IOA"},
280 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL
,
281 "FFF6: Device bus error, message or command phase"},
282 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL
,
283 "FFFE: Task Management Function failed"},
284 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL
,
285 "FFF6: Failure prediction threshold exceeded"},
286 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL
,
287 "8009: Impending cache battery pack failure"},
289 "Logical Unit in process of becoming ready"},
291 "Initializing command required"},
293 "34FF: Disk device format in progress"},
295 "Logical unit not accessible, target port in unavailable state"},
296 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL
,
297 "9070: IOA requested reset"},
299 "Synchronization required"},
301 "IOA microcode download required"},
303 "Device bus connection is prohibited by host"},
305 "No ready, IOA shutdown"},
307 "Not ready, IOA has been shutdown"},
308 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL
,
309 "3020: Storage subsystem configuration error"},
311 "FFF5: Medium error, data unreadable, recommend reassign"},
313 "7000: Medium error, data unreadable, do not reassign"},
314 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL
,
315 "FFF3: Disk media format bad"},
316 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL
,
317 "3002: Addressed device failed to respond to selection"},
318 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL
,
319 "3100: Device bus error"},
320 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL
,
321 "3109: IOA timed out a device command"},
323 "3120: SCSI bus is not operational"},
324 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL
,
325 "4100: Hard device bus fabric error"},
326 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL
,
327 "310C: Logical block guard error detected by the device"},
328 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL
,
329 "310C: Logical block reference tag error detected by the device"},
330 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL
,
331 "4170: Scatter list tag / sequence number error"},
332 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL
,
333 "8150: Logical block CRC error on IOA to Host transfer"},
334 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL
,
335 "4170: Logical block sequence number error on IOA to Host transfer"},
336 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL
,
337 "310D: Logical block reference tag error detected by the IOA"},
338 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL
,
339 "310D: Logical block guard error detected by the IOA"},
340 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL
,
341 "9000: IOA reserved area data check"},
342 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL
,
343 "9001: IOA reserved area invalid data pattern"},
344 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL
,
345 "9002: IOA reserved area LRC error"},
346 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL
,
347 "Hardware Error, IOA metadata access error"},
348 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL
,
349 "102E: Out of alternate sectors for disk storage"},
350 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL
,
351 "FFF4: Data transfer underlength error"},
352 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL
,
353 "FFF4: Data transfer overlength error"},
354 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL
,
355 "3400: Logical unit failure"},
356 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL
,
357 "FFF4: Device microcode is corrupt"},
358 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL
,
359 "8150: PCI bus error"},
361 "Unsupported device bus message received"},
362 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL
,
363 "FFF4: Disk device problem"},
364 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL
,
365 "8150: Permanent IOA failure"},
366 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL
,
367 "3010: Disk device returned wrong response to IOA"},
368 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL
,
369 "8151: IOA microcode error"},
371 "Device bus status error"},
372 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL
,
373 "8157: IOA error requiring IOA reset to recover"},
375 "ATA device status error"},
377 "Message reject received from the device"},
378 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL
,
379 "8008: A permanent cache battery pack failure occurred"},
380 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL
,
381 "9090: Disk unit has been modified after the last known status"},
382 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL
,
383 "9081: IOA detected device error"},
384 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL
,
385 "9082: IOA detected device error"},
386 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL
,
387 "3110: Device bus error, message or command phase"},
388 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL
,
389 "3110: SAS Command / Task Management Function failed"},
390 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL
,
391 "9091: Incorrect hardware configuration change has been detected"},
392 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL
,
393 "9073: Invalid multi-adapter configuration"},
394 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL
,
395 "4010: Incorrect connection between cascaded expanders"},
396 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL
,
397 "4020: Connections exceed IOA design limits"},
398 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL
,
399 "4030: Incorrect multipath connection"},
400 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL
,
401 "4110: Unsupported enclosure function"},
402 {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL
,
403 "4120: SAS cable VPD cannot be read"},
404 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL
,
405 "FFF4: Command to logical unit failed"},
407 "Illegal request, invalid request type or request packet"},
409 "Illegal request, invalid resource handle"},
411 "Illegal request, commands not allowed to this device"},
413 "Illegal request, command not allowed to a secondary adapter"},
415 "Illegal request, command not allowed to a non-optimized resource"},
417 "Illegal request, invalid field in parameter list"},
419 "Illegal request, parameter not supported"},
421 "Illegal request, parameter value invalid"},
423 "Illegal request, command sequence error"},
425 "Illegal request, dual adapter support not enabled"},
427 "Illegal request, another cable connector was physically disabled"},
429 "Illegal request, inconsistent group id/group count"},
430 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL
,
431 "9031: Array protection temporarily suspended, protection resuming"},
432 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL
,
433 "9040: Array protection temporarily suspended, protection resuming"},
434 {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL
,
435 "4080: IOA exceeded maximum operating temperature"},
436 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL
,
437 "4085: Service required"},
438 {0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL
,
439 "4086: SAS Adapter Hardware Configuration Error"},
440 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL
,
441 "3140: Device bus not ready to ready transition"},
442 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL
,
443 "FFFB: SCSI bus was reset"},
445 "FFFE: SCSI bus transition to single ended"},
447 "FFFE: SCSI bus transition to LVD"},
448 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL
,
449 "FFFB: SCSI bus was reset by another initiator"},
450 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL
,
451 "3029: A device replacement has occurred"},
452 {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL
,
453 "4102: Device bus fabric performance degradation"},
454 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL
,
455 "9051: IOA cache data exists for a missing or failed device"},
456 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL
,
457 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
458 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL
,
459 "9025: Disk unit is not supported at its physical location"},
460 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL
,
461 "3020: IOA detected a SCSI bus configuration error"},
462 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL
,
463 "3150: SCSI bus configuration error"},
464 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL
,
465 "9074: Asymmetric advanced function disk configuration"},
466 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL
,
467 "4040: Incomplete multipath connection between IOA and enclosure"},
468 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL
,
469 "4041: Incomplete multipath connection between enclosure and device"},
470 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL
,
471 "9075: Incomplete multipath connection between IOA and remote IOA"},
472 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL
,
473 "9076: Configuration error, missing remote IOA"},
474 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL
,
475 "4050: Enclosure does not support a required multipath function"},
476 {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL
,
477 "4121: Configuration error, required cable is missing"},
478 {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL
,
479 "4122: Cable is not plugged into the correct location on remote IOA"},
480 {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL
,
481 "4123: Configuration error, invalid cable vital product data"},
482 {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL
,
483 "4124: Configuration error, both cable ends are plugged into the same IOA"},
484 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL
,
485 "4070: Logically bad block written on device"},
486 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL
,
487 "9041: Array protection temporarily suspended"},
488 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL
,
489 "9042: Corrupt array parity detected on specified device"},
490 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL
,
491 "9030: Array no longer protected due to missing or failed disk unit"},
492 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL
,
493 "9071: Link operational transition"},
494 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL
,
495 "9072: Link not operational transition"},
496 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL
,
497 "9032: Array exposed but still protected"},
498 {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL
,
499 "70DD: Device forced failed by disrupt device command"},
500 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL
,
501 "4061: Multipath redundancy level got better"},
502 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL
,
503 "4060: Multipath redundancy level got worse"},
504 {0x06808100, 0, IPR_DEBUG_LOG_LEVEL
,
505 "9083: Device raw mode enabled"},
506 {0x06808200, 0, IPR_DEBUG_LOG_LEVEL
,
507 "9084: Device raw mode disabled"},
509 "Failure due to other device"},
510 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL
,
511 "9008: IOA does not support functions expected by devices"},
512 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL
,
513 "9010: Cache data associated with attached devices cannot be found"},
514 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL
,
515 "9011: Cache data belongs to devices other than those attached"},
516 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL
,
517 "9020: Array missing 2 or more devices with only 1 device present"},
518 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL
,
519 "9021: Array missing 2 or more devices with 2 or more devices present"},
520 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL
,
521 "9022: Exposed array is missing a required device"},
522 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL
,
523 "9023: Array member(s) not at required physical locations"},
524 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL
,
525 "9024: Array not functional due to present hardware configuration"},
526 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL
,
527 "9026: Array not functional due to present hardware configuration"},
528 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL
,
529 "9027: Array is missing a device and parity is out of sync"},
530 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL
,
531 "9028: Maximum number of arrays already exist"},
532 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL
,
533 "9050: Required cache data cannot be located for a disk unit"},
534 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL
,
535 "9052: Cache data exists for a device that has been modified"},
536 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL
,
537 "9054: IOA resources not available due to previous problems"},
538 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL
,
539 "9092: Disk unit requires initialization before use"},
540 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL
,
541 "9029: Incorrect hardware configuration change has been detected"},
542 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL
,
543 "9060: One or more disk pairs are missing from an array"},
544 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL
,
545 "9061: One or more disks are missing from an array"},
546 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL
,
547 "9062: One or more disks are missing from an array"},
548 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL
,
549 "9063: Maximum number of functional arrays has been exceeded"},
551 "Data protect, other volume set problem"},
553 "Aborted command, invalid descriptor"},
555 "Target operating conditions have changed, dual adapter takeover"},
557 "Aborted command, medium removal prevented"},
559 "Command terminated by host"},
561 "Aborted command, command terminated by host"}
564 static const struct ipr_ses_table_entry ipr_ses_table
[] = {
565 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
566 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
567 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
568 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
569 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
570 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
571 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
572 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
573 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
574 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
575 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
576 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
577 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
581 * Function Prototypes
583 static int ipr_reset_alert(struct ipr_cmnd
*);
584 static void ipr_process_ccn(struct ipr_cmnd
*);
585 static void ipr_process_error(struct ipr_cmnd
*);
586 static void ipr_reset_ioa_job(struct ipr_cmnd
*);
587 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg
*,
588 enum ipr_shutdown_type
);
590 #ifdef CONFIG_SCSI_IPR_TRACE
592 * ipr_trc_hook - Add a trace entry to the driver trace
593 * @ipr_cmd: ipr command struct
595 * @add_data: additional data
600 static void ipr_trc_hook(struct ipr_cmnd
*ipr_cmd
,
601 u8 type
, u32 add_data
)
603 struct ipr_trace_entry
*trace_entry
;
604 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
605 unsigned int trace_index
;
607 trace_index
= atomic_add_return(1, &ioa_cfg
->trace_index
) & IPR_TRACE_INDEX_MASK
;
608 trace_entry
= &ioa_cfg
->trace
[trace_index
];
609 trace_entry
->time
= jiffies
;
610 trace_entry
->op_code
= ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[0];
611 trace_entry
->type
= type
;
612 if (ipr_cmd
->ioa_cfg
->sis64
)
613 trace_entry
->ata_op_code
= ipr_cmd
->i
.ata_ioadl
.regs
.command
;
615 trace_entry
->ata_op_code
= ipr_cmd
->ioarcb
.u
.add_data
.u
.regs
.command
;
616 trace_entry
->cmd_index
= ipr_cmd
->cmd_index
& 0xff;
617 trace_entry
->res_handle
= ipr_cmd
->ioarcb
.res_handle
;
618 trace_entry
->u
.add_data
= add_data
;
622 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
626 * ipr_lock_and_done - Acquire lock and complete command
627 * @ipr_cmd: ipr command struct
632 static void ipr_lock_and_done(struct ipr_cmnd
*ipr_cmd
)
634 unsigned long lock_flags
;
635 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
637 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
638 ipr_cmd
->done(ipr_cmd
);
639 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
643 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
644 * @ipr_cmd: ipr command struct
649 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd
*ipr_cmd
)
651 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
652 struct ipr_ioasa
*ioasa
= &ipr_cmd
->s
.ioasa
;
653 struct ipr_ioasa64
*ioasa64
= &ipr_cmd
->s
.ioasa64
;
654 dma_addr_t dma_addr
= ipr_cmd
->dma_addr
;
657 hrrq_id
= ioarcb
->cmd_pkt
.hrrq_id
;
658 memset(&ioarcb
->cmd_pkt
, 0, sizeof(struct ipr_cmd_pkt
));
659 ioarcb
->cmd_pkt
.hrrq_id
= hrrq_id
;
660 ioarcb
->data_transfer_length
= 0;
661 ioarcb
->read_data_transfer_length
= 0;
662 ioarcb
->ioadl_len
= 0;
663 ioarcb
->read_ioadl_len
= 0;
665 if (ipr_cmd
->ioa_cfg
->sis64
) {
666 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
667 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl64
));
668 ioasa64
->u
.gata
.status
= 0;
670 ioarcb
->write_ioadl_addr
=
671 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl
));
672 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
673 ioasa
->u
.gata
.status
= 0;
676 ioasa
->hdr
.ioasc
= 0;
677 ioasa
->hdr
.residual_data_len
= 0;
678 ipr_cmd
->scsi_cmd
= NULL
;
680 ipr_cmd
->sense_buffer
[0] = 0;
681 ipr_cmd
->dma_use_sg
= 0;
685 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
686 * @ipr_cmd: ipr command struct
691 static void ipr_init_ipr_cmnd(struct ipr_cmnd
*ipr_cmd
,
692 void (*fast_done
) (struct ipr_cmnd
*))
694 ipr_reinit_ipr_cmnd(ipr_cmd
);
695 ipr_cmd
->u
.scratch
= 0;
696 ipr_cmd
->sibling
= NULL
;
697 ipr_cmd
->eh_comp
= NULL
;
698 ipr_cmd
->fast_done
= fast_done
;
699 timer_setup(&ipr_cmd
->timer
, NULL
, 0);
703 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
704 * @ioa_cfg: ioa config struct
707 * pointer to ipr command struct
710 struct ipr_cmnd
*__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue
*hrrq
)
712 struct ipr_cmnd
*ipr_cmd
= NULL
;
714 if (likely(!list_empty(&hrrq
->hrrq_free_q
))) {
715 ipr_cmd
= list_entry(hrrq
->hrrq_free_q
.next
,
716 struct ipr_cmnd
, queue
);
717 list_del(&ipr_cmd
->queue
);
725 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
726 * @ioa_cfg: ioa config struct
729 * pointer to ipr command struct
732 struct ipr_cmnd
*ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg
*ioa_cfg
)
734 struct ipr_cmnd
*ipr_cmd
=
735 __ipr_get_free_ipr_cmnd(&ioa_cfg
->hrrq
[IPR_INIT_HRRQ
]);
736 ipr_init_ipr_cmnd(ipr_cmd
, ipr_lock_and_done
);
741 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
742 * @ioa_cfg: ioa config struct
743 * @clr_ints: interrupts to clear
745 * This function masks all interrupts on the adapter, then clears the
746 * interrupts specified in the mask
751 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg
*ioa_cfg
,
754 volatile u32 int_reg
;
757 /* Stop new interrupts */
758 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
759 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
760 ioa_cfg
->hrrq
[i
].allow_interrupts
= 0;
761 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
764 /* Set interrupt mask to stop all new interrupts */
766 writeq(~0, ioa_cfg
->regs
.set_interrupt_mask_reg
);
768 writel(~0, ioa_cfg
->regs
.set_interrupt_mask_reg
);
770 /* Clear any pending interrupts */
772 writel(~0, ioa_cfg
->regs
.clr_interrupt_reg
);
773 writel(clr_ints
, ioa_cfg
->regs
.clr_interrupt_reg32
);
774 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
778 * ipr_save_pcix_cmd_reg - Save PCI-X command register
779 * @ioa_cfg: ioa config struct
782 * 0 on success / -EIO on failure
784 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg
*ioa_cfg
)
786 int pcix_cmd_reg
= pci_find_capability(ioa_cfg
->pdev
, PCI_CAP_ID_PCIX
);
788 if (pcix_cmd_reg
== 0)
791 if (pci_read_config_word(ioa_cfg
->pdev
, pcix_cmd_reg
+ PCI_X_CMD
,
792 &ioa_cfg
->saved_pcix_cmd_reg
) != PCIBIOS_SUCCESSFUL
) {
793 dev_err(&ioa_cfg
->pdev
->dev
, "Failed to save PCI-X command register\n");
797 ioa_cfg
->saved_pcix_cmd_reg
|= PCI_X_CMD_DPERR_E
| PCI_X_CMD_ERO
;
802 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
803 * @ioa_cfg: ioa config struct
806 * 0 on success / -EIO on failure
808 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg
*ioa_cfg
)
810 int pcix_cmd_reg
= pci_find_capability(ioa_cfg
->pdev
, PCI_CAP_ID_PCIX
);
813 if (pci_write_config_word(ioa_cfg
->pdev
, pcix_cmd_reg
+ PCI_X_CMD
,
814 ioa_cfg
->saved_pcix_cmd_reg
) != PCIBIOS_SUCCESSFUL
) {
815 dev_err(&ioa_cfg
->pdev
->dev
, "Failed to setup PCI-X command register\n");
824 * __ipr_sata_eh_done - done function for aborted SATA commands
825 * @ipr_cmd: ipr command struct
827 * This function is invoked for ops generated to SATA
828 * devices which are being aborted.
833 static void __ipr_sata_eh_done(struct ipr_cmnd
*ipr_cmd
)
835 struct ata_queued_cmd
*qc
= ipr_cmd
->qc
;
836 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
838 qc
->err_mask
|= AC_ERR_OTHER
;
839 sata_port
->ioasa
.status
|= ATA_BUSY
;
841 if (ipr_cmd
->eh_comp
)
842 complete(ipr_cmd
->eh_comp
);
843 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
847 * ipr_sata_eh_done - done function for aborted SATA commands
848 * @ipr_cmd: ipr command struct
850 * This function is invoked for ops generated to SATA
851 * devices which are being aborted.
856 static void ipr_sata_eh_done(struct ipr_cmnd
*ipr_cmd
)
858 struct ipr_hrr_queue
*hrrq
= ipr_cmd
->hrrq
;
859 unsigned long hrrq_flags
;
861 spin_lock_irqsave(&hrrq
->_lock
, hrrq_flags
);
862 __ipr_sata_eh_done(ipr_cmd
);
863 spin_unlock_irqrestore(&hrrq
->_lock
, hrrq_flags
);
867 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
868 * @ipr_cmd: ipr command struct
870 * This function is invoked by the interrupt handler for
871 * ops generated by the SCSI mid-layer which are being aborted.
876 static void __ipr_scsi_eh_done(struct ipr_cmnd
*ipr_cmd
)
878 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
880 scsi_cmd
->result
|= (DID_ERROR
<< 16);
882 scsi_dma_unmap(ipr_cmd
->scsi_cmd
);
883 scsi_cmd
->scsi_done(scsi_cmd
);
884 if (ipr_cmd
->eh_comp
)
885 complete(ipr_cmd
->eh_comp
);
886 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
890 * ipr_scsi_eh_done - mid-layer done function for aborted ops
891 * @ipr_cmd: ipr command struct
893 * This function is invoked by the interrupt handler for
894 * ops generated by the SCSI mid-layer which are being aborted.
899 static void ipr_scsi_eh_done(struct ipr_cmnd
*ipr_cmd
)
901 unsigned long hrrq_flags
;
902 struct ipr_hrr_queue
*hrrq
= ipr_cmd
->hrrq
;
904 spin_lock_irqsave(&hrrq
->_lock
, hrrq_flags
);
905 __ipr_scsi_eh_done(ipr_cmd
);
906 spin_unlock_irqrestore(&hrrq
->_lock
, hrrq_flags
);
910 * ipr_fail_all_ops - Fails all outstanding ops.
911 * @ioa_cfg: ioa config struct
913 * This function fails all outstanding ops.
918 static void ipr_fail_all_ops(struct ipr_ioa_cfg
*ioa_cfg
)
920 struct ipr_cmnd
*ipr_cmd
, *temp
;
921 struct ipr_hrr_queue
*hrrq
;
924 for_each_hrrq(hrrq
, ioa_cfg
) {
925 spin_lock(&hrrq
->_lock
);
926 list_for_each_entry_safe(ipr_cmd
,
927 temp
, &hrrq
->hrrq_pending_q
, queue
) {
928 list_del(&ipr_cmd
->queue
);
930 ipr_cmd
->s
.ioasa
.hdr
.ioasc
=
931 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET
);
932 ipr_cmd
->s
.ioasa
.hdr
.ilid
=
933 cpu_to_be32(IPR_DRIVER_ILID
);
935 if (ipr_cmd
->scsi_cmd
)
936 ipr_cmd
->done
= __ipr_scsi_eh_done
;
937 else if (ipr_cmd
->qc
)
938 ipr_cmd
->done
= __ipr_sata_eh_done
;
940 ipr_trc_hook(ipr_cmd
, IPR_TRACE_FINISH
,
941 IPR_IOASC_IOA_WAS_RESET
);
942 del_timer(&ipr_cmd
->timer
);
943 ipr_cmd
->done(ipr_cmd
);
945 spin_unlock(&hrrq
->_lock
);
951 * ipr_send_command - Send driver initiated requests.
952 * @ipr_cmd: ipr command struct
954 * This function sends a command to the adapter using the correct write call.
955 * In the case of sis64, calculate the ioarcb size required. Then or in the
961 static void ipr_send_command(struct ipr_cmnd
*ipr_cmd
)
963 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
964 dma_addr_t send_dma_addr
= ipr_cmd
->dma_addr
;
966 if (ioa_cfg
->sis64
) {
967 /* The default size is 256 bytes */
968 send_dma_addr
|= 0x1;
970 /* If the number of ioadls * size of ioadl > 128 bytes,
971 then use a 512 byte ioarcb */
972 if (ipr_cmd
->dma_use_sg
* sizeof(struct ipr_ioadl64_desc
) > 128 )
973 send_dma_addr
|= 0x4;
974 writeq(send_dma_addr
, ioa_cfg
->regs
.ioarrin_reg
);
976 writel(send_dma_addr
, ioa_cfg
->regs
.ioarrin_reg
);
980 * ipr_do_req - Send driver initiated requests.
981 * @ipr_cmd: ipr command struct
982 * @done: done function
983 * @timeout_func: timeout function
984 * @timeout: timeout value
986 * This function sends the specified command to the adapter with the
987 * timeout given. The done function is invoked on command completion.
992 static void ipr_do_req(struct ipr_cmnd
*ipr_cmd
,
993 void (*done
) (struct ipr_cmnd
*),
994 void (*timeout_func
) (struct timer_list
*), u32 timeout
)
996 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
998 ipr_cmd
->done
= done
;
1000 ipr_cmd
->timer
.expires
= jiffies
+ timeout
;
1001 ipr_cmd
->timer
.function
= timeout_func
;
1003 add_timer(&ipr_cmd
->timer
);
1005 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, 0);
1007 ipr_send_command(ipr_cmd
);
1011 * ipr_internal_cmd_done - Op done function for an internally generated op.
1012 * @ipr_cmd: ipr command struct
1014 * This function is the op done function for an internally generated,
1015 * blocking op. It simply wakes the sleeping thread.
1020 static void ipr_internal_cmd_done(struct ipr_cmnd
*ipr_cmd
)
1022 if (ipr_cmd
->sibling
)
1023 ipr_cmd
->sibling
= NULL
;
1025 complete(&ipr_cmd
->completion
);
1029 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1030 * @ipr_cmd: ipr command struct
1031 * @dma_addr: dma address
1032 * @len: transfer length
1033 * @flags: ioadl flag value
1035 * This function initializes an ioadl in the case where there is only a single
1041 static void ipr_init_ioadl(struct ipr_cmnd
*ipr_cmd
, dma_addr_t dma_addr
,
1044 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
1045 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ioadl64
;
1047 ipr_cmd
->dma_use_sg
= 1;
1049 if (ipr_cmd
->ioa_cfg
->sis64
) {
1050 ioadl64
->flags
= cpu_to_be32(flags
);
1051 ioadl64
->data_len
= cpu_to_be32(len
);
1052 ioadl64
->address
= cpu_to_be64(dma_addr
);
1054 ipr_cmd
->ioarcb
.ioadl_len
=
1055 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
));
1056 ipr_cmd
->ioarcb
.data_transfer_length
= cpu_to_be32(len
);
1058 ioadl
->flags_and_data_len
= cpu_to_be32(flags
| len
);
1059 ioadl
->address
= cpu_to_be32(dma_addr
);
1061 if (flags
== IPR_IOADL_FLAGS_READ_LAST
) {
1062 ipr_cmd
->ioarcb
.read_ioadl_len
=
1063 cpu_to_be32(sizeof(struct ipr_ioadl_desc
));
1064 ipr_cmd
->ioarcb
.read_data_transfer_length
= cpu_to_be32(len
);
1066 ipr_cmd
->ioarcb
.ioadl_len
=
1067 cpu_to_be32(sizeof(struct ipr_ioadl_desc
));
1068 ipr_cmd
->ioarcb
.data_transfer_length
= cpu_to_be32(len
);
1074 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1075 * @ipr_cmd: ipr command struct
1076 * @timeout_func: function to invoke if command times out
1082 static void ipr_send_blocking_cmd(struct ipr_cmnd
*ipr_cmd
,
1083 void (*timeout_func
) (struct timer_list
*),
1086 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
1088 init_completion(&ipr_cmd
->completion
);
1089 ipr_do_req(ipr_cmd
, ipr_internal_cmd_done
, timeout_func
, timeout
);
1091 spin_unlock_irq(ioa_cfg
->host
->host_lock
);
1092 wait_for_completion(&ipr_cmd
->completion
);
1093 spin_lock_irq(ioa_cfg
->host
->host_lock
);
1096 static int ipr_get_hrrq_index(struct ipr_ioa_cfg
*ioa_cfg
)
1100 if (ioa_cfg
->hrrq_num
== 1)
1103 hrrq
= atomic_add_return(1, &ioa_cfg
->hrrq_index
);
1104 hrrq
= (hrrq
% (ioa_cfg
->hrrq_num
- 1)) + 1;
1110 * ipr_send_hcam - Send an HCAM to the adapter.
1111 * @ioa_cfg: ioa config struct
1113 * @hostrcb: hostrcb struct
1115 * This function will send a Host Controlled Async command to the adapter.
1116 * If HCAMs are currently not allowed to be issued to the adapter, it will
1117 * place the hostrcb on the free queue.
1122 static void ipr_send_hcam(struct ipr_ioa_cfg
*ioa_cfg
, u8 type
,
1123 struct ipr_hostrcb
*hostrcb
)
1125 struct ipr_cmnd
*ipr_cmd
;
1126 struct ipr_ioarcb
*ioarcb
;
1128 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
) {
1129 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
1130 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
1131 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_pending_q
);
1133 ipr_cmd
->u
.hostrcb
= hostrcb
;
1134 ioarcb
= &ipr_cmd
->ioarcb
;
1136 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
1137 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_HCAM
;
1138 ioarcb
->cmd_pkt
.cdb
[0] = IPR_HOST_CONTROLLED_ASYNC
;
1139 ioarcb
->cmd_pkt
.cdb
[1] = type
;
1140 ioarcb
->cmd_pkt
.cdb
[7] = (sizeof(hostrcb
->hcam
) >> 8) & 0xff;
1141 ioarcb
->cmd_pkt
.cdb
[8] = sizeof(hostrcb
->hcam
) & 0xff;
1143 ipr_init_ioadl(ipr_cmd
, hostrcb
->hostrcb_dma
,
1144 sizeof(hostrcb
->hcam
), IPR_IOADL_FLAGS_READ_LAST
);
1146 if (type
== IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
)
1147 ipr_cmd
->done
= ipr_process_ccn
;
1149 ipr_cmd
->done
= ipr_process_error
;
1151 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, IPR_IOA_RES_ADDR
);
1153 ipr_send_command(ipr_cmd
);
1155 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_free_q
);
1160 * ipr_update_ata_class - Update the ata class in the resource entry
1161 * @res: resource entry struct
1162 * @proto: cfgte device bus protocol value
1167 static void ipr_update_ata_class(struct ipr_resource_entry
*res
, unsigned int proto
)
1170 case IPR_PROTO_SATA
:
1171 case IPR_PROTO_SAS_STP
:
1172 res
->ata_class
= ATA_DEV_ATA
;
1174 case IPR_PROTO_SATA_ATAPI
:
1175 case IPR_PROTO_SAS_STP_ATAPI
:
1176 res
->ata_class
= ATA_DEV_ATAPI
;
1179 res
->ata_class
= ATA_DEV_UNKNOWN
;
1185 * ipr_init_res_entry - Initialize a resource entry struct.
1186 * @res: resource entry struct
1187 * @cfgtew: config table entry wrapper struct
1192 static void ipr_init_res_entry(struct ipr_resource_entry
*res
,
1193 struct ipr_config_table_entry_wrapper
*cfgtew
)
1197 struct ipr_ioa_cfg
*ioa_cfg
= res
->ioa_cfg
;
1198 struct ipr_resource_entry
*gscsi_res
= NULL
;
1200 res
->needs_sync_complete
= 0;
1203 res
->del_from_ml
= 0;
1204 res
->resetting_device
= 0;
1205 res
->reset_occurred
= 0;
1207 res
->sata_port
= NULL
;
1209 if (ioa_cfg
->sis64
) {
1210 proto
= cfgtew
->u
.cfgte64
->proto
;
1211 res
->flags
= be16_to_cpu(cfgtew
->u
.cfgte64
->flags
);
1212 res
->res_flags
= be16_to_cpu(cfgtew
->u
.cfgte64
->res_flags
);
1213 res
->qmodel
= IPR_QUEUEING_MODEL64(res
);
1214 res
->type
= cfgtew
->u
.cfgte64
->res_type
;
1216 memcpy(res
->res_path
, &cfgtew
->u
.cfgte64
->res_path
,
1217 sizeof(res
->res_path
));
1220 memcpy(&res
->dev_lun
.scsi_lun
, &cfgtew
->u
.cfgte64
->lun
,
1221 sizeof(res
->dev_lun
.scsi_lun
));
1222 res
->lun
= scsilun_to_int(&res
->dev_lun
);
1224 if (res
->type
== IPR_RES_TYPE_GENERIC_SCSI
) {
1225 list_for_each_entry(gscsi_res
, &ioa_cfg
->used_res_q
, queue
) {
1226 if (gscsi_res
->dev_id
== cfgtew
->u
.cfgte64
->dev_id
) {
1228 res
->target
= gscsi_res
->target
;
1233 res
->target
= find_first_zero_bit(ioa_cfg
->target_ids
,
1234 ioa_cfg
->max_devs_supported
);
1235 set_bit(res
->target
, ioa_cfg
->target_ids
);
1237 } else if (res
->type
== IPR_RES_TYPE_IOAFP
) {
1238 res
->bus
= IPR_IOAFP_VIRTUAL_BUS
;
1240 } else if (res
->type
== IPR_RES_TYPE_ARRAY
) {
1241 res
->bus
= IPR_ARRAY_VIRTUAL_BUS
;
1242 res
->target
= find_first_zero_bit(ioa_cfg
->array_ids
,
1243 ioa_cfg
->max_devs_supported
);
1244 set_bit(res
->target
, ioa_cfg
->array_ids
);
1245 } else if (res
->type
== IPR_RES_TYPE_VOLUME_SET
) {
1246 res
->bus
= IPR_VSET_VIRTUAL_BUS
;
1247 res
->target
= find_first_zero_bit(ioa_cfg
->vset_ids
,
1248 ioa_cfg
->max_devs_supported
);
1249 set_bit(res
->target
, ioa_cfg
->vset_ids
);
1251 res
->target
= find_first_zero_bit(ioa_cfg
->target_ids
,
1252 ioa_cfg
->max_devs_supported
);
1253 set_bit(res
->target
, ioa_cfg
->target_ids
);
1256 proto
= cfgtew
->u
.cfgte
->proto
;
1257 res
->qmodel
= IPR_QUEUEING_MODEL(res
);
1258 res
->flags
= cfgtew
->u
.cfgte
->flags
;
1259 if (res
->flags
& IPR_IS_IOA_RESOURCE
)
1260 res
->type
= IPR_RES_TYPE_IOAFP
;
1262 res
->type
= cfgtew
->u
.cfgte
->rsvd_subtype
& 0x0f;
1264 res
->bus
= cfgtew
->u
.cfgte
->res_addr
.bus
;
1265 res
->target
= cfgtew
->u
.cfgte
->res_addr
.target
;
1266 res
->lun
= cfgtew
->u
.cfgte
->res_addr
.lun
;
1267 res
->lun_wwn
= get_unaligned_be64(cfgtew
->u
.cfgte
->lun_wwn
);
1270 ipr_update_ata_class(res
, proto
);
1274 * ipr_is_same_device - Determine if two devices are the same.
1275 * @res: resource entry struct
1276 * @cfgtew: config table entry wrapper struct
1279 * 1 if the devices are the same / 0 otherwise
1281 static int ipr_is_same_device(struct ipr_resource_entry
*res
,
1282 struct ipr_config_table_entry_wrapper
*cfgtew
)
1284 if (res
->ioa_cfg
->sis64
) {
1285 if (!memcmp(&res
->dev_id
, &cfgtew
->u
.cfgte64
->dev_id
,
1286 sizeof(cfgtew
->u
.cfgte64
->dev_id
)) &&
1287 !memcmp(&res
->dev_lun
.scsi_lun
, &cfgtew
->u
.cfgte64
->lun
,
1288 sizeof(cfgtew
->u
.cfgte64
->lun
))) {
1292 if (res
->bus
== cfgtew
->u
.cfgte
->res_addr
.bus
&&
1293 res
->target
== cfgtew
->u
.cfgte
->res_addr
.target
&&
1294 res
->lun
== cfgtew
->u
.cfgte
->res_addr
.lun
)
1302 * __ipr_format_res_path - Format the resource path for printing.
1303 * @res_path: resource path
1305 * @len: length of buffer provided
1310 static char *__ipr_format_res_path(u8
*res_path
, char *buffer
, int len
)
1316 p
+= snprintf(p
, buffer
+ len
- p
, "%02X", res_path
[0]);
1317 for (i
= 1; res_path
[i
] != 0xff && ((i
* 3) < len
); i
++)
1318 p
+= snprintf(p
, buffer
+ len
- p
, "-%02X", res_path
[i
]);
1324 * ipr_format_res_path - Format the resource path for printing.
1325 * @ioa_cfg: ioa config struct
1326 * @res_path: resource path
1328 * @len: length of buffer provided
1333 static char *ipr_format_res_path(struct ipr_ioa_cfg
*ioa_cfg
,
1334 u8
*res_path
, char *buffer
, int len
)
1339 p
+= snprintf(p
, buffer
+ len
- p
, "%d/", ioa_cfg
->host
->host_no
);
1340 __ipr_format_res_path(res_path
, p
, len
- (buffer
- p
));
1345 * ipr_update_res_entry - Update the resource entry.
1346 * @res: resource entry struct
1347 * @cfgtew: config table entry wrapper struct
1352 static void ipr_update_res_entry(struct ipr_resource_entry
*res
,
1353 struct ipr_config_table_entry_wrapper
*cfgtew
)
1355 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
1359 if (res
->ioa_cfg
->sis64
) {
1360 res
->flags
= be16_to_cpu(cfgtew
->u
.cfgte64
->flags
);
1361 res
->res_flags
= be16_to_cpu(cfgtew
->u
.cfgte64
->res_flags
);
1362 res
->type
= cfgtew
->u
.cfgte64
->res_type
;
1364 memcpy(&res
->std_inq_data
, &cfgtew
->u
.cfgte64
->std_inq_data
,
1365 sizeof(struct ipr_std_inq_data
));
1367 res
->qmodel
= IPR_QUEUEING_MODEL64(res
);
1368 proto
= cfgtew
->u
.cfgte64
->proto
;
1369 res
->res_handle
= cfgtew
->u
.cfgte64
->res_handle
;
1370 res
->dev_id
= cfgtew
->u
.cfgte64
->dev_id
;
1372 memcpy(&res
->dev_lun
.scsi_lun
, &cfgtew
->u
.cfgte64
->lun
,
1373 sizeof(res
->dev_lun
.scsi_lun
));
1375 if (memcmp(res
->res_path
, &cfgtew
->u
.cfgte64
->res_path
,
1376 sizeof(res
->res_path
))) {
1377 memcpy(res
->res_path
, &cfgtew
->u
.cfgte64
->res_path
,
1378 sizeof(res
->res_path
));
1382 if (res
->sdev
&& new_path
)
1383 sdev_printk(KERN_INFO
, res
->sdev
, "Resource path: %s\n",
1384 ipr_format_res_path(res
->ioa_cfg
,
1385 res
->res_path
, buffer
, sizeof(buffer
)));
1387 res
->flags
= cfgtew
->u
.cfgte
->flags
;
1388 if (res
->flags
& IPR_IS_IOA_RESOURCE
)
1389 res
->type
= IPR_RES_TYPE_IOAFP
;
1391 res
->type
= cfgtew
->u
.cfgte
->rsvd_subtype
& 0x0f;
1393 memcpy(&res
->std_inq_data
, &cfgtew
->u
.cfgte
->std_inq_data
,
1394 sizeof(struct ipr_std_inq_data
));
1396 res
->qmodel
= IPR_QUEUEING_MODEL(res
);
1397 proto
= cfgtew
->u
.cfgte
->proto
;
1398 res
->res_handle
= cfgtew
->u
.cfgte
->res_handle
;
1401 ipr_update_ata_class(res
, proto
);
1405 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1407 * @res: resource entry struct
1408 * @cfgtew: config table entry wrapper struct
1413 static void ipr_clear_res_target(struct ipr_resource_entry
*res
)
1415 struct ipr_resource_entry
*gscsi_res
= NULL
;
1416 struct ipr_ioa_cfg
*ioa_cfg
= res
->ioa_cfg
;
1418 if (!ioa_cfg
->sis64
)
1421 if (res
->bus
== IPR_ARRAY_VIRTUAL_BUS
)
1422 clear_bit(res
->target
, ioa_cfg
->array_ids
);
1423 else if (res
->bus
== IPR_VSET_VIRTUAL_BUS
)
1424 clear_bit(res
->target
, ioa_cfg
->vset_ids
);
1425 else if (res
->bus
== 0 && res
->type
== IPR_RES_TYPE_GENERIC_SCSI
) {
1426 list_for_each_entry(gscsi_res
, &ioa_cfg
->used_res_q
, queue
)
1427 if (gscsi_res
->dev_id
== res
->dev_id
&& gscsi_res
!= res
)
1429 clear_bit(res
->target
, ioa_cfg
->target_ids
);
1431 } else if (res
->bus
== 0)
1432 clear_bit(res
->target
, ioa_cfg
->target_ids
);
1436 * ipr_handle_config_change - Handle a config change from the adapter
1437 * @ioa_cfg: ioa config struct
1443 static void ipr_handle_config_change(struct ipr_ioa_cfg
*ioa_cfg
,
1444 struct ipr_hostrcb
*hostrcb
)
1446 struct ipr_resource_entry
*res
= NULL
;
1447 struct ipr_config_table_entry_wrapper cfgtew
;
1448 __be32 cc_res_handle
;
1452 if (ioa_cfg
->sis64
) {
1453 cfgtew
.u
.cfgte64
= &hostrcb
->hcam
.u
.ccn
.u
.cfgte64
;
1454 cc_res_handle
= cfgtew
.u
.cfgte64
->res_handle
;
1456 cfgtew
.u
.cfgte
= &hostrcb
->hcam
.u
.ccn
.u
.cfgte
;
1457 cc_res_handle
= cfgtew
.u
.cfgte
->res_handle
;
1460 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
1461 if (res
->res_handle
== cc_res_handle
) {
1468 if (list_empty(&ioa_cfg
->free_res_q
)) {
1469 ipr_send_hcam(ioa_cfg
,
1470 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
,
1475 res
= list_entry(ioa_cfg
->free_res_q
.next
,
1476 struct ipr_resource_entry
, queue
);
1478 list_del(&res
->queue
);
1479 ipr_init_res_entry(res
, &cfgtew
);
1480 list_add_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
1483 ipr_update_res_entry(res
, &cfgtew
);
1485 if (hostrcb
->hcam
.notify_type
== IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY
) {
1487 res
->del_from_ml
= 1;
1488 res
->res_handle
= IPR_INVALID_RES_HANDLE
;
1489 schedule_work(&ioa_cfg
->work_q
);
1491 ipr_clear_res_target(res
);
1492 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
1494 } else if (!res
->sdev
|| res
->del_from_ml
) {
1496 schedule_work(&ioa_cfg
->work_q
);
1499 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
, hostrcb
);
1503 * ipr_process_ccn - Op done function for a CCN.
1504 * @ipr_cmd: ipr command struct
1506 * This function is the op done function for a configuration
1507 * change notification host controlled async from the adapter.
1512 static void ipr_process_ccn(struct ipr_cmnd
*ipr_cmd
)
1514 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
1515 struct ipr_hostrcb
*hostrcb
= ipr_cmd
->u
.hostrcb
;
1516 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
1518 list_del_init(&hostrcb
->queue
);
1519 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
1522 if (ioasc
!= IPR_IOASC_IOA_WAS_RESET
&&
1523 ioasc
!= IPR_IOASC_ABORTED_CMD_TERM_BY_HOST
)
1524 dev_err(&ioa_cfg
->pdev
->dev
,
1525 "Host RCB failed with IOASC: 0x%08X\n", ioasc
);
1527 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
, hostrcb
);
1529 ipr_handle_config_change(ioa_cfg
, hostrcb
);
1534 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1535 * @i: index into buffer
1536 * @buf: string to modify
1538 * This function will strip all trailing whitespace, pad the end
1539 * of the string with a single space, and NULL terminate the string.
1542 * new length of string
1544 static int strip_and_pad_whitespace(int i
, char *buf
)
1546 while (i
&& buf
[i
] == ' ')
1554 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1555 * @prefix: string to print at start of printk
1556 * @hostrcb: hostrcb pointer
1557 * @vpd: vendor/product id/sn struct
1562 static void ipr_log_vpd_compact(char *prefix
, struct ipr_hostrcb
*hostrcb
,
1563 struct ipr_vpd
*vpd
)
1565 char buffer
[IPR_VENDOR_ID_LEN
+ IPR_PROD_ID_LEN
+ IPR_SERIAL_NUM_LEN
+ 3];
1568 memcpy(buffer
, vpd
->vpids
.vendor_id
, IPR_VENDOR_ID_LEN
);
1569 i
= strip_and_pad_whitespace(IPR_VENDOR_ID_LEN
- 1, buffer
);
1571 memcpy(&buffer
[i
], vpd
->vpids
.product_id
, IPR_PROD_ID_LEN
);
1572 i
= strip_and_pad_whitespace(i
+ IPR_PROD_ID_LEN
- 1, buffer
);
1574 memcpy(&buffer
[i
], vpd
->sn
, IPR_SERIAL_NUM_LEN
);
1575 buffer
[IPR_SERIAL_NUM_LEN
+ i
] = '\0';
1577 ipr_hcam_err(hostrcb
, "%s VPID/SN: %s\n", prefix
, buffer
);
1581 * ipr_log_vpd - Log the passed VPD to the error log.
1582 * @vpd: vendor/product id/sn struct
1587 static void ipr_log_vpd(struct ipr_vpd
*vpd
)
1589 char buffer
[IPR_VENDOR_ID_LEN
+ IPR_PROD_ID_LEN
1590 + IPR_SERIAL_NUM_LEN
];
1592 memcpy(buffer
, vpd
->vpids
.vendor_id
, IPR_VENDOR_ID_LEN
);
1593 memcpy(buffer
+ IPR_VENDOR_ID_LEN
, vpd
->vpids
.product_id
,
1595 buffer
[IPR_VENDOR_ID_LEN
+ IPR_PROD_ID_LEN
] = '\0';
1596 ipr_err("Vendor/Product ID: %s\n", buffer
);
1598 memcpy(buffer
, vpd
->sn
, IPR_SERIAL_NUM_LEN
);
1599 buffer
[IPR_SERIAL_NUM_LEN
] = '\0';
1600 ipr_err(" Serial Number: %s\n", buffer
);
1604 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1605 * @prefix: string to print at start of printk
1606 * @hostrcb: hostrcb pointer
1607 * @vpd: vendor/product id/sn/wwn struct
1612 static void ipr_log_ext_vpd_compact(char *prefix
, struct ipr_hostrcb
*hostrcb
,
1613 struct ipr_ext_vpd
*vpd
)
1615 ipr_log_vpd_compact(prefix
, hostrcb
, &vpd
->vpd
);
1616 ipr_hcam_err(hostrcb
, "%s WWN: %08X%08X\n", prefix
,
1617 be32_to_cpu(vpd
->wwid
[0]), be32_to_cpu(vpd
->wwid
[1]));
1621 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1622 * @vpd: vendor/product id/sn/wwn struct
1627 static void ipr_log_ext_vpd(struct ipr_ext_vpd
*vpd
)
1629 ipr_log_vpd(&vpd
->vpd
);
1630 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd
->wwid
[0]),
1631 be32_to_cpu(vpd
->wwid
[1]));
1635 * ipr_log_enhanced_cache_error - Log a cache error.
1636 * @ioa_cfg: ioa config struct
1637 * @hostrcb: hostrcb struct
1642 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg
*ioa_cfg
,
1643 struct ipr_hostrcb
*hostrcb
)
1645 struct ipr_hostrcb_type_12_error
*error
;
1648 error
= &hostrcb
->hcam
.u
.error64
.u
.type_12_error
;
1650 error
= &hostrcb
->hcam
.u
.error
.u
.type_12_error
;
1652 ipr_err("-----Current Configuration-----\n");
1653 ipr_err("Cache Directory Card Information:\n");
1654 ipr_log_ext_vpd(&error
->ioa_vpd
);
1655 ipr_err("Adapter Card Information:\n");
1656 ipr_log_ext_vpd(&error
->cfc_vpd
);
1658 ipr_err("-----Expected Configuration-----\n");
1659 ipr_err("Cache Directory Card Information:\n");
1660 ipr_log_ext_vpd(&error
->ioa_last_attached_to_cfc_vpd
);
1661 ipr_err("Adapter Card Information:\n");
1662 ipr_log_ext_vpd(&error
->cfc_last_attached_to_ioa_vpd
);
1664 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1665 be32_to_cpu(error
->ioa_data
[0]),
1666 be32_to_cpu(error
->ioa_data
[1]),
1667 be32_to_cpu(error
->ioa_data
[2]));
1671 * ipr_log_cache_error - Log a cache error.
1672 * @ioa_cfg: ioa config struct
1673 * @hostrcb: hostrcb struct
1678 static void ipr_log_cache_error(struct ipr_ioa_cfg
*ioa_cfg
,
1679 struct ipr_hostrcb
*hostrcb
)
1681 struct ipr_hostrcb_type_02_error
*error
=
1682 &hostrcb
->hcam
.u
.error
.u
.type_02_error
;
1684 ipr_err("-----Current Configuration-----\n");
1685 ipr_err("Cache Directory Card Information:\n");
1686 ipr_log_vpd(&error
->ioa_vpd
);
1687 ipr_err("Adapter Card Information:\n");
1688 ipr_log_vpd(&error
->cfc_vpd
);
1690 ipr_err("-----Expected Configuration-----\n");
1691 ipr_err("Cache Directory Card Information:\n");
1692 ipr_log_vpd(&error
->ioa_last_attached_to_cfc_vpd
);
1693 ipr_err("Adapter Card Information:\n");
1694 ipr_log_vpd(&error
->cfc_last_attached_to_ioa_vpd
);
1696 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1697 be32_to_cpu(error
->ioa_data
[0]),
1698 be32_to_cpu(error
->ioa_data
[1]),
1699 be32_to_cpu(error
->ioa_data
[2]));
1703 * ipr_log_enhanced_config_error - Log a configuration error.
1704 * @ioa_cfg: ioa config struct
1705 * @hostrcb: hostrcb struct
1710 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg
*ioa_cfg
,
1711 struct ipr_hostrcb
*hostrcb
)
1713 int errors_logged
, i
;
1714 struct ipr_hostrcb_device_data_entry_enhanced
*dev_entry
;
1715 struct ipr_hostrcb_type_13_error
*error
;
1717 error
= &hostrcb
->hcam
.u
.error
.u
.type_13_error
;
1718 errors_logged
= be32_to_cpu(error
->errors_logged
);
1720 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1721 be32_to_cpu(error
->errors_detected
), errors_logged
);
1723 dev_entry
= error
->dev
;
1725 for (i
= 0; i
< errors_logged
; i
++, dev_entry
++) {
1728 ipr_phys_res_err(ioa_cfg
, dev_entry
->dev_res_addr
, "Device %d", i
+ 1);
1729 ipr_log_ext_vpd(&dev_entry
->vpd
);
1731 ipr_err("-----New Device Information-----\n");
1732 ipr_log_ext_vpd(&dev_entry
->new_vpd
);
1734 ipr_err("Cache Directory Card Information:\n");
1735 ipr_log_ext_vpd(&dev_entry
->ioa_last_with_dev_vpd
);
1737 ipr_err("Adapter Card Information:\n");
1738 ipr_log_ext_vpd(&dev_entry
->cfc_last_with_dev_vpd
);
1743 * ipr_log_sis64_config_error - Log a device error.
1744 * @ioa_cfg: ioa config struct
1745 * @hostrcb: hostrcb struct
1750 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg
*ioa_cfg
,
1751 struct ipr_hostrcb
*hostrcb
)
1753 int errors_logged
, i
;
1754 struct ipr_hostrcb64_device_data_entry_enhanced
*dev_entry
;
1755 struct ipr_hostrcb_type_23_error
*error
;
1756 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
1758 error
= &hostrcb
->hcam
.u
.error64
.u
.type_23_error
;
1759 errors_logged
= be32_to_cpu(error
->errors_logged
);
1761 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1762 be32_to_cpu(error
->errors_detected
), errors_logged
);
1764 dev_entry
= error
->dev
;
1766 for (i
= 0; i
< errors_logged
; i
++, dev_entry
++) {
1769 ipr_err("Device %d : %s", i
+ 1,
1770 __ipr_format_res_path(dev_entry
->res_path
,
1771 buffer
, sizeof(buffer
)));
1772 ipr_log_ext_vpd(&dev_entry
->vpd
);
1774 ipr_err("-----New Device Information-----\n");
1775 ipr_log_ext_vpd(&dev_entry
->new_vpd
);
1777 ipr_err("Cache Directory Card Information:\n");
1778 ipr_log_ext_vpd(&dev_entry
->ioa_last_with_dev_vpd
);
1780 ipr_err("Adapter Card Information:\n");
1781 ipr_log_ext_vpd(&dev_entry
->cfc_last_with_dev_vpd
);
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");
	}
}
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
{
	int i;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
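
/*
 * Illustrative note (not part of the driver): the loop above prints the
 * error data as rows of four big-endian words, each row prefixed with its
 * byte offset. Assuming, purely for illustration, data[] begins with
 * { 0x11111111, 0x22222222, 0x33333333, 0x44444444, ... } and len is 32,
 * the console output would look roughly like:
 *
 *	00000000: 11111111 22222222 33333333 44444444
 *	00000010: ........ ........ ........ ........
 *
 * The values shown are made up; only the layout is meaningful.
 */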
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
2042 * ipr_log_fabric_path - Log a fabric path error
2043 * @hostrcb: hostrcb struct
2044 * @fabric: fabric descriptor
2049 static void ipr_log_fabric_path(struct ipr_hostrcb
*hostrcb
,
2050 struct ipr_hostrcb_fabric_desc
*fabric
)
2053 u8 path_state
= fabric
->path_state
;
2054 u8 active
= path_state
& IPR_PATH_ACTIVE_MASK
;
2055 u8 state
= path_state
& IPR_PATH_STATE_MASK
;
2057 for (i
= 0; i
< ARRAY_SIZE(path_active_desc
); i
++) {
2058 if (path_active_desc
[i
].active
!= active
)
2061 for (j
= 0; j
< ARRAY_SIZE(path_state_desc
); j
++) {
2062 if (path_state_desc
[j
].state
!= state
)
2065 if (fabric
->cascaded_expander
== 0xff && fabric
->phy
== 0xff) {
2066 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d\n",
2067 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2069 } else if (fabric
->cascaded_expander
== 0xff) {
2070 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d, Phy=%d\n",
2071 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2072 fabric
->ioa_port
, fabric
->phy
);
2073 } else if (fabric
->phy
== 0xff) {
2074 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d, Cascade=%d\n",
2075 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2076 fabric
->ioa_port
, fabric
->cascaded_expander
);
2078 ipr_hcam_err(hostrcb
, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2079 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2080 fabric
->ioa_port
, fabric
->cascaded_expander
, fabric
->phy
);
2086 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state
,
2087 fabric
->ioa_port
, fabric
->cascaded_expander
, fabric
->phy
);
2091 * ipr_log64_fabric_path - Log a fabric path error
2092 * @hostrcb: hostrcb struct
2093 * @fabric: fabric descriptor
2098 static void ipr_log64_fabric_path(struct ipr_hostrcb
*hostrcb
,
2099 struct ipr_hostrcb64_fabric_desc
*fabric
)
2102 u8 path_state
= fabric
->path_state
;
2103 u8 active
= path_state
& IPR_PATH_ACTIVE_MASK
;
2104 u8 state
= path_state
& IPR_PATH_STATE_MASK
;
2105 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2107 for (i
= 0; i
< ARRAY_SIZE(path_active_desc
); i
++) {
2108 if (path_active_desc
[i
].active
!= active
)
2111 for (j
= 0; j
< ARRAY_SIZE(path_state_desc
); j
++) {
2112 if (path_state_desc
[j
].state
!= state
)
2115 ipr_hcam_err(hostrcb
, "%s %s: Resource Path=%s\n",
2116 path_active_desc
[i
].desc
, path_state_desc
[j
].desc
,
2117 ipr_format_res_path(hostrcb
->ioa_cfg
,
2119 buffer
, sizeof(buffer
)));
2124 ipr_err("Path state=%02X Resource Path=%s\n", path_state
,
2125 ipr_format_res_path(hostrcb
->ioa_cfg
, fabric
->res_path
,
2126 buffer
, sizeof(buffer
)));
2129 static const struct {
2132 } path_type_desc
[] = {
2133 { IPR_PATH_CFG_IOA_PORT
, "IOA port" },
2134 { IPR_PATH_CFG_EXP_PORT
, "Expander port" },
2135 { IPR_PATH_CFG_DEVICE_PORT
, "Device port" },
2136 { IPR_PATH_CFG_DEVICE_LUN
, "Device LUN" }
2139 static const struct {
2142 } path_status_desc
[] = {
2143 { IPR_PATH_CFG_NO_PROB
, "Functional" },
2144 { IPR_PATH_CFG_DEGRADED
, "Degraded" },
2145 { IPR_PATH_CFG_FAILED
, "Failed" },
2146 { IPR_PATH_CFG_SUSPECT
, "Suspect" },
2147 { IPR_PATH_NOT_DETECTED
, "Missing" },
2148 { IPR_PATH_INCORRECT_CONN
, "Incorrectly connected" }
2151 static const char *link_rate
[] = {
2154 "phy reset problem",
2171 * ipr_log_path_elem - Log a fabric path element.
2172 * @hostrcb: hostrcb struct
2173 * @cfg: fabric path element struct
2178 static void ipr_log_path_elem(struct ipr_hostrcb
*hostrcb
,
2179 struct ipr_hostrcb_config_element
*cfg
)
2182 u8 type
= cfg
->type_status
& IPR_PATH_CFG_TYPE_MASK
;
2183 u8 status
= cfg
->type_status
& IPR_PATH_CFG_STATUS_MASK
;
2185 if (type
== IPR_PATH_CFG_NOT_EXIST
)
2188 for (i
= 0; i
< ARRAY_SIZE(path_type_desc
); i
++) {
2189 if (path_type_desc
[i
].type
!= type
)
2192 for (j
= 0; j
< ARRAY_SIZE(path_status_desc
); j
++) {
2193 if (path_status_desc
[j
].status
!= status
)
2196 if (type
== IPR_PATH_CFG_IOA_PORT
) {
2197 ipr_hcam_err(hostrcb
, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2198 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2199 cfg
->phy
, link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2200 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2202 if (cfg
->cascaded_expander
== 0xff && cfg
->phy
== 0xff) {
2203 ipr_hcam_err(hostrcb
, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2204 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2205 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2206 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2207 } else if (cfg
->cascaded_expander
== 0xff) {
2208 ipr_hcam_err(hostrcb
, "%s %s: Phy=%d, Link rate=%s, "
2209 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2210 path_type_desc
[i
].desc
, cfg
->phy
,
2211 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2212 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2213 } else if (cfg
->phy
== 0xff) {
2214 ipr_hcam_err(hostrcb
, "%s %s: Cascade=%d, Link rate=%s, "
2215 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2216 path_type_desc
[i
].desc
, cfg
->cascaded_expander
,
2217 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2218 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2220 ipr_hcam_err(hostrcb
, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2221 "WWN=%08X%08X\n", path_status_desc
[j
].desc
,
2222 path_type_desc
[i
].desc
, cfg
->cascaded_expander
, cfg
->phy
,
2223 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2224 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2231 ipr_hcam_err(hostrcb
, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2232 "WWN=%08X%08X\n", cfg
->type_status
, cfg
->cascaded_expander
, cfg
->phy
,
2233 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2234 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2238 * ipr_log64_path_elem - Log a fabric path element.
2239 * @hostrcb: hostrcb struct
2240 * @cfg: fabric path element struct
2245 static void ipr_log64_path_elem(struct ipr_hostrcb
*hostrcb
,
2246 struct ipr_hostrcb64_config_element
*cfg
)
2249 u8 desc_id
= cfg
->descriptor_id
& IPR_DESCRIPTOR_MASK
;
2250 u8 type
= cfg
->type_status
& IPR_PATH_CFG_TYPE_MASK
;
2251 u8 status
= cfg
->type_status
& IPR_PATH_CFG_STATUS_MASK
;
2252 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2254 if (type
== IPR_PATH_CFG_NOT_EXIST
|| desc_id
!= IPR_DESCRIPTOR_SIS64
)
2257 for (i
= 0; i
< ARRAY_SIZE(path_type_desc
); i
++) {
2258 if (path_type_desc
[i
].type
!= type
)
2261 for (j
= 0; j
< ARRAY_SIZE(path_status_desc
); j
++) {
2262 if (path_status_desc
[j
].status
!= status
)
2265 ipr_hcam_err(hostrcb
, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2266 path_status_desc
[j
].desc
, path_type_desc
[i
].desc
,
2267 ipr_format_res_path(hostrcb
->ioa_cfg
,
2268 cfg
->res_path
, buffer
, sizeof(buffer
)),
2269 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2270 be32_to_cpu(cfg
->wwid
[0]),
2271 be32_to_cpu(cfg
->wwid
[1]));
2275 ipr_hcam_err(hostrcb
, "Path element=%02X: Resource Path=%s, Link rate=%s "
2276 "WWN=%08X%08X\n", cfg
->type_status
,
2277 ipr_format_res_path(hostrcb
->ioa_cfg
,
2278 cfg
->res_path
, buffer
, sizeof(buffer
)),
2279 link_rate
[cfg
->link_rate
& IPR_PHY_LINK_RATE_MASK
],
2280 be32_to_cpu(cfg
->wwid
[0]), be32_to_cpu(cfg
->wwid
[1]));
2284 * ipr_log_fabric_error - Log a fabric error.
2285 * @ioa_cfg: ioa config struct
2286 * @hostrcb: hostrcb struct
2291 static void ipr_log_fabric_error(struct ipr_ioa_cfg
*ioa_cfg
,
2292 struct ipr_hostrcb
*hostrcb
)
2294 struct ipr_hostrcb_type_20_error
*error
;
2295 struct ipr_hostrcb_fabric_desc
*fabric
;
2296 struct ipr_hostrcb_config_element
*cfg
;
2299 error
= &hostrcb
->hcam
.u
.error
.u
.type_20_error
;
2300 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
2301 ipr_hcam_err(hostrcb
, "%s\n", error
->failure_reason
);
2303 add_len
= be32_to_cpu(hostrcb
->hcam
.length
) -
2304 (offsetof(struct ipr_hostrcb_error
, u
) +
2305 offsetof(struct ipr_hostrcb_type_20_error
, desc
));
2307 for (i
= 0, fabric
= error
->desc
; i
< error
->num_entries
; i
++) {
2308 ipr_log_fabric_path(hostrcb
, fabric
);
2309 for_each_fabric_cfg(fabric
, cfg
)
2310 ipr_log_path_elem(hostrcb
, cfg
);
2312 add_len
-= be16_to_cpu(fabric
->length
);
2313 fabric
= (struct ipr_hostrcb_fabric_desc
*)
2314 ((unsigned long)fabric
+ be16_to_cpu(fabric
->length
));
2317 ipr_log_hex_data(ioa_cfg
, (__be32
*)fabric
, add_len
);
2321 * ipr_log_sis64_array_error - Log a sis64 array error.
2322 * @ioa_cfg: ioa config struct
2323 * @hostrcb: hostrcb struct
2328 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg
*ioa_cfg
,
2329 struct ipr_hostrcb
*hostrcb
)
2332 struct ipr_hostrcb_type_24_error
*error
;
2333 struct ipr_hostrcb64_array_data_entry
*array_entry
;
2334 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2335 const u8 zero_sn
[IPR_SERIAL_NUM_LEN
] = { [0 ... IPR_SERIAL_NUM_LEN
-1] = '0' };
2337 error
= &hostrcb
->hcam
.u
.error64
.u
.type_24_error
;
2341 ipr_err("RAID %s Array Configuration: %s\n",
2342 error
->protection_level
,
2343 ipr_format_res_path(ioa_cfg
, error
->last_res_path
,
2344 buffer
, sizeof(buffer
)));
2348 array_entry
= error
->array_member
;
2349 num_entries
= min_t(u32
, error
->num_entries
,
2350 ARRAY_SIZE(error
->array_member
));
2352 for (i
= 0; i
< num_entries
; i
++, array_entry
++) {
2354 if (!memcmp(array_entry
->vpd
.vpd
.sn
, zero_sn
, IPR_SERIAL_NUM_LEN
))
2357 if (error
->exposed_mode_adn
== i
)
2358 ipr_err("Exposed Array Member %d:\n", i
);
2360 ipr_err("Array Member %d:\n", i
);
2362 ipr_err("Array Member %d:\n", i
);
2363 ipr_log_ext_vpd(&array_entry
->vpd
);
2364 ipr_err("Current Location: %s\n",
2365 ipr_format_res_path(ioa_cfg
, array_entry
->res_path
,
2366 buffer
, sizeof(buffer
)));
2367 ipr_err("Expected Location: %s\n",
2368 ipr_format_res_path(ioa_cfg
,
2369 array_entry
->expected_res_path
,
2370 buffer
, sizeof(buffer
)));
2377 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2378 * @ioa_cfg: ioa config struct
2379 * @hostrcb: hostrcb struct
2384 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg
*ioa_cfg
,
2385 struct ipr_hostrcb
*hostrcb
)
2387 struct ipr_hostrcb_type_30_error
*error
;
2388 struct ipr_hostrcb64_fabric_desc
*fabric
;
2389 struct ipr_hostrcb64_config_element
*cfg
;
2392 error
= &hostrcb
->hcam
.u
.error64
.u
.type_30_error
;
2394 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
2395 ipr_hcam_err(hostrcb
, "%s\n", error
->failure_reason
);
2397 add_len
= be32_to_cpu(hostrcb
->hcam
.length
) -
2398 (offsetof(struct ipr_hostrcb64_error
, u
) +
2399 offsetof(struct ipr_hostrcb_type_30_error
, desc
));
2401 for (i
= 0, fabric
= error
->desc
; i
< error
->num_entries
; i
++) {
2402 ipr_log64_fabric_path(hostrcb
, fabric
);
2403 for_each_fabric_cfg(fabric
, cfg
)
2404 ipr_log64_path_elem(hostrcb
, cfg
);
2406 add_len
-= be16_to_cpu(fabric
->length
);
2407 fabric
= (struct ipr_hostrcb64_fabric_desc
*)
2408 ((unsigned long)fabric
+ be16_to_cpu(fabric
->length
));
2411 ipr_log_hex_data(ioa_cfg
, (__be32
*)fabric
, add_len
);
2415 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2416 * @ioa_cfg: ioa config struct
2417 * @hostrcb: hostrcb struct
2422 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg
*ioa_cfg
,
2423 struct ipr_hostrcb
*hostrcb
)
2425 struct ipr_hostrcb_type_41_error
*error
;
2427 error
= &hostrcb
->hcam
.u
.error64
.u
.type_41_error
;
2429 error
->failure_reason
[sizeof(error
->failure_reason
) - 1] = '\0';
2430 ipr_err("Primary Failure Reason: %s\n", error
->failure_reason
);
2431 ipr_log_hex_data(ioa_cfg
, error
->data
,
2432 be32_to_cpu(hostrcb
->hcam
.length
) -
2433 (offsetof(struct ipr_hostrcb_error
, u
) +
2434 offsetof(struct ipr_hostrcb_type_41_error
, data
)));
2437 * ipr_log_generic_error - Log an adapter error.
2438 * @ioa_cfg: ioa config struct
2439 * @hostrcb: hostrcb struct
2444 static void ipr_log_generic_error(struct ipr_ioa_cfg
*ioa_cfg
,
2445 struct ipr_hostrcb
*hostrcb
)
2447 ipr_log_hex_data(ioa_cfg
, hostrcb
->hcam
.u
.raw
.data
,
2448 be32_to_cpu(hostrcb
->hcam
.length
));
2452 * ipr_log_sis64_device_error - Log a device error.
2453 * @ioa_cfg: ioa config struct
2454 * @hostrcb: hostrcb struct
2459 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg
*ioa_cfg
,
2460 struct ipr_hostrcb
*hostrcb
)
2462 struct ipr_hostrcb_type_21_error
*error
;
2463 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
2465 error
= &hostrcb
->hcam
.u
.error64
.u
.type_21_error
;
2467 ipr_err("-----Failing Device Information-----\n");
2468 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2469 be32_to_cpu(error
->wwn
[0]), be32_to_cpu(error
->wwn
[1]),
2470 be32_to_cpu(error
->wwn
[2]), be32_to_cpu(error
->wwn
[3]));
2471 ipr_err("Device Resource Path: %s\n",
2472 __ipr_format_res_path(error
->res_path
,
2473 buffer
, sizeof(buffer
)));
2474 error
->primary_problem_desc
[sizeof(error
->primary_problem_desc
) - 1] = '\0';
2475 error
->second_problem_desc
[sizeof(error
->second_problem_desc
) - 1] = '\0';
2476 ipr_err("Primary Problem Description: %s\n", error
->primary_problem_desc
);
2477 ipr_err("Secondary Problem Description: %s\n", error
->second_problem_desc
);
2478 ipr_err("SCSI Sense Data:\n");
2479 ipr_log_hex_data(ioa_cfg
, error
->sense_data
, sizeof(error
->sense_data
));
2480 ipr_err("SCSI Command Descriptor Block: \n");
2481 ipr_log_hex_data(ioa_cfg
, error
->cdb
, sizeof(error
->cdb
));
2483 ipr_err("Additional IOA Data:\n");
2484 ipr_log_hex_data(ioa_cfg
, error
->ioa_data
, be32_to_cpu(error
->length_of_error
));
/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 *	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}
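
/*
 * Illustrative usage (not part of the driver): the IOASC is masked with
 * IPR_IOASC_IOASC_MASK before comparison, so IOASCs differing only in the
 * masked-off low-order bits resolve to the same table entry. A hypothetical
 * caller sketch:
 *
 *	u32 error_index = ipr_get_error(ioasc);
 *	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 *
 * Index 0 is the catch-all entry used when the IOASC is not in the table.
 */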
2510 * ipr_handle_log_data - Log an adapter error.
2511 * @ioa_cfg: ioa config struct
2512 * @hostrcb: hostrcb struct
2514 * This function logs an adapter error to the system.
2519 static void ipr_handle_log_data(struct ipr_ioa_cfg
*ioa_cfg
,
2520 struct ipr_hostrcb
*hostrcb
)
2524 struct ipr_hostrcb_type_21_error
*error
;
2526 if (hostrcb
->hcam
.notify_type
!= IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY
)
2529 if (hostrcb
->hcam
.notifications_lost
== IPR_HOST_RCB_NOTIFICATIONS_LOST
)
2530 dev_err(&ioa_cfg
->pdev
->dev
, "Error notifications lost\n");
2533 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error64
.fd_ioasc
);
2535 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
2537 if (!ioa_cfg
->sis64
&& (ioasc
== IPR_IOASC_BUS_WAS_RESET
||
2538 ioasc
== IPR_IOASC_BUS_WAS_RESET_BY_OTHER
)) {
2539 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2540 scsi_report_bus_reset(ioa_cfg
->host
,
2541 hostrcb
->hcam
.u
.error
.fd_res_addr
.bus
);
2544 error_index
= ipr_get_error(ioasc
);
2546 if (!ipr_error_table
[error_index
].log_hcam
)
2549 if (ioasc
== IPR_IOASC_HW_CMD_FAILED
&&
2550 hostrcb
->hcam
.overlay_id
== IPR_HOST_RCB_OVERLAY_ID_21
) {
2551 error
= &hostrcb
->hcam
.u
.error64
.u
.type_21_error
;
2553 if (((be32_to_cpu(error
->sense_data
[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST
&&
2554 ioa_cfg
->log_level
<= IPR_DEFAULT_LOG_LEVEL
)
2558 ipr_hcam_err(hostrcb
, "%s\n", ipr_error_table
[error_index
].error
);
2560 /* Set indication we have logged an error */
2561 ioa_cfg
->errors_logged
++;
2563 if (ioa_cfg
->log_level
< ipr_error_table
[error_index
].log_hcam
)
2565 if (be32_to_cpu(hostrcb
->hcam
.length
) > sizeof(hostrcb
->hcam
.u
.raw
))
2566 hostrcb
->hcam
.length
= cpu_to_be32(sizeof(hostrcb
->hcam
.u
.raw
));
2568 switch (hostrcb
->hcam
.overlay_id
) {
2569 case IPR_HOST_RCB_OVERLAY_ID_2
:
2570 ipr_log_cache_error(ioa_cfg
, hostrcb
);
2572 case IPR_HOST_RCB_OVERLAY_ID_3
:
2573 ipr_log_config_error(ioa_cfg
, hostrcb
);
2575 case IPR_HOST_RCB_OVERLAY_ID_4
:
2576 case IPR_HOST_RCB_OVERLAY_ID_6
:
2577 ipr_log_array_error(ioa_cfg
, hostrcb
);
2579 case IPR_HOST_RCB_OVERLAY_ID_7
:
2580 ipr_log_dual_ioa_error(ioa_cfg
, hostrcb
);
2582 case IPR_HOST_RCB_OVERLAY_ID_12
:
2583 ipr_log_enhanced_cache_error(ioa_cfg
, hostrcb
);
2585 case IPR_HOST_RCB_OVERLAY_ID_13
:
2586 ipr_log_enhanced_config_error(ioa_cfg
, hostrcb
);
2588 case IPR_HOST_RCB_OVERLAY_ID_14
:
2589 case IPR_HOST_RCB_OVERLAY_ID_16
:
2590 ipr_log_enhanced_array_error(ioa_cfg
, hostrcb
);
2592 case IPR_HOST_RCB_OVERLAY_ID_17
:
2593 ipr_log_enhanced_dual_ioa_error(ioa_cfg
, hostrcb
);
2595 case IPR_HOST_RCB_OVERLAY_ID_20
:
2596 ipr_log_fabric_error(ioa_cfg
, hostrcb
);
2598 case IPR_HOST_RCB_OVERLAY_ID_21
:
2599 ipr_log_sis64_device_error(ioa_cfg
, hostrcb
);
2601 case IPR_HOST_RCB_OVERLAY_ID_23
:
2602 ipr_log_sis64_config_error(ioa_cfg
, hostrcb
);
2604 case IPR_HOST_RCB_OVERLAY_ID_24
:
2605 case IPR_HOST_RCB_OVERLAY_ID_26
:
2606 ipr_log_sis64_array_error(ioa_cfg
, hostrcb
);
2608 case IPR_HOST_RCB_OVERLAY_ID_30
:
2609 ipr_log_sis64_fabric_error(ioa_cfg
, hostrcb
);
2611 case IPR_HOST_RCB_OVERLAY_ID_41
:
2612 ipr_log_sis64_service_required_error(ioa_cfg
, hostrcb
);
2614 case IPR_HOST_RCB_OVERLAY_ID_1
:
2615 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT
:
2617 ipr_log_generic_error(ioa_cfg
, hostrcb
);
2622 static struct ipr_hostrcb
*ipr_get_free_hostrcb(struct ipr_ioa_cfg
*ioa
)
2624 struct ipr_hostrcb
*hostrcb
;
2626 hostrcb
= list_first_entry_or_null(&ioa
->hostrcb_free_q
,
2627 struct ipr_hostrcb
, queue
);
2629 if (unlikely(!hostrcb
)) {
2630 dev_info(&ioa
->pdev
->dev
, "Reclaiming async error buffers.");
2631 hostrcb
= list_first_entry_or_null(&ioa
->hostrcb_report_q
,
2632 struct ipr_hostrcb
, queue
);
2635 list_del_init(&hostrcb
->queue
);
2640 * ipr_process_error - Op done function for an adapter error log.
2641 * @ipr_cmd: ipr command struct
2643 * This function is the op done function for an error log host
2644 * controlled async from the adapter. It will log the error and
2645 * send the HCAM back to the adapter.
2650 static void ipr_process_error(struct ipr_cmnd
*ipr_cmd
)
2652 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
2653 struct ipr_hostrcb
*hostrcb
= ipr_cmd
->u
.hostrcb
;
2654 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
2658 fd_ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error64
.fd_ioasc
);
2660 fd_ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
2662 list_del_init(&hostrcb
->queue
);
2663 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
2666 ipr_handle_log_data(ioa_cfg
, hostrcb
);
2667 if (fd_ioasc
== IPR_IOASC_NR_IOA_RESET_REQUIRED
)
2668 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_ABBREV
);
2669 } else if (ioasc
!= IPR_IOASC_IOA_WAS_RESET
&&
2670 ioasc
!= IPR_IOASC_ABORTED_CMD_TERM_BY_HOST
) {
2671 dev_err(&ioa_cfg
->pdev
->dev
,
2672 "Host RCB failed with IOASC: 0x%08X\n", ioasc
);
2675 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_report_q
);
2676 schedule_work(&ioa_cfg
->work_q
);
2677 hostrcb
= ipr_get_free_hostrcb(ioa_cfg
);
2679 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_LOG_DATA
, hostrcb
);
2683 * ipr_timeout - An internally generated op has timed out.
2684 * @ipr_cmd: ipr command struct
2686 * This function blocks host requests and initiates an
2692 static void ipr_timeout(struct timer_list
*t
)
2694 struct ipr_cmnd
*ipr_cmd
= from_timer(ipr_cmd
, t
, timer
);
2695 unsigned long lock_flags
= 0;
2696 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
2699 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
2701 ioa_cfg
->errors_logged
++;
2702 dev_err(&ioa_cfg
->pdev
->dev
,
2703 "Adapter being reset due to command timeout.\n");
2705 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
2706 ioa_cfg
->sdt_state
= GET_DUMP
;
2708 if (!ioa_cfg
->in_reset_reload
|| ioa_cfg
->reset_cmd
== ipr_cmd
)
2709 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
2711 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
2716 * ipr_oper_timeout - Adapter timed out transitioning to operational
2717 * @ipr_cmd: ipr command struct
2719 * This function blocks host requests and initiates an
2725 static void ipr_oper_timeout(struct timer_list
*t
)
2727 struct ipr_cmnd
*ipr_cmd
= from_timer(ipr_cmd
, t
, timer
);
2728 unsigned long lock_flags
= 0;
2729 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
2732 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
2734 ioa_cfg
->errors_logged
++;
2735 dev_err(&ioa_cfg
->pdev
->dev
,
2736 "Adapter timed out transitioning to operational.\n");
2738 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
2739 ioa_cfg
->sdt_state
= GET_DUMP
;
2741 if (!ioa_cfg
->in_reset_reload
|| ioa_cfg
->reset_cmd
== ipr_cmd
) {
2743 ioa_cfg
->reset_retries
+= IPR_NUM_RESET_RELOAD_RETRIES
;
2744 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
2747 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
2752 * ipr_find_ses_entry - Find matching SES in SES table
2753 * @res: resource entry struct of SES
2756 * pointer to SES table entry / NULL on failure
2758 static const struct ipr_ses_table_entry
*
2759 ipr_find_ses_entry(struct ipr_resource_entry
*res
)
2762 struct ipr_std_inq_vpids
*vpids
;
2763 const struct ipr_ses_table_entry
*ste
= ipr_ses_table
;
2765 for (i
= 0; i
< ARRAY_SIZE(ipr_ses_table
); i
++, ste
++) {
2766 for (j
= 0, matches
= 0; j
< IPR_PROD_ID_LEN
; j
++) {
2767 if (ste
->compare_product_id_byte
[j
] == 'X') {
2768 vpids
= &res
->std_inq_data
.vpids
;
2769 if (vpids
->product_id
[j
] == ste
->product_id
[j
])
2777 if (matches
== IPR_PROD_ID_LEN
)
/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
			continue;

		if (bus != res->bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
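
/*
 * Worked example (illustration only): with a 16-bit wide bus
 * (bus_width = 16) and a hypothetical SES table entry whose
 * max_bus_speed_limit is 160, the expression above gives
 * (160 * 10) / (16 / 8) = 800, i.e. 80 MHz in the 100 kHz units this
 * function returns. If no matching SES entry is found, the value stays at
 * the IPR_MAX_SCSI_RATE(bus_width) default.
 */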
2820 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2821 * @ioa_cfg: ioa config struct
2822 * @max_delay: max delay in micro-seconds to wait
2824 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2827 * 0 on success / other on failure
2829 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg
*ioa_cfg
, int max_delay
)
2831 volatile u32 pcii_reg
;
2834 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2835 while (delay
< max_delay
) {
2836 pcii_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
2838 if (pcii_reg
& IPR_PCII_IO_DEBUG_ACKNOWLEDGE
)
2841 /* udelay cannot be used if delay is more than a few milliseconds */
2842 if ((delay
/ 1000) > MAX_UDELAY_MS
)
2843 mdelay(delay
/ 1000);
/**
 * ipr_get_sis64_dump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:			destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 **/
static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
					   u32 start_addr,
					   __be32 *dest, u32 length_in_words)
{
	int i;

	for (i = 0; i < length_in_words; i++) {
		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
		dest++;
	}

	return 0;
}
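
/*
 * Note (illustration, not part of the driver): the SIS-64 dump path uses a
 * simple indirect-register protocol - the adapter address to read is
 * written to dump_addr_reg, then the corresponding word is read back
 * through dump_data_reg, one 32-bit word per iteration. Dumping N words
 * therefore costs N writel()/readl() pairs; a 4 KB section, for example,
 * is 1024 register round trips.
 */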
2878 * ipr_get_ldump_data_section - Dump IOA memory
2879 * @ioa_cfg: ioa config struct
2880 * @start_addr: adapter address to dump
2881 * @dest: destination kernel buffer
2882 * @length_in_words: length to dump in 4 byte words
2885 * 0 on success / -EIO on failure
2887 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg
*ioa_cfg
,
2889 __be32
*dest
, u32 length_in_words
)
2891 volatile u32 temp_pcii_reg
;
2895 return ipr_get_sis64_dump_data_section(ioa_cfg
, start_addr
,
2896 dest
, length_in_words
);
2898 /* Write IOA interrupt reg starting LDUMP state */
2899 writel((IPR_UPROCI_RESET_ALERT
| IPR_UPROCI_IO_DEBUG_ALERT
),
2900 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2902 /* Wait for IO debug acknowledge */
2903 if (ipr_wait_iodbg_ack(ioa_cfg
,
2904 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC
)) {
2905 dev_err(&ioa_cfg
->pdev
->dev
,
2906 "IOA dump long data transfer timeout\n");
2910 /* Signal LDUMP interlocked - clear IO debug ack */
2911 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2912 ioa_cfg
->regs
.clr_interrupt_reg
);
2914 /* Write Mailbox with starting address */
2915 writel(start_addr
, ioa_cfg
->ioa_mailbox
);
2917 /* Signal address valid - clear IOA Reset alert */
2918 writel(IPR_UPROCI_RESET_ALERT
,
2919 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2921 for (i
= 0; i
< length_in_words
; i
++) {
2922 /* Wait for IO debug acknowledge */
2923 if (ipr_wait_iodbg_ack(ioa_cfg
,
2924 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
)) {
2925 dev_err(&ioa_cfg
->pdev
->dev
,
2926 "IOA dump short data transfer timeout\n");
2930 /* Read data from mailbox and increment destination pointer */
2931 *dest
= cpu_to_be32(readl(ioa_cfg
->ioa_mailbox
));
2934 /* For all but the last word of data, signal data received */
2935 if (i
< (length_in_words
- 1)) {
2936 /* Signal dump data received - Clear IO debug Ack */
2937 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2938 ioa_cfg
->regs
.clr_interrupt_reg
);
2942 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2943 writel(IPR_UPROCI_RESET_ALERT
,
2944 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2946 writel(IPR_UPROCI_IO_DEBUG_ALERT
,
2947 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2949 /* Signal dump data received - Clear IO debug Ack */
2950 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2951 ioa_cfg
->regs
.clr_interrupt_reg
);
2953 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2954 while (delay
< IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
) {
2956 readl(ioa_cfg
->regs
.sense_uproc_interrupt_reg32
);
2958 if (!(temp_pcii_reg
& IPR_UPROCI_RESET_ALERT
))
2968 #ifdef CONFIG_SCSI_IPR_DUMP
2970 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2971 * @ioa_cfg: ioa config struct
2972 * @pci_address: adapter address
2973 * @length: length of data to copy
2975 * Copy data from PCI adapter to kernel buffer.
2976 * Note: length MUST be a 4 byte multiple
2978 * 0 on success / other on failure
2980 static int ipr_sdt_copy(struct ipr_ioa_cfg
*ioa_cfg
,
2981 unsigned long pci_address
, u32 length
)
2983 int bytes_copied
= 0;
2984 int cur_len
, rc
, rem_len
, rem_page_len
, max_dump_size
;
2986 unsigned long lock_flags
= 0;
2987 struct ipr_ioa_dump
*ioa_dump
= &ioa_cfg
->dump
->ioa_dump
;
2990 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
2992 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
2994 while (bytes_copied
< length
&&
2995 (ioa_dump
->hdr
.len
+ bytes_copied
) < max_dump_size
) {
2996 if (ioa_dump
->page_offset
>= PAGE_SIZE
||
2997 ioa_dump
->page_offset
== 0) {
2998 page
= (__be32
*)__get_free_page(GFP_ATOMIC
);
3002 return bytes_copied
;
3005 ioa_dump
->page_offset
= 0;
3006 ioa_dump
->ioa_data
[ioa_dump
->next_page_index
] = page
;
3007 ioa_dump
->next_page_index
++;
3009 page
= ioa_dump
->ioa_data
[ioa_dump
->next_page_index
- 1];
3011 rem_len
= length
- bytes_copied
;
3012 rem_page_len
= PAGE_SIZE
- ioa_dump
->page_offset
;
3013 cur_len
= min(rem_len
, rem_page_len
);
3015 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3016 if (ioa_cfg
->sdt_state
== ABORT_DUMP
) {
3019 rc
= ipr_get_ldump_data_section(ioa_cfg
,
3020 pci_address
+ bytes_copied
,
3021 &page
[ioa_dump
->page_offset
/ 4],
3022 (cur_len
/ sizeof(u32
)));
3024 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3027 ioa_dump
->page_offset
+= cur_len
;
3028 bytes_copied
+= cur_len
;
3036 return bytes_copied
;
3040 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3041 * @hdr: dump entry header struct
3046 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header
*hdr
)
3048 hdr
->eye_catcher
= IPR_DUMP_EYE_CATCHER
;
3050 hdr
->offset
= sizeof(*hdr
);
3051 hdr
->status
= IPR_DUMP_STATUS_SUCCESS
;
/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}
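
/*
 * Illustrative example (values are made up): the firmware version is packed
 * into one 32-bit word as
 *	(major_release << 24) | (card_type << 16) |
 *	(minor_release[0] << 8) | minor_release[1]
 * so a hypothetical VPD of major_release = 0x02, card_type = 0x10 and
 * minor_release = { 0x03, 0x45 } would be recorded as 0x02100345.
 */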
3081 * ipr_dump_version_data - Fill in the driver version in the dump.
3082 * @ioa_cfg: ioa config struct
3083 * @driver_dump: driver dump struct
3088 static void ipr_dump_version_data(struct ipr_ioa_cfg
*ioa_cfg
,
3089 struct ipr_driver_dump
*driver_dump
)
3091 ipr_init_dump_entry_hdr(&driver_dump
->version_entry
.hdr
);
3092 driver_dump
->version_entry
.hdr
.len
=
3093 sizeof(struct ipr_dump_version_entry
) -
3094 sizeof(struct ipr_dump_entry_header
);
3095 driver_dump
->version_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_ASCII
;
3096 driver_dump
->version_entry
.hdr
.id
= IPR_DUMP_DRIVER_VERSION_ID
;
3097 strcpy(driver_dump
->version_entry
.version
, IPR_DRIVER_VERSION
);
3098 driver_dump
->hdr
.num_entries
++;
3102 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3103 * @ioa_cfg: ioa config struct
3104 * @driver_dump: driver dump struct
3109 static void ipr_dump_trace_data(struct ipr_ioa_cfg
*ioa_cfg
,
3110 struct ipr_driver_dump
*driver_dump
)
3112 ipr_init_dump_entry_hdr(&driver_dump
->trace_entry
.hdr
);
3113 driver_dump
->trace_entry
.hdr
.len
=
3114 sizeof(struct ipr_dump_trace_entry
) -
3115 sizeof(struct ipr_dump_entry_header
);
3116 driver_dump
->trace_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3117 driver_dump
->trace_entry
.hdr
.id
= IPR_DUMP_TRACE_ID
;
3118 memcpy(driver_dump
->trace_entry
.trace
, ioa_cfg
->trace
, IPR_TRACE_SIZE
);
3119 driver_dump
->hdr
.num_entries
++;
3123 * ipr_dump_location_data - Fill in the IOA location in the dump.
3124 * @ioa_cfg: ioa config struct
3125 * @driver_dump: driver dump struct
3130 static void ipr_dump_location_data(struct ipr_ioa_cfg
*ioa_cfg
,
3131 struct ipr_driver_dump
*driver_dump
)
3133 ipr_init_dump_entry_hdr(&driver_dump
->location_entry
.hdr
);
3134 driver_dump
->location_entry
.hdr
.len
=
3135 sizeof(struct ipr_dump_location_entry
) -
3136 sizeof(struct ipr_dump_entry_header
);
3137 driver_dump
->location_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_ASCII
;
3138 driver_dump
->location_entry
.hdr
.id
= IPR_DUMP_LOCATION_ID
;
3139 strcpy(driver_dump
->location_entry
.location
, dev_name(&ioa_cfg
->pdev
->dev
));
3140 driver_dump
->hdr
.num_entries
++;
3144 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3145 * @ioa_cfg: ioa config struct
3146 * @dump: dump struct
3151 static void ipr_get_ioa_dump(struct ipr_ioa_cfg
*ioa_cfg
, struct ipr_dump
*dump
)
3153 unsigned long start_addr
, sdt_word
;
3154 unsigned long lock_flags
= 0;
3155 struct ipr_driver_dump
*driver_dump
= &dump
->driver_dump
;
3156 struct ipr_ioa_dump
*ioa_dump
= &dump
->ioa_dump
;
3157 u32 num_entries
, max_num_entries
, start_off
, end_off
;
3158 u32 max_dump_size
, bytes_to_copy
, bytes_copied
, rc
;
3159 struct ipr_sdt
*sdt
;
3165 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3167 if (ioa_cfg
->sdt_state
!= READ_DUMP
) {
3168 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3172 if (ioa_cfg
->sis64
) {
3173 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3174 ssleep(IPR_DUMP_DELAY_SECONDS
);
3175 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3178 start_addr
= readl(ioa_cfg
->ioa_mailbox
);
3180 if (!ioa_cfg
->sis64
&& !ipr_sdt_is_fmt2(start_addr
)) {
3181 dev_err(&ioa_cfg
->pdev
->dev
,
3182 "Invalid dump table format: %lx\n", start_addr
);
3183 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3187 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA initiated\n");
3189 driver_dump
->hdr
.eye_catcher
= IPR_DUMP_EYE_CATCHER
;
3191 /* Initialize the overall dump header */
3192 driver_dump
->hdr
.len
= sizeof(struct ipr_driver_dump
);
3193 driver_dump
->hdr
.num_entries
= 1;
3194 driver_dump
->hdr
.first_entry_offset
= sizeof(struct ipr_dump_header
);
3195 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_SUCCESS
;
3196 driver_dump
->hdr
.os
= IPR_DUMP_OS_LINUX
;
3197 driver_dump
->hdr
.driver_name
= IPR_DUMP_DRIVER_NAME
;
3199 ipr_dump_version_data(ioa_cfg
, driver_dump
);
3200 ipr_dump_location_data(ioa_cfg
, driver_dump
);
3201 ipr_dump_ioa_type_data(ioa_cfg
, driver_dump
);
3202 ipr_dump_trace_data(ioa_cfg
, driver_dump
);
3204 /* Update dump_header */
3205 driver_dump
->hdr
.len
+= sizeof(struct ipr_dump_entry_header
);
3207 /* IOA Dump entry */
3208 ipr_init_dump_entry_hdr(&ioa_dump
->hdr
);
3209 ioa_dump
->hdr
.len
= 0;
3210 ioa_dump
->hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3211 ioa_dump
->hdr
.id
= IPR_DUMP_IOA_DUMP_ID
;
3213 /* First entries in sdt are actually a list of dump addresses and
3214 lengths to gather the real dump data. sdt represents the pointer
3215 to the ioa generated dump table. Dump data will be extracted based
3216 on entries in this table */
3217 sdt
= &ioa_dump
->sdt
;
3219 if (ioa_cfg
->sis64
) {
3220 max_num_entries
= IPR_FMT3_NUM_SDT_ENTRIES
;
3221 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
3223 max_num_entries
= IPR_FMT2_NUM_SDT_ENTRIES
;
3224 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
3227 bytes_to_copy
= offsetof(struct ipr_sdt
, entry
) +
3228 (max_num_entries
* sizeof(struct ipr_sdt_entry
));
3229 rc
= ipr_get_ldump_data_section(ioa_cfg
, start_addr
, (__be32
*)sdt
,
3230 bytes_to_copy
/ sizeof(__be32
));
3232 /* Smart Dump table is ready to use and the first entry is valid */
3233 if (rc
|| ((be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT3_SDT_READY_TO_USE
) &&
3234 (be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT2_SDT_READY_TO_USE
))) {
3235 dev_err(&ioa_cfg
->pdev
->dev
,
3236 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3237 rc
, be32_to_cpu(sdt
->hdr
.state
));
3238 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_FAILED
;
3239 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
3240 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3244 num_entries
= be32_to_cpu(sdt
->hdr
.num_entries_used
);
3246 if (num_entries
> max_num_entries
)
3247 num_entries
= max_num_entries
;
3249 /* Update dump length to the actual data to be copied */
3250 dump
->driver_dump
.hdr
.len
+= sizeof(struct ipr_sdt_header
);
3252 dump
->driver_dump
.hdr
.len
+= num_entries
* sizeof(struct ipr_sdt_entry
);
3254 dump
->driver_dump
.hdr
.len
+= max_num_entries
* sizeof(struct ipr_sdt_entry
);
3256 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3258 for (i
= 0; i
< num_entries
; i
++) {
3259 if (ioa_dump
->hdr
.len
> max_dump_size
) {
3260 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3264 if (sdt
->entry
[i
].flags
& IPR_SDT_VALID_ENTRY
) {
3265 sdt_word
= be32_to_cpu(sdt
->entry
[i
].start_token
);
3267 bytes_to_copy
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3269 start_off
= sdt_word
& IPR_FMT2_MBX_ADDR_MASK
;
3270 end_off
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3272 if (ipr_sdt_is_fmt2(sdt_word
) && sdt_word
)
3273 bytes_to_copy
= end_off
- start_off
;
3278 if (bytes_to_copy
> max_dump_size
) {
3279 sdt
->entry
[i
].flags
&= ~IPR_SDT_VALID_ENTRY
;
3283 /* Copy data from adapter to driver buffers */
3284 bytes_copied
= ipr_sdt_copy(ioa_cfg
, sdt_word
,
3287 ioa_dump
->hdr
.len
+= bytes_copied
;
3289 if (bytes_copied
!= bytes_to_copy
) {
3290 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3297 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA completed.\n");
3299 /* Update dump_header */
3300 driver_dump
->hdr
.len
+= ioa_dump
->hdr
.len
;
3302 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
3307 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3311 * ipr_release_dump - Free adapter dump memory
3312 * @kref: kref struct
3317 static void ipr_release_dump(struct kref
*kref
)
3319 struct ipr_dump
*dump
= container_of(kref
, struct ipr_dump
, kref
);
3320 struct ipr_ioa_cfg
*ioa_cfg
= dump
->ioa_cfg
;
3321 unsigned long lock_flags
= 0;
3325 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3326 ioa_cfg
->dump
= NULL
;
3327 ioa_cfg
->sdt_state
= INACTIVE
;
3328 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3330 for (i
= 0; i
< dump
->ioa_dump
.next_page_index
; i
++)
3331 free_page((unsigned long) dump
->ioa_dump
.ioa_data
[i
]);
3333 vfree(dump
->ioa_dump
.ioa_data
);
3338 static void ipr_add_remove_thread(struct work_struct
*work
)
3340 unsigned long lock_flags
;
3341 struct ipr_resource_entry
*res
;
3342 struct scsi_device
*sdev
;
3343 struct ipr_ioa_cfg
*ioa_cfg
=
3344 container_of(work
, struct ipr_ioa_cfg
, scsi_add_work_q
);
3345 u8 bus
, target
, lun
;
3349 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3354 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
) {
3355 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3359 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3360 if (res
->del_from_ml
&& res
->sdev
) {
3363 if (!scsi_device_get(sdev
)) {
3364 if (!res
->add_to_ml
)
3365 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
3367 res
->del_from_ml
= 0;
3368 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3369 scsi_remove_device(sdev
);
3370 scsi_device_put(sdev
);
3371 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3378 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3379 if (res
->add_to_ml
) {
3381 target
= res
->target
;
3384 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3385 scsi_add_device(ioa_cfg
->host
, bus
, target
, lun
);
3386 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3391 ioa_cfg
->scan_done
= 1;
3392 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3393 kobject_uevent(&ioa_cfg
->host
->shost_dev
.kobj
, KOBJ_CHANGE
);
3398 * ipr_worker_thread - Worker thread
3399 * @work: ioa config struct
3401 * Called at task level from a work thread. This function takes care
3402 * of adding and removing device from the mid-layer as configuration
3403 * changes are detected by the adapter.
3408 static void ipr_worker_thread(struct work_struct
*work
)
3410 unsigned long lock_flags
;
3411 struct ipr_dump
*dump
;
3412 struct ipr_ioa_cfg
*ioa_cfg
=
3413 container_of(work
, struct ipr_ioa_cfg
, work_q
);
3416 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3418 if (ioa_cfg
->sdt_state
== READ_DUMP
) {
3419 dump
= ioa_cfg
->dump
;
3421 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3424 kref_get(&dump
->kref
);
3425 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3426 ipr_get_ioa_dump(ioa_cfg
, dump
);
3427 kref_put(&dump
->kref
, ipr_release_dump
);
3429 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3430 if (ioa_cfg
->sdt_state
== DUMP_OBTAINED
&& !ioa_cfg
->dump_timeout
)
3431 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3432 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3436 if (ioa_cfg
->scsi_unblock
) {
3437 ioa_cfg
->scsi_unblock
= 0;
3438 ioa_cfg
->scsi_blocked
= 0;
3439 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3440 scsi_unblock_requests(ioa_cfg
->host
);
3441 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3442 if (ioa_cfg
->scsi_blocked
)
3443 scsi_block_requests(ioa_cfg
->host
);
3446 if (!ioa_cfg
->scan_enabled
) {
3447 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3451 schedule_work(&ioa_cfg
->scsi_add_work_q
);
3453 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3457 #ifdef CONFIG_SCSI_IPR_TRACE
3459 * ipr_read_trace - Dump the adapter trace
3460 * @filp: open sysfs file
3461 * @kobj: kobject struct
3462 * @bin_attr: bin_attribute struct
3465 * @count: buffer size
3468 * number of bytes printed to buffer
3470 static ssize_t
ipr_read_trace(struct file
*filp
, struct kobject
*kobj
,
3471 struct bin_attribute
*bin_attr
,
3472 char *buf
, loff_t off
, size_t count
)
3474 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
3475 struct Scsi_Host
*shost
= class_to_shost(dev
);
3476 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3477 unsigned long lock_flags
= 0;
3480 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3481 ret
= memory_read_from_buffer(buf
, count
, &off
, ioa_cfg
->trace
,
3483 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3488 static struct bin_attribute ipr_trace_attr
= {
3494 .read
= ipr_read_trace
,
3499 * ipr_show_fw_version - Show the firmware version
3500 * @dev: class device struct
3504 * number of bytes printed to buffer
3506 static ssize_t
ipr_show_fw_version(struct device
*dev
,
3507 struct device_attribute
*attr
, char *buf
)
3509 struct Scsi_Host
*shost
= class_to_shost(dev
);
3510 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3511 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
3512 unsigned long lock_flags
= 0;
3515 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3516 len
= snprintf(buf
, PAGE_SIZE
, "%02X%02X%02X%02X\n",
3517 ucode_vpd
->major_release
, ucode_vpd
->card_type
,
3518 ucode_vpd
->minor_release
[0],
3519 ucode_vpd
->minor_release
[1]);
3520 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3524 static struct device_attribute ipr_fw_version_attr
= {
3526 .name
= "fw_version",
3529 .show
= ipr_show_fw_version
,
3533 * ipr_show_log_level - Show the adapter's error logging level
3534 * @dev: class device struct
3538 * number of bytes printed to buffer
3540 static ssize_t
ipr_show_log_level(struct device
*dev
,
3541 struct device_attribute
*attr
, char *buf
)
3543 struct Scsi_Host
*shost
= class_to_shost(dev
);
3544 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3545 unsigned long lock_flags
= 0;
3548 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3549 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->log_level
);
3550 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3555 * ipr_store_log_level - Change the adapter's error logging level
3556 * @dev: class device struct
3560 * number of bytes printed to buffer
3562 static ssize_t
ipr_store_log_level(struct device
*dev
,
3563 struct device_attribute
*attr
,
3564 const char *buf
, size_t count
)
3566 struct Scsi_Host
*shost
= class_to_shost(dev
);
3567 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3568 unsigned long lock_flags
= 0;
3570 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3571 ioa_cfg
->log_level
= simple_strtoul(buf
, NULL
, 10);
3572 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3576 static struct device_attribute ipr_log_level_attr
= {
3578 .name
= "log_level",
3579 .mode
= S_IRUGO
| S_IWUSR
,
3581 .show
= ipr_show_log_level
,
3582 .store
= ipr_store_log_level
3586 * ipr_store_diagnostics - IOA Diagnostics interface
3587 * @dev: device struct
3589 * @count: buffer size
3591 * This function will reset the adapter and wait a reasonable
3592 * amount of time for any errors that the adapter might log.
3595 * count on success / other on failure
3597 static ssize_t
ipr_store_diagnostics(struct device
*dev
,
3598 struct device_attribute
*attr
,
3599 const char *buf
, size_t count
)
3601 struct Scsi_Host
*shost
= class_to_shost(dev
);
3602 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3603 unsigned long lock_flags
= 0;
3606 if (!capable(CAP_SYS_ADMIN
))
3609 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3610 while (ioa_cfg
->in_reset_reload
) {
3611 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3612 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3613 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3616 ioa_cfg
->errors_logged
= 0;
3617 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3619 if (ioa_cfg
->in_reset_reload
) {
3620 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3621 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3623 /* Wait for a second for any errors to be logged */
3626 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3630 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3631 if (ioa_cfg
->in_reset_reload
|| ioa_cfg
->errors_logged
)
3633 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3638 static struct device_attribute ipr_diagnostics_attr
= {
3640 .name
= "run_diagnostics",
3643 .store
= ipr_store_diagnostics
3647 * ipr_show_adapter_state - Show the adapter's state
3648 * @class_dev: device struct
3652 * number of bytes printed to buffer
3654 static ssize_t
ipr_show_adapter_state(struct device
*dev
,
3655 struct device_attribute
*attr
, char *buf
)
3657 struct Scsi_Host
*shost
= class_to_shost(dev
);
3658 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3659 unsigned long lock_flags
= 0;
3662 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3663 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
3664 len
= snprintf(buf
, PAGE_SIZE
, "offline\n");
3666 len
= snprintf(buf
, PAGE_SIZE
, "online\n");
3667 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3672 * ipr_store_adapter_state - Change adapter state
3673 * @dev: device struct
3675 * @count: buffer size
3677 * This function will change the adapter's state.
3680 * count on success / other on failure
3682 static ssize_t
ipr_store_adapter_state(struct device
*dev
,
3683 struct device_attribute
*attr
,
3684 const char *buf
, size_t count
)
3686 struct Scsi_Host
*shost
= class_to_shost(dev
);
3687 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3688 unsigned long lock_flags
;
3689 int result
= count
, i
;
3691 if (!capable(CAP_SYS_ADMIN
))
3694 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3695 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
&&
3696 !strncmp(buf
, "online", 6)) {
3697 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
3698 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
3699 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 0;
3700 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
3703 ioa_cfg
->reset_retries
= 0;
3704 ioa_cfg
->in_ioa_bringdown
= 0;
3705 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3707 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3708 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3713 static struct device_attribute ipr_ioa_state_attr
= {
3715 .name
= "online_state",
3716 .mode
= S_IRUGO
| S_IWUSR
,
3718 .show
= ipr_show_adapter_state
,
3719 .store
= ipr_store_adapter_state
3723 * ipr_store_reset_adapter - Reset the adapter
3724 * @dev: device struct
3726 * @count: buffer size
3728 * This function will reset the adapter.
3731 * count on success / other on failure
3733 static ssize_t
ipr_store_reset_adapter(struct device
*dev
,
3734 struct device_attribute
*attr
,
3735 const char *buf
, size_t count
)
3737 struct Scsi_Host
*shost
= class_to_shost(dev
);
3738 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3739 unsigned long lock_flags
;
3742 if (!capable(CAP_SYS_ADMIN
))
3745 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3746 if (!ioa_cfg
->in_reset_reload
)
3747 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3748 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3749 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3754 static struct device_attribute ipr_ioa_reset_attr
= {
3756 .name
= "reset_host",
3759 .store
= ipr_store_reset_adapter
static int ipr_iopoll(struct irq_poll *iop, int budget);

/**
 * ipr_show_iopoll_weight - Show ipr polling mode
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_iopoll_weight(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

	return len;
}

/**
 * ipr_store_iopoll_weight - Change the adapter's polling mode
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_store_iopoll_weight(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long user_iopoll_weight;
	unsigned long lock_flags = 0;
	int i;

	if (!ioa_cfg->sis64) {
		dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
		return -EINVAL;
	}
	if (kstrtoul(buf, 10, &user_iopoll_weight))
		return -EINVAL;

	if (user_iopoll_weight > 256) {
		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
		return -EINVAL;
	}

	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
		dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
		return strlen(buf);
	}

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	spin_lock_irqsave(shost->host_lock, lock_flags);
	ioa_cfg->iopoll_weight = user_iopoll_weight;
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
				      ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

	return strlen(buf);
}

static struct device_attribute ipr_iopoll_weight_attr = {
	.attr = {
		.name =		"iopoll_weight",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_iopoll_weight,
	.store = ipr_store_iopoll_weight
};
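/*
 * Example usage (host number is illustrative): a non-zero weight enables
 * irq_poll based completion processing on SIS-64 adapters that have more
 * than one MSI-X vector, e.g.
 *
 *	echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 *
 * while writing 0 disables polling and returns completions to pure
 * interrupt context.
 */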
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:	buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 *	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order;
	struct ipr_sglist *sglist;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
	if (sglist == NULL)
		return NULL;

	sglist->order = order;
	sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
					      &sglist->num_sg);
	if (!sglist->scatterlist) {
		kfree(sglist);
		return NULL;
	}

	return sglist;
}

/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:	scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	sgl_free_order(sglist->scatterlist, sglist->order);
	kfree(sglist);
}

/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:	scatter/gather list pointer
 * @buffer:	buffer pointer
 * @len:	buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	/* Copy all full-size elements */
	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;
	}

	/* Copy any remaining partial element */
	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}
/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
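/*
 * Both IOADL builders above follow the same convention: every element is
 * tagged as a write (microcode flows host -> adapter) and the final element
 * additionally carries IPR_IOADL_FLAGS_LAST so the adapter knows where the
 * scatter/gather list ends.
 */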
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
					sglist->scatterlist, sglist->num_sg,
					DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	char *endline;
	u8 *src;
	int result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	snprintf(fname, sizeof(fname), "%s", buf);

	endline = strchr(fname, '\n');
	if (endline)
		*endline = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
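/*
 * Firmware update is driven from user space by writing the image file name to
 * the sysfs attribute; request_firmware() then loads the file from the normal
 * firmware search path (typically /lib/firmware). Example, with illustrative
 * host number and file name:
 *
 *	echo 534953540401080000 > /sys/class/scsi_host/host0/update_fw
 */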
/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return len;
}

static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name =		"fw_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_type
};
static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
				      struct bin_attribute *bin_attr, char *buf,
				      loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_hostrcb *hostrcb;
	unsigned long lock_flags = 0;
	int ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
					   struct ipr_hostrcb, queue);
	if (!hostrcb) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
				      sizeof(hostrcb->hcam));
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return ret;
}

static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
				      struct bin_attribute *bin_attr, char *buf,
				      loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_hostrcb *hostrcb;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
					   struct ipr_hostrcb, queue);
	if (!hostrcb) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return count;
	}

	/* Reclaim hostrcb before exit */
	list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return count;
}

static struct bin_attribute ipr_ioa_async_err_log = {
	.attr = {
		.name =		"async_err_log",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_async_err_log,
	.write = ipr_next_async_err_log
};

static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	&ipr_iopoll_weight_attr,
	NULL,
};
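/*
 * These adapter-level attributes are published by the SCSI midlayer as files
 * under /sys/class/scsi_host/host<N>/ when the host template references this
 * array, so every entry above corresponds to one sysfs file on the host
 * object.
 */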
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len, sdt_end;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	/* Copy from the driver dump header/data first */
	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (char *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
	else
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	/* Then copy from the IOA dump table */
	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		else
			len = count;
		src = (char *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sdt_end;

	/* Finally, copy the page-indexed IOA data */
	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (char *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}
/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	__be32 **ioa_data;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	if (ioa_cfg->sis64)
		ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
					      sizeof(__be32 *)));
	else
		ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
					      sizeof(__be32 *)));

	if (!ioa_data) {
		ipr_err("Dump memory allocation failed\n");
		kfree(dump);
		return -ENOMEM;
	}

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}

/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	return 0;
}

/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
#endif
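/*
 * With CONFIG_SCSI_IPR_DUMP enabled, writing '1' to the dump binary attribute
 * arms dump collection (ipr_alloc_dump) and writing '0' releases it
 * (ipr_free_dump); the collected dump is read back through the same
 * attribute. Illustrative host number:
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump
 *	cat /sys/class/scsi_host/host0/dump > ioa_dump.bin
 */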
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}
/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name =		"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};

/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       __ipr_format_res_path(res->res_path, buffer,
						     sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name =		"resource_path",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_path
};

/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name =		"device_id",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_device_id
};

/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name =		"resource_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_type
};
/**
 * ipr_show_raw_mode - Show the adapter's raw mode
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_raw_mode(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
	else
		len = -ENXIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_raw_mode - Change the adapter's raw mode
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_store_raw_mode(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res)) {
			res->raw_mode = simple_strtoul(buf, NULL, 10);
			len = count;
			if (res->sdev)
				sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
					    res->raw_mode ? "enabled" : "disabled");
		} else
			len = -EINVAL;
	} else
		len = -ENXIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_raw_mode_attr = {
	.attr = {
		.name =		"raw_mode",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_raw_mode,
	.store = ipr_store_raw_mode
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	&ipr_raw_mode_attr,
	NULL,
};
/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
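/*
 * Worked example of the fixed 128-head / 32-sector geometry: a disk that
 * reports 209715200 512-byte sectors (100 GiB) yields
 * cylinders = 209715200 / (128 * 32) = 51200, so partitioning tools that
 * align to cylinder boundaries naturally land on 4k multiples.
 */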
/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;

/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (!ipr_find_starget(starget)) {
			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->array_ids);
			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->vset_ids);
			else if (starget->channel == 0)
				clear_bit(starget->id, ioa_cfg->target_ids);
		}
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}

/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}
/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			sdev->scsi_level = SCSI_SPC_3;
			sdev->no_report_opcodes = 1;
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		}

		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_res_path(ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}

/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port) {
		rc = ata_sas_port_init(sata_port->ap);
		if (rc == 0)
			rc = ata_sas_sync_probe(sata_port->ap);
	}

	if (rc)
		ipr_slave_destroy(sdev);

	return rc;
}

/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
/**
 * ipr_match_lun - Match function for specified LUN
 * @ipr_cmd:	ipr command struct
 * @device:	device to match (sdev)
 *
 * Returns:
 *	1 if command matches sdev / 0 if command does not match sdev
 **/
static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
{
	if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
		return 1;
	return 0;
}

/**
 * ipr_cmnd_is_free - Check if a command is free or not
 * @ipr_cmd:	ipr command struct
 *
 * Returns:
 *	true / false
 **/
static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *loop_cmd;

	list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
		if (loop_cmd == ipr_cmd)
			return true;
	}

	return false;
}

/**
 * ipr_match_res - Match function for specified resource entry
 * @ipr_cmd:	ipr command struct
 * @resource:	resource entry to match
 *
 * Returns:
 *	1 if command matches resource / 0 if command does not match resource
 **/
static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
{
	struct ipr_resource_entry *res = resource;

	if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
		return 1;
	return 0;
}
/**
 * ipr_wait_for_ops - Wait for matching commands to complete
 * @ioa_cfg:	ioa config struct
 * @device:	device to match (sdev)
 * @match:	match function to use
 *
 * Returns:
 *	SUCCESS / FAILED
 **/
static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
			    int (*match)(struct ipr_cmnd *, void *))
{
	struct ipr_cmnd *ipr_cmd;
	int wait, i;
	unsigned long flags;
	struct ipr_hrr_queue *hrrq;
	signed long timeout = IPR_ABORT_TASK_TIMEOUT;
	DECLARE_COMPLETION_ONSTACK(comp);

	do {
		wait = 0;

		for_each_hrrq(hrrq, ioa_cfg) {
			spin_lock_irqsave(hrrq->lock, flags);
			for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
				ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
				if (!ipr_cmnd_is_free(ipr_cmd)) {
					if (match(ipr_cmd, device)) {
						ipr_cmd->eh_comp = &comp;
						wait++;
					}
				}
			}
			spin_unlock_irqrestore(hrrq->lock, flags);
		}

		if (wait) {
			timeout = wait_for_completion_timeout(&comp, timeout);

			if (!timeout) {
				wait = 0;

				for_each_hrrq(hrrq, ioa_cfg) {
					spin_lock_irqsave(hrrq->lock, flags);
					for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
						ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
						if (!ipr_cmnd_is_free(ipr_cmd)) {
							if (match(ipr_cmd, device)) {
								ipr_cmd->eh_comp = NULL;
								wait++;
							}
						}
					}
					spin_unlock_irqrestore(hrrq->lock, flags);
				}

				if (wait)
					dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
				return wait ? FAILED : SUCCESS;
			}
		}
	} while (wait);

	return SUCCESS;
}
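/*
 * The wait above works by attaching one on-stack completion to every
 * outstanding command selected by the match function; each command's
 * completion path signals ipr_cmd->eh_comp as it finishes, and the loop
 * repeats until no matching commands remain or the abort timeout expires.
 */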
static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;
	int rc = SUCCESS;

	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter being reset as a result of error recovery.\n");

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If we got hit with a host reset while we were already resetting
	 * the adapter for some reason, and the reset failed. */
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		rc = FAILED;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
		if (ipr_cmd->ioa_cfg->sis64)
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
			       sizeof(struct ipr_ioasa_gata));
		else
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
			       sizeof(struct ipr_ioasa_gata));
	}

	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}

/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 * @deadline:	unused
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO, ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
		if (ret != SUCCESS) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

			wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		}
	} else
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
/**
 * __ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0, i;
	struct ipr_hrr_queue *hrrq;

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
			ipr_cmd = ioa_cfg->ipr_cmnd_list[i];

			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				if (!ipr_cmd->qc)
					continue;
				if (ipr_cmnd_is_free(ipr_cmd))
					continue;

				ipr_cmd->done = ipr_sata_eh_done;
				if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
					ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
					ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
				}
			}
		}
		spin_unlock(&hrrq->_lock);
	}
	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;
	res->reset_occurred = 1;

	return rc ? FAILED : SUCCESS;
}

static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;

	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
	res = cmd->device->hostdata;

	if (!res)
		return FAILED;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	if (rc == SUCCESS) {
		if (ipr_is_gata(res) && res->sata_port)
			rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
		else
			rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
	}

	return rc;
}
/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_abort_timeout - An abort task has timed out
 * @t:	Timer context used to fetch ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct timer_list *t)
{
	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc, int_reg;
	int i, op_found = 0;
	struct ipr_hrr_queue *hrrq;

	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload ||
	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;
	if (!res)
		return FAILED;

	/*
	 * If we are aborting a timed out op, chances are that the timeout was caused
	 * by a still not detected EEH error. In such cases, reading a register will
	 * trigger the EEH recovery infrastructure.
	 */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	if (!ipr_is_gscsi(res))
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
			if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
				if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
					op_found = 1;
					break;
				}
			}
		}
		spin_unlock(&hrrq->_lock);
	}

	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED)
		ioasc = 0;

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}
/**
 * ipr_scan_finished - Report whether the device scan is done
 * @shost:		scsi host struct
 * @elapsed_time:	elapsed time of the scan, in jiffies
 *
 * Return value:
 *	0 if scan in progress / 1 if scan is complete
 **/
static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	int rc = 0;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
		rc = 1;
	if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
		rc = 1;
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	return rc;
}

/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;
	struct ipr_ioa_cfg *ioa_cfg;

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	if (rc == SUCCESS)
		rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
	return rc;
}
/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;
	u32 int_mask_reg;

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg &= ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				return IRQ_HANDLED;
			}
		}

		return IRQ_NONE;
	}

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
		if (ioa_cfg->clear_isr) {
			if (ipr_debug && printk_ratelimit())
				dev_err(&ioa_cfg->pdev->dev,
					"Spurious interrupt detected. 0x%08X\n", int_reg);
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			return IRQ_NONE;
		}
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
			dev_err(&ioa_cfg->pdev->dev,
				"No Host RRQ. 0x%08X\n", int_reg);
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}

/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 * @number:	various meanings depending on the caller/message
 *
 * Return value:
 *	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}
static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
			    struct list_head *doneq)
{
	u32 ioasc;
	u16 cmd_index;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
	int num_hrrq = 0;

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrr_queue->allow_interrupts)
		return 0;

	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       hrr_queue->toggle_bit) {

		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
			     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
			     IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
			     cmd_index < hrr_queue->min_cmd_id)) {
			ipr_isr_eh(ioa_cfg,
				   "Invalid response handle from IOA: ",
				   cmd_index);
			break;
		}

		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

		list_move_tail(&ipr_cmd->queue, doneq);

		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
			hrr_queue->hrrq_curr++;
		} else {
			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
			hrr_queue->toggle_bit ^= 1u;
		}
		num_hrrq++;
		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	return num_hrrq;
}
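/*
 * HRRQ processing relies on a toggle-bit protocol: the adapter posts each
 * response handle together with the current generation bit, and the host
 * consumes entries only while that bit matches hrr_queue->toggle_bit. When
 * the host pointer wraps from hrrq_end back to hrrq_start it flips its
 * expected bit, so stale entries from the previous pass through the ring are
 * ignored.
 */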
static int ipr_iopoll(struct irq_poll *iop, int budget)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_hrr_queue *hrrq;
	struct ipr_cmnd *ipr_cmd, *temp;
	unsigned long hrrq_flags;
	int completed_ops;
	LIST_HEAD(doneq);

	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
	ioa_cfg = hrrq->ioa_cfg;

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);

	if (completed_ops < budget)
		irq_poll_complete(iop);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}

	return completed_ops;
}
/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	u32 int_reg = 0;
	int num_hrrq = 0;
	int irq_none = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	while (1) {
		if (ipr_process_hrrq(hrrq, -1, &doneq)) {
			rc = IRQ_HANDLED;

			if (!ioa_cfg->clear_isr)
				break;

			/* Clear the PCI interrupt */
			num_hrrq = 0;
			do {
				writel(IPR_PCII_HRRQ_UPDATED,
				       ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

		} else if (rc == IRQ_NONE && irq_none == 0) {
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			irq_none++;
		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
			   int_reg & IPR_PCII_HRRQ_UPDATED) {
			ipr_isr_eh(ioa_cfg,
				   "Error clearing HRRQ: ", num_hrrq);
			rc = IRQ_HANDLED;
			break;
		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}

/**
 * ipr_isr_mhrrq - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		    hrrq->toggle_bit) {
			irq_poll_sched(&hrrq->iopoll);
			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
			return IRQ_HANDLED;
		}
	} else {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		    hrrq->toggle_bit)
			if (ipr_process_hrrq(hrrq, -1, &doneq))
				rc = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}
/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		if (printk_ratelimit())
			dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	ioarcb->data_transfer_length = cpu_to_be32(length);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
5974 * @ioa_cfg: ioa config struct
5975 * @ipr_cmd: ipr command struct
5978 * 0 on success / -1 on failure
5980 static int ipr_build_ioadl(struct ipr_ioa_cfg
*ioa_cfg
,
5981 struct ipr_cmnd
*ipr_cmd
)
5984 struct scatterlist
*sg
;
5986 u32 ioadl_flags
= 0;
5987 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5988 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
5989 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
5991 length
= scsi_bufflen(scsi_cmd
);
5995 nseg
= scsi_dma_map(scsi_cmd
);
5997 dev_err(&ioa_cfg
->pdev
->dev
, "scsi_dma_map failed!\n");
6001 ipr_cmd
->dma_use_sg
= nseg
;
6003 if (scsi_cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
6004 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
6005 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6006 ioarcb
->data_transfer_length
= cpu_to_be32(length
);
6008 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6009 } else if (scsi_cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
6010 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
6011 ioarcb
->read_data_transfer_length
= cpu_to_be32(length
);
6012 ioarcb
->read_ioadl_len
=
6013 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6016 if (ipr_cmd
->dma_use_sg
<= ARRAY_SIZE(ioarcb
->u
.add_data
.u
.ioadl
)) {
6017 ioadl
= ioarcb
->u
.add_data
.u
.ioadl
;
6018 ioarcb
->write_ioadl_addr
= cpu_to_be32((ipr_cmd
->dma_addr
) +
6019 offsetof(struct ipr_ioarcb
, u
.add_data
));
6020 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
6023 scsi_for_each_sg(scsi_cmd
, sg
, ipr_cmd
->dma_use_sg
, i
) {
6024 ioadl
[i
].flags_and_data_len
=
6025 cpu_to_be32(ioadl_flags
| sg_dma_len(sg
));
6026 ioadl
[i
].address
= cpu_to_be32(sg_dma_address(sg
));
6029 ioadl
[i
-1].flags_and_data_len
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
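/*
 * For small scatter/gather counts the 32-bit path above points the IOARCB at
 * the inline IOADL held in its own add_data area, avoiding a separate DMA
 * region; longer lists fall through to the command block's external IOADL
 * whose address was programmed when the command block was set up.
 */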
6034 * __ipr_erp_done - Process completion of ERP for a device
6035 * @ipr_cmd: ipr command struct
6037 * This function copies the sense buffer into the scsi_cmd
6038 * struct and pushes the scsi_done function.
6043 static void __ipr_erp_done(struct ipr_cmnd
*ipr_cmd
)
6045 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
6046 struct ipr_resource_entry
*res
= scsi_cmd
->device
->hostdata
;
6047 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
6049 if (IPR_IOASC_SENSE_KEY(ioasc
) > 0) {
6050 scsi_cmd
->result
|= (DID_ERROR
<< 16);
6051 scmd_printk(KERN_ERR
, scsi_cmd
,
6052 "Request Sense failed with IOASC: 0x%08X\n", ioasc
);
6054 memcpy(scsi_cmd
->sense_buffer
, ipr_cmd
->sense_buffer
,
6055 SCSI_SENSE_BUFFERSIZE
);
6059 if (!ipr_is_naca_model(res
))
6060 res
->needs_sync_complete
= 1;
6063 scsi_dma_unmap(ipr_cmd
->scsi_cmd
);
6064 scsi_cmd
->scsi_done(scsi_cmd
);
6065 if (ipr_cmd
->eh_comp
)
6066 complete(ipr_cmd
->eh_comp
);
6067 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:		ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_erp_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}
}
/**
 * __ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 * 	nothing
 **/
static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		__ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}
/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_erp_request_sense(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_cmd->device->simple_tags) {
		__ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
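
/*
 * The ERP helpers above chain through ipr_do_req() completions:
 * ipr_erp_cancel_all() -> ipr_erp_request_sense() -> ipr_erp_done().
 * Each step reuses the same ipr_cmnd after ipr_reinit_ipr_cmnd_for_erp(),
 * so a single command block carries the whole recovery sequence.
 */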
/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:		resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->hdr.ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
		data_len = sizeof(struct ipr_ioasa64);
	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
		data_len = sizeof(struct ipr_ioasa);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}
/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
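
/*
 * ipr_gen_sense() emits descriptor-format sense (response code 0x72) only
 * for volume-set devices whose failing LBA does not fit in 32 bits;
 * everything else gets fixed-format sense (0x70), with the failing LBA,
 * when one is available, packed into the information field (bytes 3-6).
 */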
/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;

	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	else
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	return 1;
}
/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			      struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	if (!res) {
		__ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		/*
		 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
		 * so SCSI mid-layer and upper layers handle it accordingly.
		 */
		if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
			scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	case IPR_IOASC_IR_NON_OPTIMIZED:
		if (res->raw_mode) {
			res->raw_mode = 0;
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		} else
			scsi_cmd->result |= (DID_ERROR << 16);
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	unsigned long lock_flags;

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(scsi_cmd);

		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
		scsi_cmd->scsi_done(scsi_cmd);
		if (ipr_cmd->eh_comp)
			complete(ipr_cmd->eh_comp);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
	} else {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		spin_lock(&ipr_cmd->hrrq->_lock);
		ipr_erp_start(ioa_cfg, ipr_cmd);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	}
}
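
/*
 * Completion takes one of two paths above: commands that finished with a
 * zero sense key are completed under only the HRRQ lock, while anything
 * else is funneled through ipr_erp_start() with both the Scsi_Host lock
 * and the HRRQ _lock held, since error recovery touches per-device state.
 */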
/**
 * ipr_queuecommand - Queue a mid-layer request
 * @shost:		scsi host struct
 * @scsi_cmd:	scsi command struct
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct Scsi_Host *shost,
			    struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	unsigned long hrrq_flags, lock_flags;
	int rc;
	struct ipr_hrr_queue *hrrq;
	int hrrq_id;

	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	scsi_cmd->result = (DID_OK << 16);
	res = scsi_cmd->device->hostdata;

	if (ipr_is_gata(res) && res->sata_port) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return rc;
	}

	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
	hrrq = &ioa_cfg->hrrq[hrrq_id];

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		goto err_nodev;
	}

	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
	if (ipr_cmd == NULL) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
	ioarcb = &ipr_cmd->ioarcb;

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ipr_cmd->done = ipr_scsi_eh_done;

	if (ipr_is_gscsi(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		if (res->reset_occurred) {
			res->reset_occurred = 0;
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		}
	}

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;

		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		if (scsi_cmd->flags & SCMD_TAGGED)
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
		else
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
	}

	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	}
	if (res->raw_mode && ipr_is_af_dasd_device(res)) {
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;

		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	}

	if (ioa_cfg->sis64)
		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
	else
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		if (!rc)
			scsi_dma_unmap(scsi_cmd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	if (unlikely(hrrq->ioa_is_dead)) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		scsi_dma_unmap(scsi_cmd);
		goto err_nodev;
	}

	ioarcb->res_handle = res->res_handle;
	if (res->needs_sync_complete) {
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
		res->needs_sync_complete = 0;
	}
	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
	ipr_send_command(ipr_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;

err_nodev:
	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_cmd->result = (DID_NO_CONNECT << 16);
	scsi_cmd->scsi_done(scsi_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;
}
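
/*
 * ipr_queuecommand() spreads I/O across HRRQs via ipr_get_hrrq_index() and
 * only pushes back to the mid-layer with SCSI_MLQUEUE_HOST_BUSY when the
 * adapter is quiescing or no command block is free; requests for devices
 * that are gone complete immediately with DID_NO_CONNECT instead.
 */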
/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
		     void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res)) {
		if (cmd == HDIO_GET_IDENTITY)
			return -ENOTTY;
		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
	}

	return -EINVAL;
}
/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 * 	pointer to buffer with description string
 **/
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.scan_finished = ipr_scan_finished,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME,
};
/**
 * ipr_ata_phy_reset - libata phy_reset handler
 * @ap:		ata port to reset
 *
 **/
static void ipr_ata_phy_reset(struct ata_port *ap)
{
	unsigned long flags;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	int rc;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
		goto out_unlock;

	rc = ipr_device_reset(ioa_cfg, res);

	if (rc) {
		ap->link.device[0].class = ATA_DEV_NONE;
		goto out_unlock;
	}

	ap->link.device[0].class = res->ata_class;
	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
		ap->link.device[0].class = ATA_DEV_NONE;

out_unlock:
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	LEAVE;
}
/**
 * ipr_ata_post_internal - Cleanup after an internal command
 * @qc:	ATA queued command
 *
 * Return value:
 * 	none
 **/
static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_hrr_queue *hrrq;
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
			if (ipr_cmd->qc == qc) {
				ipr_device_reset(ioa_cfg, sata_port->res);
				break;
			}
		}
		spin_unlock(&hrrq->_lock);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
 * @regs:	destination
 * @tf:	source ATA taskfile
 *
 * Return value:
 * 	none
 **/
static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
			     struct ata_taskfile *tf)
{
	regs->feature = tf->feature;
	regs->nsect = tf->nsect;
	regs->lbal = tf->lbal;
	regs->lbam = tf->lbam;
	regs->lbah = tf->lbah;
	regs->device = tf->device;
	regs->command = tf->command;
	regs->hob_feature = tf->hob_feature;
	regs->hob_nsect = tf->hob_nsect;
	regs->hob_lbal = tf->hob_lbal;
	regs->hob_lbam = tf->hob_lbam;
	regs->hob_lbah = tf->hob_lbah;
	regs->ctl = tf->ctl;
}
/**
 * ipr_sata_done - done function for SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer to SATA devices
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	spin_lock(&ipr_cmd->hrrq->_lock);
	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	else
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);

	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
	else
		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	spin_unlock(&ipr_cmd->hrrq->_lock);
	ata_qc_complete(qc);
}
/**
 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:		ATA queued command
 *
 **/
static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
				  struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (qc->dma_dir == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	ioarcb->data_transfer_length = cpu_to_be32(len);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	ioarcb->u.sis64_addr_data.data_ioadl_addr =
		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl64->flags = cpu_to_be32(ioadl_flags);
		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64->address = cpu_to_be64(sg_dma_address(sg));

		last_ioadl64 = ioadl64;
		ioadl64++;
	}

	if (likely(last_ioadl64))
		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:		ATA queued command
 *
 **/
static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
				struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl_desc *last_ioadl = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(len);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(len);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl->address = cpu_to_be32(sg_dma_address(sg));

		last_ioadl = ioadl;
		ioadl++;
	}

	if (likely(last_ioadl))
		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_qc_defer - Get a free ipr_cmd
 * @qc:	queued command
 *
 * Return value:
 *	0 if success
 **/
static int ipr_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_hrr_queue *hrrq;
	int hrrq_id;

	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
	hrrq = &ioa_cfg->hrrq[hrrq_id];

	qc->lldd_task = NULL;
	spin_lock(&hrrq->_lock);
	if (unlikely(hrrq->ioa_is_dead)) {
		spin_unlock(&hrrq->_lock);
		return 0;
	}

	if (unlikely(!hrrq->allow_cmds)) {
		spin_unlock(&hrrq->_lock);
		return ATA_DEFER_LINK;
	}

	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
	if (ipr_cmd == NULL) {
		spin_unlock(&hrrq->_lock);
		return ATA_DEFER_LINK;
	}

	qc->lldd_task = ipr_cmd;
	spin_unlock(&hrrq->_lock);
	return 0;
}
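
/*
 * ipr_qc_defer() reserves the ipr_cmnd up front and parks it in
 * qc->lldd_task, so ipr_qc_issue() normally does not have to allocate one;
 * returning ATA_DEFER_LINK asks libata to retry the qc later when no
 * command block (or no command window) is currently available.
 */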
/**
 * ipr_qc_issue - Issue a SATA qc to a device
 * @qc:	queued command
 *
 * Return value:
 * 	0 if success
 **/
static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_ioarcb_ata_regs *regs;

	if (qc->lldd_task == NULL)
		ipr_qc_defer(qc);

	ipr_cmd = qc->lldd_task;
	if (ipr_cmd == NULL)
		return AC_ERR_SYSTEM;

	qc->lldd_task = NULL;
	spin_lock(&ipr_cmd->hrrq->_lock);
	if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
			ipr_cmd->hrrq->ioa_is_dead)) {
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		return AC_ERR_SYSTEM;
	}

	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	ioarcb = &ipr_cmd->ioarcb;

	if (ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	memset(regs, 0, sizeof(*regs));
	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->qc = qc;
	ipr_cmd->done = ipr_sata_done;
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	ipr_cmd->dma_use_sg = qc->n_elem;

	if (ioa_cfg->sis64)
		ipr_build_ata_ioadl64(ipr_cmd, qc);
	else
		ipr_build_ata_ioadl(ipr_cmd, qc);

	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	ipr_copy_sata_tf(regs, &qc->tf);
	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
	case ATA_PROT_PIO:
		break;

	case ATA_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		break;

	case ATAPI_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	default:
		WARN_ON(1);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		return AC_ERR_INVALID;
	}

	ipr_send_command(ipr_cmd);
	spin_unlock(&ipr_cmd->hrrq->_lock);

	return 0;
}
/**
 * ipr_qc_fill_rtf - Read result TF
 * @qc: ATA queued command
 *
 * Return value:
 * 	true
 **/
static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioasa_gata *g = &sata_port->ioasa;
	struct ata_taskfile *tf = &qc->result_tf;

	tf->feature = g->error;
	tf->nsect = g->nsect;
	tf->lbal = g->lbal;
	tf->lbam = g->lbam;
	tf->lbah = g->lbah;
	tf->device = g->device;
	tf->command = g->status;
	tf->hob_nsect = g->hob_nsect;
	tf->hob_lbal = g->hob_lbal;
	tf->hob_lbam = g->hob_lbam;
	tf->hob_lbah = g->hob_lbah;

	return true;
}
static struct ata_port_operations ipr_sata_ops = {
	.phy_reset = ipr_ata_phy_reset,
	.hardreset = ipr_sata_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.qc_prep = ata_noop_qc_prep,
	.qc_defer = ipr_qc_defer,
	.qc_issue = ipr_qc_issue,
	.qc_fill_rtf = ipr_qc_fill_rtf,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};

static struct ata_port_info sata_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
			  ATA_FLAG_SAS_HOST,
	.pio_mask	= ATA_PIO4_ONLY,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ipr_sata_ops
};
#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
	PVR_NORTHSTAR,
	PVR_PULSAR,
	PVR_POWER4,
	PVR_ICESTAR,
	PVR_SSTAR,
	PVR_POWER4p,
	PVR_630,
	PVR_630p
};

/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 * 	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
			if (pvr_version_is(ipr_blocked_processors[i]))
				return 1;
		}
	}
	return 0;
}
#else
#define ipr_invalid_adapter(ioa_cfg) 0
#endif
/**
 * ipr_ioa_bringdown_done - IOA bring down completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter bring down.
 * It wakes any reset sleepers.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

	ENTER;
	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
		ipr_trace;
		ioa_cfg->scsi_unblock = 1;
		schedule_work(&ioa_cfg->work_q);
	}

	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->reset_retries = 0;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].ioa_is_dead = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);
	LEAVE;

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioa_reset_done - IOA reset completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter reset.
 * It schedules any necessary mid-layer add/removes and
 * wakes any reset sleepers.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;
	int j;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	for (j = 0; j < ioa_cfg->hrrq_num; j++) {
		spin_lock(&ioa_cfg->hrrq[j]._lock);
		ioa_cfg->hrrq[j].allow_cmds = 1;
		spin_unlock(&ioa_cfg->hrrq[j]._lock);
	}
	wmb();
	ioa_cfg->reset_cmd = NULL;
	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml || res->del_from_ml) {
			ipr_trace;
			break;
		}
	}
	schedule_work(&ioa_cfg->work_q);

	for (j = 0; j < IPR_NUM_HCAMS; j++) {
		list_del_init(&ioa_cfg->hostrcb[j]->queue);
		if (j < IPR_NUM_LOG_HCAMS)
			ipr_send_hcam(ioa_cfg,
				IPR_HCAM_CDB_OP_CODE_LOG_DATA,
				ioa_cfg->hostrcb[j]);
		else
			ipr_send_hcam(ioa_cfg,
				IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				ioa_cfg->hostrcb[j]);
	}

	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");

	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	ioa_cfg->scsi_unblock = 1;
	schedule_work(&ioa_cfg->work_q);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}
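
/*
 * The adapter bring-up below is a chain of job_step handlers driven by
 * ipr_reset_ioa_job(): a step that issues a command returns
 * IPR_RC_JOB_RETURN after naming the next step in ipr_cmd->job_step, while
 * a step that finishes synchronously returns IPR_RC_JOB_CONTINUE so the
 * next step runs immediately. job_step_failed, when set, handles errors.
 */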
/**
 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
 * @supported_dev:	supported device struct
 * @vpids:			vendor product id struct
 *
 * Return value:
 * 	none
 **/
static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
				 struct ipr_std_inq_vpids *vpids)
{
	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
	supported_dev->num_records = 1;
	supported_dev->data_length =
		cpu_to_be16(sizeof(struct ipr_supported_device));
	supported_dev->reserved = 0;
}
/**
 * ipr_set_supported_devs - Send Set Supported Devices for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Set Supported Devices to the adapter
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_resource_entry *res = ipr_cmd->u.res;

	ipr_cmd->job_step = ipr_ioa_reset_done;

	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
		if (!ipr_is_scsi_disk(res))
			continue;

		ipr_cmd->u.res = res;
		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;

		ipr_init_ioadl(ipr_cmd,
			       ioa_cfg->vpd_cbs_dma +
				 offsetof(struct ipr_misc_cbs, supp_dev),
			       sizeof(struct ipr_supported_device),
			       IPR_IOADL_FLAGS_WRITE_LAST);

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_SET_SUP_DEVICE_TIMEOUT);

		if (!ioa_cfg->sis64)
			ipr_cmd->job_step = ipr_set_supported_devs;
		LEAVE;
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_get_mode_page - Locate specified mode page
 * @mode_pages:	mode page buffer
 * @page_code:	page code to find
 * @len:		minimum required length for mode page
 *
 * Return value:
 * 	pointer to mode page / NULL on failure
 **/
static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
			       u32 page_code, u32 len)
{
	struct ipr_mode_page_hdr *mode_hdr;
	u32 page_length;
	u32 length;

	if (!mode_pages || (mode_pages->hdr.length == 0))
		return NULL;

	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
	mode_hdr = (struct ipr_mode_page_hdr *)
		(mode_pages->data + mode_pages->hdr.block_desc_len);

	while (length) {
		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
				return mode_hdr;
			break;
		} else {
			page_length = (sizeof(struct ipr_mode_page_hdr) +
				       mode_hdr->page_length);
			length -= page_length;
			mode_hdr = (struct ipr_mode_page_hdr *)
				((unsigned long)mode_hdr + page_length);
		}
	}
	return NULL;
}
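
/*
 * ipr_get_mode_page() walks the MODE SENSE payload one page header at a
 * time: the usable length excludes the mode parameter header and any block
 * descriptors, and a matching page code is only returned if the page is at
 * least as long as the caller's structure requires.
 */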
/**
 * ipr_check_term_power - Check for term power errors
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	IOAFP mode pages buffer
 *
 * Check the IOAFP's mode page 28 for term power errors
 *
 * Return value:
 * 	nothing
 **/
static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_mode_pages *mode_pages)
{
	int i;
	int entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	bus = mode_page->bus;

	for (i = 0; i < mode_page->num_entries; i++) {
		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
			dev_err(&ioa_cfg->pdev->dev,
				"Term power is absent on scsi bus %d\n",
				bus->res_addr.bus);
		}

		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
	}
}
/**
 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
 * @ioa_cfg:	ioa config struct
 *
 * Looks through the config table checking for SES devices. If
 * the SES device is in the SES table indicating a maximum SCSI
 * bus speed, the speed is limited for the bus.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
{
	u32 max_xfer_rate;
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
						       ioa_cfg->bus_attr[i].bus_width);

		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
	}
}
7492 * @ioa_cfg: ioa config struct
7493 * @mode_pages: mode page 28 buffer
7495 * Updates mode page 28 based on driver configuration
7500 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg
*ioa_cfg
,
7501 struct ipr_mode_pages
*mode_pages
)
7503 int i
, entry_length
;
7504 struct ipr_dev_bus_entry
*bus
;
7505 struct ipr_bus_attributes
*bus_attr
;
7506 struct ipr_mode_page28
*mode_page
;
7508 mode_page
= ipr_get_mode_page(mode_pages
, 0x28,
7509 sizeof(struct ipr_mode_page28
));
7511 entry_length
= mode_page
->entry_length
;
7513 /* Loop for each device bus entry */
7514 for (i
= 0, bus
= mode_page
->bus
;
7515 i
< mode_page
->num_entries
;
7516 i
++, bus
= (struct ipr_dev_bus_entry
*)((u8
*)bus
+ entry_length
)) {
7517 if (bus
->res_addr
.bus
> IPR_MAX_NUM_BUSES
) {
7518 dev_err(&ioa_cfg
->pdev
->dev
,
7519 "Invalid resource address reported: 0x%08X\n",
7520 IPR_GET_PHYS_LOC(bus
->res_addr
));
7524 bus_attr
= &ioa_cfg
->bus_attr
[i
];
7525 bus
->extended_reset_delay
= IPR_EXTENDED_RESET_DELAY
;
7526 bus
->bus_width
= bus_attr
->bus_width
;
7527 bus
->max_xfer_rate
= cpu_to_be32(bus_attr
->max_xfer_rate
);
7528 bus
->flags
&= ~IPR_SCSI_ATTR_QAS_MASK
;
7529 if (bus_attr
->qas_enabled
)
7530 bus
->flags
|= IPR_SCSI_ATTR_ENABLE_QAS
;
7532 bus
->flags
|= IPR_SCSI_ATTR_DISABLE_QAS
;
/**
 * ipr_build_mode_select - Build a mode select command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:		Byte 2 of Mode Sense command
 * @dma_addr:	DMA buffer address
 * @xfer_len:	data transfer length
 *
 * Return value:
 * 	none
 **/
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
				  __be32 res_handle, u8 parm,
				  dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
	ioarcb->cmd_pkt.cdb[1] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
}
/**
 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the SCSI bus attributes and sends
 * a Mode Select for Page 28 to activate them.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	int length;

	ENTER;
	ipr_scsi_bus_speed_limit(ioa_cfg);
	ipr_check_term_power(ioa_cfg, mode_pages);
	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_set_supported_devs;
	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
				    struct ipr_resource_entry, queue);
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_build_mode_sense - Builds a mode sense command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:		Byte 2 of mode sense command
 * @dma_addr:	DMA address of mode sense buffer
 * @xfer_len:	Size of DMA buffer
 *
 * Return value:
 * 	none
 **/
static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
				 __be32 res_handle,
				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
	ioarcb->cmd_pkt.cdb[2] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
}
/**
 * ipr_reset_cmd_failed - Handle failure of IOA reset command
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of an IOA bringup command.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	dev_err(&ioa_cfg->pdev->dev,
		"0x%02X failed with IOASC: 0x%08X\n",
		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_set_supported_devs;
		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
					    struct ipr_resource_entry, queue);
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}
/**
 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 28 mode sense to the IOA to
 * retrieve SCSI bus attributes.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x28, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function enables dual IOA RAID support if possible.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	struct ipr_mode_page24 *mode_page;
	int length;

	ENTER;
	mode_page = ipr_get_mode_page(mode_pages, 0x24,
				      sizeof(struct ipr_mode_page24));

	if (mode_page)
		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;

	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
{
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}
/**
 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a mode sense to the IOA to retrieve
 * the IOA Advanced Function Control mode page.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x24, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_init_res_table - Initialize the resource table
 * @ipr_cmd:	ipr command struct
 *
 * This function looks through the existing resource table, comparing
 * it with the config table. This function will take care of old/new
 * devices and schedule adding/removing them from the mid-layer
 * as appropriate.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res, *temp;
	struct ipr_config_table_entry_wrapper cfgtew;
	int entries, found, flag, i;
	LIST_HEAD(old_res);

	ENTER;
	if (ioa_cfg->sis64)
		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
	else
		flag = ioa_cfg->u.cfg_table->hdr.flags;

	if (flag & IPR_UCODE_DOWNLOAD_REQ)
		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");

	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
		list_move_tail(&res->queue, &old_res);

	if (ioa_cfg->sis64)
		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
	else
		entries = ioa_cfg->u.cfg_table->hdr.num_entries;

	for (i = 0; i < entries; i++) {
		if (ioa_cfg->sis64)
			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
		else
			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
		found = 0;

		list_for_each_entry_safe(res, temp, &old_res, queue) {
			if (ipr_is_same_device(res, &cfgtew)) {
				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
				found = 1;
				break;
			}
		}

		if (!found) {
			if (list_empty(&ioa_cfg->free_res_q)) {
				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
				break;
			}

			found = 1;
			res = list_entry(ioa_cfg->free_res_q.next,
					 struct ipr_resource_entry, queue);
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
			ipr_init_res_entry(res, &cfgtew);
			res->add_to_ml = 1;
		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
			res->sdev->allow_restart = 1;

		if (found)
			ipr_update_res_entry(res, &cfgtew);
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		}
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		ipr_clear_res_target(res);
		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	}

	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
	else
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
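
/*
 * ipr_init_res_table() reconciles the freshly fetched config table with the
 * driver's resource list: entries still present are updated in place, new
 * entries are pulled from free_res_q and flagged add_to_ml, and stale
 * entries either get del_from_ml (when a scsi_device exists) or are cleared
 * and returned to free_res_q.
 */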
/**
 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Query IOA Configuration command
 * to the adapter to retrieve the IOA configuration table.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
		ioa_cfg->dual_raid = 1;
	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
		 ucode_vpd->major_release, ucode_vpd->card_type,
		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;

	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
		       IPR_IOADL_FLAGS_READ_LAST);

	ipr_cmd->job_step = ipr_init_res_table;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
{
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
		return IPR_RC_JOB_CONTINUE;

	return ipr_reset_cmd_failed(ipr_cmd);
}
static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
					 __be32 res_handle, u8 sa_code)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
	ioarcb->cmd_pkt.cdb[1] = sa_code;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
}
/**
 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
 * action
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;

	ENTER;

	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;

	if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
		ipr_build_ioa_service_action(ipr_cmd,
					     cpu_to_be32(IPR_IOA_RES_HANDLE),
					     IPR_IOA_SA_CHANGE_CACHE_PARAMS);

		ioarcb->cmd_pkt.cdb[2] = 0x40;

		ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_SET_SUP_DEVICE_TIMEOUT);

		LEAVE;
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This utility function sends an inquiry to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
	LEAVE;
}
/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:		inquiry page 0 buffer
 * @page:		page code.
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
			return 1;

	return 0;
}
/**
 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0xC4 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
	struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
	memset(pageC4, 0, sizeof(*pageC4));

	if (ipr_inquiry_page_supported(page0, 0xC4)) {
		ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
				  (ioa_cfg->vpd_cbs_dma
				   + offsetof(struct ipr_misc_cbs,
					      pageC4_data)),
				  sizeof(struct ipr_inquiry_pageC4));
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0xD0 inquiry to the adapter
 * to retrieve adapter capabilities.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
	memset(cap, 0, sizeof(*cap));

	if (ipr_inquiry_page_supported(page0, 0xD0)) {
		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
				  sizeof(struct ipr_inquiry_cap));
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;

	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5];

	ENTER;

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	if (ipr_invalid_adapter(ioa_cfg)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");

		if (!ipr_testmode) {
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue,
					&ioa_cfg->hrrq->hrrq_free_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	return IPR_RC_JOB_RETURN;
}
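
/*
 * Taken together, the inquiry helpers above form one leg of the adapter
 * bring-up job: ipr_ioafp_std_inquiry() chains to the Page 0 inquiry,
 * which chains to Page 3, then to the capabilities page (0xD0), then to
 * Page 0xC4, and finally hands off to ipr_ioafp_set_caching_parameters.
 * Each step queues its INQUIRY and returns IPR_RC_JOB_RETURN so that
 * ipr_reset_ioa_job() resumes the sequence when the command completes.
 */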
8192 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8193 * @ipr_cmd: ipr command struct
8195 * This function send an Identify Host Request Response Queue
8196 * command to establish the HRRQ with the adapter.
8201 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd
*ipr_cmd
)
8203 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8204 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
8205 struct ipr_hrr_queue
*hrrq
;
8208 ipr_cmd
->job_step
= ipr_ioafp_std_inquiry
;
8209 if (ioa_cfg
->identify_hrrq_index
== 0)
8210 dev_info(&ioa_cfg
->pdev
->dev
, "Starting IOA initialization sequence.\n");
8212 if (ioa_cfg
->identify_hrrq_index
< ioa_cfg
->hrrq_num
) {
8213 hrrq
= &ioa_cfg
->hrrq
[ioa_cfg
->identify_hrrq_index
];
8215 ioarcb
->cmd_pkt
.cdb
[0] = IPR_ID_HOST_RR_Q
;
8216 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
8218 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
8220 ioarcb
->cmd_pkt
.cdb
[1] = 0x1;
8222 if (ioa_cfg
->nvectors
== 1)
8223 ioarcb
->cmd_pkt
.cdb
[1] &= ~IPR_ID_HRRQ_SELE_ENABLE
;
8225 ioarcb
->cmd_pkt
.cdb
[1] |= IPR_ID_HRRQ_SELE_ENABLE
;
8227 ioarcb
->cmd_pkt
.cdb
[2] =
8228 ((u64
) hrrq
->host_rrq_dma
>> 24) & 0xff;
8229 ioarcb
->cmd_pkt
.cdb
[3] =
8230 ((u64
) hrrq
->host_rrq_dma
>> 16) & 0xff;
8231 ioarcb
->cmd_pkt
.cdb
[4] =
8232 ((u64
) hrrq
->host_rrq_dma
>> 8) & 0xff;
8233 ioarcb
->cmd_pkt
.cdb
[5] =
8234 ((u64
) hrrq
->host_rrq_dma
) & 0xff;
8235 ioarcb
->cmd_pkt
.cdb
[7] =
8236 ((sizeof(u32
) * hrrq
->size
) >> 8) & 0xff;
8237 ioarcb
->cmd_pkt
.cdb
[8] =
8238 (sizeof(u32
) * hrrq
->size
) & 0xff;
8240 if (ioarcb
->cmd_pkt
.cdb
[1] & IPR_ID_HRRQ_SELE_ENABLE
)
8241 ioarcb
->cmd_pkt
.cdb
[9] =
8242 ioa_cfg
->identify_hrrq_index
;
8244 if (ioa_cfg
->sis64
) {
8245 ioarcb
->cmd_pkt
.cdb
[10] =
8246 ((u64
) hrrq
->host_rrq_dma
>> 56) & 0xff;
8247 ioarcb
->cmd_pkt
.cdb
[11] =
8248 ((u64
) hrrq
->host_rrq_dma
>> 48) & 0xff;
8249 ioarcb
->cmd_pkt
.cdb
[12] =
8250 ((u64
) hrrq
->host_rrq_dma
>> 40) & 0xff;
8251 ioarcb
->cmd_pkt
.cdb
[13] =
8252 ((u64
) hrrq
->host_rrq_dma
>> 32) & 0xff;
8255 if (ioarcb
->cmd_pkt
.cdb
[1] & IPR_ID_HRRQ_SELE_ENABLE
)
8256 ioarcb
->cmd_pkt
.cdb
[14] =
8257 ioa_cfg
->identify_hrrq_index
;
8259 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
8260 IPR_INTERNAL_TIMEOUT
);
8262 if (++ioa_cfg
->identify_hrrq_index
< ioa_cfg
->hrrq_num
)
8263 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
8266 return IPR_RC_JOB_RETURN
;
8270 return IPR_RC_JOB_CONTINUE
;
/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @t:	timer context used to fetch the ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 **/
static void ipr_reset_timer_done(struct timer_list *t)
{
	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}
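
/*
 * Typical use of ipr_reset_start_timer(), as in ipr_reset_slot_reset_done()
 * and ipr_reset_start_bist() below: set the next job step, park the command
 * until the timer fires, and return control to the job router, e.g.
 *
 *	ipr_cmd->job_step = ipr_reset_bist_done;
 *	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
 *	return IPR_RC_JOB_RETURN;
 */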
/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);

		/* Initialize Host RRQ pointers */
		hrrq->hrrq_start = hrrq->host_rrq;
		hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
		hrrq->hrrq_curr = hrrq->hrrq_start;
		hrrq->toggle_bit = 1;
		spin_unlock(&hrrq->_lock);
	}

	ioa_cfg->identify_hrrq_index = 0;
	if (ioa_cfg->hrrq_num == 1)
		atomic_set(&ioa_cfg->hrrq_index, 0);
	else
		atomic_set(&ioa_cfg->hrrq_index, 1);

	/* Zero out config table */
	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
}
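
/*
 * The host RRQ initialized above is a circular buffer of 32-bit response
 * descriptors shared with the adapter: hrrq_curr tracks the next entry the
 * host expects, and toggle_bit flips each time the queue wraps, which is
 * how the response processing path tells newly posted entries from stale
 * ones (see the HRRQ interrupt handling earlier in this driver).
 */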
8364 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8365 * @ipr_cmd: ipr command struct
8368 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8370 static int ipr_reset_next_stage(struct ipr_cmnd
*ipr_cmd
)
8372 unsigned long stage
, stage_time
;
8374 volatile u32 int_reg
;
8375 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8378 feedback
= readl(ioa_cfg
->regs
.init_feedback_reg
);
8379 stage
= feedback
& IPR_IPL_INIT_STAGE_MASK
;
8380 stage_time
= feedback
& IPR_IPL_INIT_STAGE_TIME_MASK
;
8382 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage
, stage_time
);
8384 /* sanity check the stage_time value */
8385 if (stage_time
== 0)
8386 stage_time
= IPR_IPL_INIT_DEFAULT_STAGE_TIME
;
8387 else if (stage_time
< IPR_IPL_INIT_MIN_STAGE_TIME
)
8388 stage_time
= IPR_IPL_INIT_MIN_STAGE_TIME
;
8389 else if (stage_time
> IPR_LONG_OPERATIONAL_TIMEOUT
)
8390 stage_time
= IPR_LONG_OPERATIONAL_TIMEOUT
;
8392 if (stage
== IPR_IPL_INIT_STAGE_UNKNOWN
) {
8393 writel(IPR_PCII_IPL_STAGE_CHANGE
, ioa_cfg
->regs
.set_interrupt_mask_reg
);
8394 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
8395 stage_time
= ioa_cfg
->transop_timeout
;
8396 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
8397 } else if (stage
== IPR_IPL_INIT_STAGE_TRANSOP
) {
8398 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
8399 if (int_reg
& IPR_PCII_IOA_TRANS_TO_OPER
) {
8400 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
8401 maskval
= IPR_PCII_IPL_STAGE_CHANGE
;
8402 maskval
= (maskval
<< 32) | IPR_PCII_IOA_TRANS_TO_OPER
;
8403 writeq(maskval
, ioa_cfg
->regs
.set_interrupt_mask_reg
);
8404 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
8405 return IPR_RC_JOB_CONTINUE
;
8409 ipr_cmd
->timer
.expires
= jiffies
+ stage_time
* HZ
;
8410 ipr_cmd
->timer
.function
= ipr_oper_timeout
;
8411 ipr_cmd
->done
= ipr_reset_ioa_job
;
8412 add_timer(&ipr_cmd
->timer
);
8414 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
8416 return IPR_RC_JOB_RETURN
;
8420 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8421 * @ipr_cmd: ipr command struct
8423 * This function reinitializes some control blocks and
8424 * enables destructive diagnostics on the adapter.
8429 static int ipr_reset_enable_ioa(struct ipr_cmnd
*ipr_cmd
)
8431 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8432 volatile u32 int_reg
;
8433 volatile u64 maskval
;
8437 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
8438 ipr_init_ioa_mem(ioa_cfg
);
8440 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8441 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
8442 ioa_cfg
->hrrq
[i
].allow_interrupts
= 1;
8443 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
8445 if (ioa_cfg
->sis64
) {
8446 /* Set the adapter to the correct endian mode. */
8447 writel(IPR_ENDIAN_SWAP_KEY
, ioa_cfg
->regs
.endian_swap_reg
);
8448 int_reg
= readl(ioa_cfg
->regs
.endian_swap_reg
);
8451 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
8453 if (int_reg
& IPR_PCII_IOA_TRANS_TO_OPER
) {
8454 writel((IPR_PCII_ERROR_INTERRUPTS
| IPR_PCII_HRRQ_UPDATED
),
8455 ioa_cfg
->regs
.clr_interrupt_mask_reg32
);
8456 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
8457 return IPR_RC_JOB_CONTINUE
;
8460 /* Enable destructive diagnostics on IOA */
8461 writel(ioa_cfg
->doorbell
, ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
8463 if (ioa_cfg
->sis64
) {
8464 maskval
= IPR_PCII_IPL_STAGE_CHANGE
;
8465 maskval
= (maskval
<< 32) | IPR_PCII_OPER_INTERRUPTS
;
8466 writeq(maskval
, ioa_cfg
->regs
.clr_interrupt_mask_reg
);
8468 writel(IPR_PCII_OPER_INTERRUPTS
, ioa_cfg
->regs
.clr_interrupt_mask_reg32
);
8470 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
8472 dev_info(&ioa_cfg
->pdev
->dev
, "Initializing IOA.\n");
8474 if (ioa_cfg
->sis64
) {
8475 ipr_cmd
->job_step
= ipr_reset_next_stage
;
8476 return IPR_RC_JOB_CONTINUE
;
8479 ipr_cmd
->timer
.expires
= jiffies
+ (ioa_cfg
->transop_timeout
* HZ
);
8480 ipr_cmd
->timer
.function
= ipr_oper_timeout
;
8481 ipr_cmd
->done
= ipr_reset_ioa_job
;
8482 add_timer(&ipr_cmd
->timer
);
8483 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
8486 return IPR_RC_JOB_RETURN
;
/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	else if (ioa_cfg->sdt_state == READ_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ioa_cfg->dump_timeout = 1;
	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:	ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}
8531 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8532 * @ioa_cfg: ioa config struct
8534 * Fetches the unit check buffer from the adapter by clocking the data
8535 * through the mailbox register.
8540 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg
*ioa_cfg
)
8542 unsigned long mailbox
;
8543 struct ipr_hostrcb
*hostrcb
;
8544 struct ipr_uc_sdt sdt
;
8548 mailbox
= readl(ioa_cfg
->ioa_mailbox
);
8550 if (!ioa_cfg
->sis64
&& !ipr_sdt_is_fmt2(mailbox
)) {
8551 ipr_unit_check_no_data(ioa_cfg
);
8555 memset(&sdt
, 0, sizeof(struct ipr_uc_sdt
));
8556 rc
= ipr_get_ldump_data_section(ioa_cfg
, mailbox
, (__be32
*) &sdt
,
8557 (sizeof(struct ipr_uc_sdt
)) / sizeof(__be32
));
8559 if (rc
|| !(sdt
.entry
[0].flags
& IPR_SDT_VALID_ENTRY
) ||
8560 ((be32_to_cpu(sdt
.hdr
.state
) != IPR_FMT3_SDT_READY_TO_USE
) &&
8561 (be32_to_cpu(sdt
.hdr
.state
) != IPR_FMT2_SDT_READY_TO_USE
))) {
8562 ipr_unit_check_no_data(ioa_cfg
);
8566 /* Find length of the first sdt entry (UC buffer) */
8567 if (be32_to_cpu(sdt
.hdr
.state
) == IPR_FMT3_SDT_READY_TO_USE
)
8568 length
= be32_to_cpu(sdt
.entry
[0].end_token
);
8570 length
= (be32_to_cpu(sdt
.entry
[0].end_token
) -
8571 be32_to_cpu(sdt
.entry
[0].start_token
)) &
8572 IPR_FMT2_MBX_ADDR_MASK
;
8574 hostrcb
= list_entry(ioa_cfg
->hostrcb_free_q
.next
,
8575 struct ipr_hostrcb
, queue
);
8576 list_del_init(&hostrcb
->queue
);
8577 memset(&hostrcb
->hcam
, 0, sizeof(hostrcb
->hcam
));
8579 rc
= ipr_get_ldump_data_section(ioa_cfg
,
8580 be32_to_cpu(sdt
.entry
[0].start_token
),
8581 (__be32
*)&hostrcb
->hcam
,
8582 min(length
, (int)sizeof(hostrcb
->hcam
)) / sizeof(__be32
));
8585 ipr_handle_log_data(ioa_cfg
, hostrcb
);
8586 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
8587 if (ioasc
== IPR_IOASC_NR_IOA_RESET_REQUIRED
&&
8588 ioa_cfg
->sdt_state
== GET_DUMP
)
8589 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
8591 ipr_unit_check_no_data(ioa_cfg
);
8593 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_free_q
);
/**
 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function will call to get the unit check buffer.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ioa_cfg->ioa_unit_checked = 0;
	ipr_get_unit_check_buffer(ioa_cfg);
	ipr_cmd->job_step = ipr_reset_alert;
	ipr_reset_start_timer(ipr_cmd, 0);

	return IPR_RC_JOB_RETURN;
}
8619 static int ipr_dump_mailbox_wait(struct ipr_cmnd
*ipr_cmd
)
8621 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8625 if (ioa_cfg
->sdt_state
!= GET_DUMP
)
8626 return IPR_RC_JOB_RETURN
;
8628 if (!ioa_cfg
->sis64
|| !ipr_cmd
->u
.time_left
||
8629 (readl(ioa_cfg
->regs
.sense_interrupt_reg
) &
8630 IPR_PCII_MAILBOX_STABLE
)) {
8632 if (!ipr_cmd
->u
.time_left
)
8633 dev_err(&ioa_cfg
->pdev
->dev
,
8634 "Timed out waiting for Mailbox register.\n");
8636 ioa_cfg
->sdt_state
= READ_DUMP
;
8637 ioa_cfg
->dump_timeout
= 0;
8639 ipr_reset_start_timer(ipr_cmd
, IPR_SIS64_DUMP_TIMEOUT
);
8641 ipr_reset_start_timer(ipr_cmd
, IPR_SIS32_DUMP_TIMEOUT
);
8642 ipr_cmd
->job_step
= ipr_reset_wait_for_dump
;
8643 schedule_work(&ioa_cfg
->work_q
);
8646 ipr_cmd
->u
.time_left
-= IPR_CHECK_FOR_RESET_TIMEOUT
;
8647 ipr_reset_start_timer(ipr_cmd
,
8648 IPR_CHECK_FOR_RESET_TIMEOUT
);
8652 return IPR_RC_JOB_RETURN
;
8656 * ipr_reset_restore_cfg_space - Restore PCI config space.
8657 * @ipr_cmd: ipr command struct
8659 * Description: This function restores the saved PCI config space of
8660 * the adapter, fails all outstanding ops back to the callers, and
8661 * fetches the dump/unit check if applicable to this reset.
8664 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8666 static int ipr_reset_restore_cfg_space(struct ipr_cmnd
*ipr_cmd
)
8668 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8672 ioa_cfg
->pdev
->state_saved
= true;
8673 pci_restore_state(ioa_cfg
->pdev
);
8675 if (ipr_set_pcix_cmd_reg(ioa_cfg
)) {
8676 ipr_cmd
->s
.ioasa
.hdr
.ioasc
= cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR
);
8677 return IPR_RC_JOB_CONTINUE
;
8680 ipr_fail_all_ops(ioa_cfg
);
8682 if (ioa_cfg
->sis64
) {
8683 /* Set the adapter to the correct endian mode. */
8684 writel(IPR_ENDIAN_SWAP_KEY
, ioa_cfg
->regs
.endian_swap_reg
);
8685 int_reg
= readl(ioa_cfg
->regs
.endian_swap_reg
);
8688 if (ioa_cfg
->ioa_unit_checked
) {
8689 if (ioa_cfg
->sis64
) {
8690 ipr_cmd
->job_step
= ipr_reset_get_unit_check_job
;
8691 ipr_reset_start_timer(ipr_cmd
, IPR_DUMP_DELAY_TIMEOUT
);
8692 return IPR_RC_JOB_RETURN
;
8694 ioa_cfg
->ioa_unit_checked
= 0;
8695 ipr_get_unit_check_buffer(ioa_cfg
);
8696 ipr_cmd
->job_step
= ipr_reset_alert
;
8697 ipr_reset_start_timer(ipr_cmd
, 0);
8698 return IPR_RC_JOB_RETURN
;
8702 if (ioa_cfg
->in_ioa_bringdown
) {
8703 ipr_cmd
->job_step
= ipr_ioa_bringdown_done
;
8704 } else if (ioa_cfg
->sdt_state
== GET_DUMP
) {
8705 ipr_cmd
->job_step
= ipr_dump_mailbox_wait
;
8706 ipr_cmd
->u
.time_left
= IPR_WAIT_FOR_MAILBOX
;
8708 ipr_cmd
->job_step
= ipr_reset_enable_ioa
;
8712 return IPR_RC_JOB_CONTINUE
;
8716 * ipr_reset_bist_done - BIST has completed on the adapter.
8717 * @ipr_cmd: ipr command struct
8719 * Description: Unblock config space and resume the reset process.
8722 * IPR_RC_JOB_CONTINUE
8724 static int ipr_reset_bist_done(struct ipr_cmnd
*ipr_cmd
)
8726 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8729 if (ioa_cfg
->cfg_locked
)
8730 pci_cfg_access_unlock(ioa_cfg
->pdev
);
8731 ioa_cfg
->cfg_locked
= 0;
8732 ipr_cmd
->job_step
= ipr_reset_restore_cfg_space
;
8734 return IPR_RC_JOB_CONTINUE
;
8738 * ipr_reset_start_bist - Run BIST on the adapter.
8739 * @ipr_cmd: ipr command struct
8741 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8744 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8746 static int ipr_reset_start_bist(struct ipr_cmnd
*ipr_cmd
)
8748 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8749 int rc
= PCIBIOS_SUCCESSFUL
;
8752 if (ioa_cfg
->ipr_chip
->bist_method
== IPR_MMIO
)
8753 writel(IPR_UPROCI_SIS64_START_BIST
,
8754 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
8756 rc
= pci_write_config_byte(ioa_cfg
->pdev
, PCI_BIST
, PCI_BIST_START
);
8758 if (rc
== PCIBIOS_SUCCESSFUL
) {
8759 ipr_cmd
->job_step
= ipr_reset_bist_done
;
8760 ipr_reset_start_timer(ipr_cmd
, IPR_WAIT_FOR_BIST_TIMEOUT
);
8761 rc
= IPR_RC_JOB_RETURN
;
8763 if (ioa_cfg
->cfg_locked
)
8764 pci_cfg_access_unlock(ipr_cmd
->ioa_cfg
->pdev
);
8765 ioa_cfg
->cfg_locked
= 0;
8766 ipr_cmd
->s
.ioasa
.hdr
.ioasc
= cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR
);
8767 rc
= IPR_RC_JOB_CONTINUE
;
8775 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8776 * @ipr_cmd: ipr command struct
8778 * Description: This clears PCI reset to the adapter and delays two seconds.
8783 static int ipr_reset_slot_reset_done(struct ipr_cmnd
*ipr_cmd
)
8786 ipr_cmd
->job_step
= ipr_reset_bist_done
;
8787 ipr_reset_start_timer(ipr_cmd
, IPR_WAIT_FOR_BIST_TIMEOUT
);
8789 return IPR_RC_JOB_RETURN
;
8793 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8794 * @work: work struct
8796 * Description: This pulses warm reset to a slot.
8799 static void ipr_reset_reset_work(struct work_struct
*work
)
8801 struct ipr_cmnd
*ipr_cmd
= container_of(work
, struct ipr_cmnd
, work
);
8802 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8803 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
8804 unsigned long lock_flags
= 0;
8807 pci_set_pcie_reset_state(pdev
, pcie_warm_reset
);
8808 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT
));
8809 pci_set_pcie_reset_state(pdev
, pcie_deassert_reset
);
8811 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
8812 if (ioa_cfg
->reset_cmd
== ipr_cmd
)
8813 ipr_reset_ioa_job(ipr_cmd
);
8814 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
8819 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8820 * @ipr_cmd: ipr command struct
8822 * Description: This asserts PCI reset to the adapter.
8827 static int ipr_reset_slot_reset(struct ipr_cmnd
*ipr_cmd
)
8829 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8832 INIT_WORK(&ipr_cmd
->work
, ipr_reset_reset_work
);
8833 queue_work(ioa_cfg
->reset_work_q
, &ipr_cmd
->work
);
8834 ipr_cmd
->job_step
= ipr_reset_slot_reset_done
;
8836 return IPR_RC_JOB_RETURN
;
8840 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8841 * @ipr_cmd: ipr command struct
8843 * Description: This attempts to block config access to the IOA.
8846 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8848 static int ipr_reset_block_config_access_wait(struct ipr_cmnd
*ipr_cmd
)
8850 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8851 int rc
= IPR_RC_JOB_CONTINUE
;
8853 if (pci_cfg_access_trylock(ioa_cfg
->pdev
)) {
8854 ioa_cfg
->cfg_locked
= 1;
8855 ipr_cmd
->job_step
= ioa_cfg
->reset
;
8857 if (ipr_cmd
->u
.time_left
) {
8858 rc
= IPR_RC_JOB_RETURN
;
8859 ipr_cmd
->u
.time_left
-= IPR_CHECK_FOR_RESET_TIMEOUT
;
8860 ipr_reset_start_timer(ipr_cmd
,
8861 IPR_CHECK_FOR_RESET_TIMEOUT
);
8863 ipr_cmd
->job_step
= ioa_cfg
->reset
;
8864 dev_err(&ioa_cfg
->pdev
->dev
,
8865 "Timed out waiting to lock config access. Resetting anyway.\n");
8873 * ipr_reset_block_config_access - Block config access to the IOA
8874 * @ipr_cmd: ipr command struct
8876 * Description: This attempts to block config access to the IOA
8879 * IPR_RC_JOB_CONTINUE
8881 static int ipr_reset_block_config_access(struct ipr_cmnd
*ipr_cmd
)
8883 ipr_cmd
->ioa_cfg
->cfg_locked
= 0;
8884 ipr_cmd
->job_step
= ipr_reset_block_config_access_wait
;
8885 ipr_cmd
->u
.time_left
= IPR_WAIT_FOR_RESET_TIMEOUT
;
8886 return IPR_RC_JOB_CONTINUE
;
8890 * ipr_reset_allowed - Query whether or not IOA can be reset
8891 * @ioa_cfg: ioa config struct
8894 * 0 if reset not allowed / non-zero if reset is allowed
8896 static int ipr_reset_allowed(struct ipr_ioa_cfg
*ioa_cfg
)
8898 volatile u32 temp_reg
;
8900 temp_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
8901 return ((temp_reg
& IPR_PCII_CRITICAL_OPERATION
) == 0);
8905 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8906 * @ipr_cmd: ipr command struct
8908 * Description: This function waits for adapter permission to run BIST,
8909 * then runs BIST. If the adapter does not give permission after a
8910 * reasonable time, we will reset the adapter anyway. The impact of
8911 * resetting the adapter without warning the adapter is the risk of
8912 * losing the persistent error log on the adapter. If the adapter is
8913 * reset while it is writing to the flash on the adapter, the flash
8914 * segment will have bad ECC and be zeroed.
8917 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8919 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd
*ipr_cmd
)
8921 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8922 int rc
= IPR_RC_JOB_RETURN
;
8924 if (!ipr_reset_allowed(ioa_cfg
) && ipr_cmd
->u
.time_left
) {
8925 ipr_cmd
->u
.time_left
-= IPR_CHECK_FOR_RESET_TIMEOUT
;
8926 ipr_reset_start_timer(ipr_cmd
, IPR_CHECK_FOR_RESET_TIMEOUT
);
8928 ipr_cmd
->job_step
= ipr_reset_block_config_access
;
8929 rc
= IPR_RC_JOB_CONTINUE
;
8936 * ipr_reset_alert - Alert the adapter of a pending reset
8937 * @ipr_cmd: ipr command struct
8939 * Description: This function alerts the adapter that it will be reset.
8940 * If memory space is not currently enabled, proceed directly
8941 * to running BIST on the adapter. The timer must always be started
8942 * so we guarantee we do not run BIST from ipr_isr.
8947 static int ipr_reset_alert(struct ipr_cmnd
*ipr_cmd
)
8949 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8954 rc
= pci_read_config_word(ioa_cfg
->pdev
, PCI_COMMAND
, &cmd_reg
);
8956 if ((rc
== PCIBIOS_SUCCESSFUL
) && (cmd_reg
& PCI_COMMAND_MEMORY
)) {
8957 ipr_mask_and_clear_interrupts(ioa_cfg
, ~0);
8958 writel(IPR_UPROCI_RESET_ALERT
, ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
8959 ipr_cmd
->job_step
= ipr_reset_wait_to_start_bist
;
8961 ipr_cmd
->job_step
= ipr_reset_block_config_access
;
8964 ipr_cmd
->u
.time_left
= IPR_WAIT_FOR_RESET_TIMEOUT
;
8965 ipr_reset_start_timer(ipr_cmd
, IPR_CHECK_FOR_RESET_TIMEOUT
);
8968 return IPR_RC_JOB_RETURN
;
8972 * ipr_reset_quiesce_done - Complete IOA disconnect
8973 * @ipr_cmd: ipr command struct
8975 * Description: Freeze the adapter to complete quiesce processing
8978 * IPR_RC_JOB_CONTINUE
8980 static int ipr_reset_quiesce_done(struct ipr_cmnd
*ipr_cmd
)
8982 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8985 ipr_cmd
->job_step
= ipr_ioa_bringdown_done
;
8986 ipr_mask_and_clear_interrupts(ioa_cfg
, ~IPR_PCII_IOA_TRANS_TO_OPER
);
8988 return IPR_RC_JOB_CONTINUE
;
8992 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8993 * @ipr_cmd: ipr command struct
8995 * Description: Ensure nothing is outstanding to the IOA and
8996 * proceed with IOA disconnect. Otherwise reset the IOA.
8999 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
9001 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd
*ipr_cmd
)
9003 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
9004 struct ipr_cmnd
*loop_cmd
;
9005 struct ipr_hrr_queue
*hrrq
;
9006 int rc
= IPR_RC_JOB_CONTINUE
;
9010 ipr_cmd
->job_step
= ipr_reset_quiesce_done
;
9012 for_each_hrrq(hrrq
, ioa_cfg
) {
9013 spin_lock(&hrrq
->_lock
);
9014 list_for_each_entry(loop_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
9016 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
9017 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
9018 rc
= IPR_RC_JOB_RETURN
;
9021 spin_unlock(&hrrq
->_lock
);
9032 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9033 * @ipr_cmd: ipr command struct
9035 * Description: Cancel any oustanding HCAMs to the IOA.
9038 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9040 static int ipr_reset_cancel_hcam(struct ipr_cmnd
*ipr_cmd
)
9042 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
9043 int rc
= IPR_RC_JOB_CONTINUE
;
9044 struct ipr_cmd_pkt
*cmd_pkt
;
9045 struct ipr_cmnd
*hcam_cmd
;
9046 struct ipr_hrr_queue
*hrrq
= &ioa_cfg
->hrrq
[IPR_INIT_HRRQ
];
9049 ipr_cmd
->job_step
= ipr_reset_cancel_hcam_done
;
9051 if (!hrrq
->ioa_is_dead
) {
9052 if (!list_empty(&ioa_cfg
->hostrcb_pending_q
)) {
9053 list_for_each_entry(hcam_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
9054 if (hcam_cmd
->ioarcb
.cmd_pkt
.cdb
[0] != IPR_HOST_CONTROLLED_ASYNC
)
9057 ipr_cmd
->ioarcb
.res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
9058 ipr_cmd
->ioarcb
.cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
9059 cmd_pkt
= &ipr_cmd
->ioarcb
.cmd_pkt
;
9060 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
9061 cmd_pkt
->cdb
[0] = IPR_CANCEL_REQUEST
;
9062 cmd_pkt
->cdb
[1] = IPR_CANCEL_64BIT_IOARCB
;
9063 cmd_pkt
->cdb
[10] = ((u64
) hcam_cmd
->dma_addr
>> 56) & 0xff;
9064 cmd_pkt
->cdb
[11] = ((u64
) hcam_cmd
->dma_addr
>> 48) & 0xff;
9065 cmd_pkt
->cdb
[12] = ((u64
) hcam_cmd
->dma_addr
>> 40) & 0xff;
9066 cmd_pkt
->cdb
[13] = ((u64
) hcam_cmd
->dma_addr
>> 32) & 0xff;
9067 cmd_pkt
->cdb
[2] = ((u64
) hcam_cmd
->dma_addr
>> 24) & 0xff;
9068 cmd_pkt
->cdb
[3] = ((u64
) hcam_cmd
->dma_addr
>> 16) & 0xff;
9069 cmd_pkt
->cdb
[4] = ((u64
) hcam_cmd
->dma_addr
>> 8) & 0xff;
9070 cmd_pkt
->cdb
[5] = ((u64
) hcam_cmd
->dma_addr
) & 0xff;
9072 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
9073 IPR_CANCEL_TIMEOUT
);
9075 rc
= IPR_RC_JOB_RETURN
;
9076 ipr_cmd
->job_step
= ipr_reset_cancel_hcam
;
9081 ipr_cmd
->job_step
= ipr_reset_alert
;
/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}
9109 * ipr_reset_ucode_download - Download microcode to the adapter
9110 * @ipr_cmd: ipr command struct
9112 * Description: This function checks to see if it there is microcode
9113 * to download to the adapter. If there is, a download is performed.
9116 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9118 static int ipr_reset_ucode_download(struct ipr_cmnd
*ipr_cmd
)
9120 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
9121 struct ipr_sglist
*sglist
= ioa_cfg
->ucode_sglist
;
9124 ipr_cmd
->job_step
= ipr_reset_alert
;
9127 return IPR_RC_JOB_CONTINUE
;
9129 ipr_cmd
->ioarcb
.res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
9130 ipr_cmd
->ioarcb
.cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
9131 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[0] = WRITE_BUFFER
;
9132 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE
;
9133 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[6] = (sglist
->buffer_len
& 0xff0000) >> 16;
9134 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[7] = (sglist
->buffer_len
& 0x00ff00) >> 8;
9135 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[8] = sglist
->buffer_len
& 0x0000ff;
9138 ipr_build_ucode_ioadl64(ipr_cmd
, sglist
);
9140 ipr_build_ucode_ioadl(ipr_cmd
, sglist
);
9141 ipr_cmd
->job_step
= ipr_reset_ucode_download_done
;
9143 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
9144 IPR_WRITE_BUFFER_TIMEOUT
);
9147 return IPR_RC_JOB_RETURN
;
9151 * ipr_reset_shutdown_ioa - Shutdown the adapter
9152 * @ipr_cmd: ipr command struct
9154 * Description: This function issues an adapter shutdown of the
9155 * specified type to the specified adapter as part of the
9156 * adapter reset job.
9159 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9161 static int ipr_reset_shutdown_ioa(struct ipr_cmnd
*ipr_cmd
)
9163 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
9164 enum ipr_shutdown_type shutdown_type
= ipr_cmd
->u
.shutdown_type
;
9165 unsigned long timeout
;
9166 int rc
= IPR_RC_JOB_CONTINUE
;
9169 if (shutdown_type
== IPR_SHUTDOWN_QUIESCE
)
9170 ipr_cmd
->job_step
= ipr_reset_cancel_hcam
;
9171 else if (shutdown_type
!= IPR_SHUTDOWN_NONE
&&
9172 !ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
9173 ipr_cmd
->ioarcb
.res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
9174 ipr_cmd
->ioarcb
.cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
9175 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[0] = IPR_IOA_SHUTDOWN
;
9176 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[1] = shutdown_type
;
9178 if (shutdown_type
== IPR_SHUTDOWN_NORMAL
)
9179 timeout
= IPR_SHUTDOWN_TIMEOUT
;
9180 else if (shutdown_type
== IPR_SHUTDOWN_PREPARE_FOR_NORMAL
)
9181 timeout
= IPR_INTERNAL_TIMEOUT
;
9182 else if (ioa_cfg
->dual_raid
&& ipr_dual_ioa_raid
)
9183 timeout
= IPR_DUAL_IOA_ABBR_SHUTDOWN_TO
;
9185 timeout
= IPR_ABBREV_SHUTDOWN_TIMEOUT
;
9187 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, timeout
);
9189 rc
= IPR_RC_JOB_RETURN
;
9190 ipr_cmd
->job_step
= ipr_reset_ucode_download
;
9192 ipr_cmd
->job_step
= ipr_reset_alert
;
9199 * ipr_reset_ioa_job - Adapter reset job
9200 * @ipr_cmd: ipr command struct
9202 * Description: This function is the job router for the adapter reset job.
9207 static void ipr_reset_ioa_job(struct ipr_cmnd
*ipr_cmd
)
9210 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
9213 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
9215 if (ioa_cfg
->reset_cmd
!= ipr_cmd
) {
9217 * We are doing nested adapter resets and this is
9218 * not the current reset job.
9220 list_add_tail(&ipr_cmd
->queue
,
9221 &ipr_cmd
->hrrq
->hrrq_free_q
);
9225 if (IPR_IOASC_SENSE_KEY(ioasc
)) {
9226 rc
= ipr_cmd
->job_step_failed(ipr_cmd
);
9227 if (rc
== IPR_RC_JOB_RETURN
)
9231 ipr_reinit_ipr_cmnd(ipr_cmd
);
9232 ipr_cmd
->job_step_failed
= ipr_reset_cmd_failed
;
9233 rc
= ipr_cmd
->job_step(ipr_cmd
);
9234 } while (rc
== IPR_RC_JOB_CONTINUE
);
9238 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9239 * @ioa_cfg: ioa config struct
9240 * @job_step: first job step of reset job
9241 * @shutdown_type: shutdown type
9243 * Description: This function will initiate the reset of the given adapter
9244 * starting at the selected job step.
9245 * If the caller needs to wait on the completion of the reset,
9246 * the caller must sleep on the reset_wait_q.
9251 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg
*ioa_cfg
,
9252 int (*job_step
) (struct ipr_cmnd
*),
9253 enum ipr_shutdown_type shutdown_type
)
9255 struct ipr_cmnd
*ipr_cmd
;
9258 ioa_cfg
->in_reset_reload
= 1;
9259 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9260 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
9261 ioa_cfg
->hrrq
[i
].allow_cmds
= 0;
9262 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
9265 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].removing_ioa
) {
9266 ioa_cfg
->scsi_unblock
= 0;
9267 ioa_cfg
->scsi_blocked
= 1;
9268 scsi_block_requests(ioa_cfg
->host
);
9271 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
9272 ioa_cfg
->reset_cmd
= ipr_cmd
;
9273 ipr_cmd
->job_step
= job_step
;
9274 ipr_cmd
->u
.shutdown_type
= shutdown_type
;
9276 ipr_reset_ioa_job(ipr_cmd
);
9280 * ipr_initiate_ioa_reset - Initiate an adapter reset
9281 * @ioa_cfg: ioa config struct
9282 * @shutdown_type: shutdown type
9284 * Description: This function will initiate the reset of the given adapter.
9285 * If the caller needs to wait on the completion of the reset,
9286 * the caller must sleep on the reset_wait_q.
9291 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg
*ioa_cfg
,
9292 enum ipr_shutdown_type shutdown_type
)
9296 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
9299 if (ioa_cfg
->in_reset_reload
) {
9300 if (ioa_cfg
->sdt_state
== GET_DUMP
)
9301 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
9302 else if (ioa_cfg
->sdt_state
== READ_DUMP
)
9303 ioa_cfg
->sdt_state
= ABORT_DUMP
;
9306 if (ioa_cfg
->reset_retries
++ >= IPR_NUM_RESET_RELOAD_RETRIES
) {
9307 dev_err(&ioa_cfg
->pdev
->dev
,
9308 "IOA taken offline - error recovery failed\n");
9310 ioa_cfg
->reset_retries
= 0;
9311 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9312 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
9313 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 1;
9314 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
9318 if (ioa_cfg
->in_ioa_bringdown
) {
9319 ioa_cfg
->reset_cmd
= NULL
;
9320 ioa_cfg
->in_reset_reload
= 0;
9321 ipr_fail_all_ops(ioa_cfg
);
9322 wake_up_all(&ioa_cfg
->reset_wait_q
);
9324 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].removing_ioa
) {
9325 ioa_cfg
->scsi_unblock
= 1;
9326 schedule_work(&ioa_cfg
->work_q
);
9330 ioa_cfg
->in_ioa_bringdown
= 1;
9331 shutdown_type
= IPR_SHUTDOWN_NONE
;
9335 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_shutdown_ioa
,
/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 **/
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

	/* Disallow new interrupts, avoid loop */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the MMIO
 * access to the IOA has been restored
 **/
static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (!ioa_cfg->probe_done)
		pci_save_state(pdev);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 **/
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done)
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
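
/*
 * The routines above and below implement the PCI error recovery (EEH)
 * callbacks for this driver: ipr_pci_error_detected() routes a frozen slot
 * to ipr_pci_frozen(), which holds off I/O via ipr_reset_freeze() until the
 * slot comes back; ipr_pci_mmio_enabled() and ipr_pci_slot_reset() then
 * resume the adapter reset once access is restored, while a permanent
 * failure is handled by ipr_pci_perm_failure().
 */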
9403 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9404 * @pdev: PCI device struct
9406 * Description: This routine is called by the pci error recovery
9407 * code after the PCI slot has been reset, just before we
9408 * should resume normal operations.
9410 static pci_ers_result_t
ipr_pci_slot_reset(struct pci_dev
*pdev
)
9412 unsigned long flags
= 0;
9413 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
9415 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
9416 if (ioa_cfg
->probe_done
) {
9417 if (ioa_cfg
->needs_warm_reset
)
9418 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
9420 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_restore_cfg_space
,
9423 wake_up_all(&ioa_cfg
->eeh_wait_q
);
9424 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
9425 return PCI_ERS_RESULT_RECOVERED
;
9429 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9430 * @pdev: PCI device struct
9432 * Description: This routine is called when the PCI bus has
9433 * permanently failed.
9435 static void ipr_pci_perm_failure(struct pci_dev
*pdev
)
9437 unsigned long flags
= 0;
9438 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
9441 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
9442 if (ioa_cfg
->probe_done
) {
9443 if (ioa_cfg
->sdt_state
== WAIT_FOR_DUMP
)
9444 ioa_cfg
->sdt_state
= ABORT_DUMP
;
9445 ioa_cfg
->reset_retries
= IPR_NUM_RESET_RELOAD_RETRIES
- 1;
9446 ioa_cfg
->in_ioa_bringdown
= 1;
9447 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9448 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
9449 ioa_cfg
->hrrq
[i
].allow_cmds
= 0;
9450 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
9453 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
9455 wake_up_all(&ioa_cfg
->eeh_wait_q
);
9456 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
9460 * ipr_pci_error_detected - Called when a PCI error is detected.
9461 * @pdev: PCI device struct
9462 * @state: PCI channel state
9464 * Description: Called when a PCI error is detected.
9467 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9469 static pci_ers_result_t
ipr_pci_error_detected(struct pci_dev
*pdev
,
9470 pci_channel_state_t state
)
9473 case pci_channel_io_frozen
:
9474 ipr_pci_frozen(pdev
);
9475 return PCI_ERS_RESULT_CAN_RECOVER
;
9476 case pci_channel_io_perm_failure
:
9477 ipr_pci_perm_failure(pdev
);
9478 return PCI_ERS_RESULT_DISCONNECT
;
9483 return PCI_ERS_RESULT_NEED_RESET
;
9487 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9488 * @ioa_cfg: ioa cfg struct
9490 * Description: This is the second phase of adapter initialization
9491 * This function takes care of initilizing the adapter to the point
9492 * where it can accept new commands.
9495 * 0 on success / -EIO on failure
9497 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg
*ioa_cfg
)
9500 unsigned long host_lock_flags
= 0;
9503 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9504 dev_dbg(&ioa_cfg
->pdev
->dev
, "ioa_cfg adx: 0x%p\n", ioa_cfg
);
9505 ioa_cfg
->probe_done
= 1;
9506 if (ioa_cfg
->needs_hard_reset
) {
9507 ioa_cfg
->needs_hard_reset
= 0;
9508 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
9510 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_enable_ioa
,
9512 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9519 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9520 * @ioa_cfg: ioa config struct
9525 static void ipr_free_cmd_blks(struct ipr_ioa_cfg
*ioa_cfg
)
9529 if (ioa_cfg
->ipr_cmnd_list
) {
9530 for (i
= 0; i
< IPR_NUM_CMD_BLKS
; i
++) {
9531 if (ioa_cfg
->ipr_cmnd_list
[i
])
9532 dma_pool_free(ioa_cfg
->ipr_cmd_pool
,
9533 ioa_cfg
->ipr_cmnd_list
[i
],
9534 ioa_cfg
->ipr_cmnd_list_dma
[i
]);
9536 ioa_cfg
->ipr_cmnd_list
[i
] = NULL
;
9540 if (ioa_cfg
->ipr_cmd_pool
)
9541 dma_pool_destroy(ioa_cfg
->ipr_cmd_pool
);
9543 kfree(ioa_cfg
->ipr_cmnd_list
);
9544 kfree(ioa_cfg
->ipr_cmnd_list_dma
);
9545 ioa_cfg
->ipr_cmnd_list
= NULL
;
9546 ioa_cfg
->ipr_cmnd_list_dma
= NULL
;
9547 ioa_cfg
->ipr_cmd_pool
= NULL
;
9551 * ipr_free_mem - Frees memory allocated for an adapter
9552 * @ioa_cfg: ioa cfg struct
9557 static void ipr_free_mem(struct ipr_ioa_cfg
*ioa_cfg
)
9561 kfree(ioa_cfg
->res_entries
);
9562 dma_free_coherent(&ioa_cfg
->pdev
->dev
, sizeof(struct ipr_misc_cbs
),
9563 ioa_cfg
->vpd_cbs
, ioa_cfg
->vpd_cbs_dma
);
9564 ipr_free_cmd_blks(ioa_cfg
);
9566 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++)
9567 dma_free_coherent(&ioa_cfg
->pdev
->dev
,
9568 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
9569 ioa_cfg
->hrrq
[i
].host_rrq
,
9570 ioa_cfg
->hrrq
[i
].host_rrq_dma
);
9572 dma_free_coherent(&ioa_cfg
->pdev
->dev
, ioa_cfg
->cfg_table_size
,
9573 ioa_cfg
->u
.cfg_table
, ioa_cfg
->cfg_table_dma
);
9575 for (i
= 0; i
< IPR_MAX_HCAMS
; i
++) {
9576 dma_free_coherent(&ioa_cfg
->pdev
->dev
,
9577 sizeof(struct ipr_hostrcb
),
9578 ioa_cfg
->hostrcb
[i
],
9579 ioa_cfg
->hostrcb_dma
[i
]);
9582 ipr_free_dump(ioa_cfg
);
9583 kfree(ioa_cfg
->trace
);
/**
 * ipr_free_irqs - Free all allocated IRQs for the adapter.
 * @ioa_cfg:	ipr cfg struct
 *
 * This function frees all allocated IRQs for the
 * specified adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i;

	for (i = 0; i < ioa_cfg->nvectors; i++)
		free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
	pci_free_irq_vectors(pdev);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ipr_free_irqs(ioa_cfg);
	if (ioa_cfg->reset_work_q)
		destroy_workqueue(ioa_cfg->reset_work_q);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
}
9633 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9634 * @ioa_cfg: ioa config struct
9637 * 0 on success / -ENOMEM on allocation failure
9639 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg
*ioa_cfg
)
9641 struct ipr_cmnd
*ipr_cmd
;
9642 struct ipr_ioarcb
*ioarcb
;
9643 dma_addr_t dma_addr
;
9644 int i
, entries_each_hrrq
, hrrq_id
= 0;
9646 ioa_cfg
->ipr_cmd_pool
= dma_pool_create(IPR_NAME
, &ioa_cfg
->pdev
->dev
,
9647 sizeof(struct ipr_cmnd
), 512, 0);
9649 if (!ioa_cfg
->ipr_cmd_pool
)
9652 ioa_cfg
->ipr_cmnd_list
= kcalloc(IPR_NUM_CMD_BLKS
, sizeof(struct ipr_cmnd
*), GFP_KERNEL
);
9653 ioa_cfg
->ipr_cmnd_list_dma
= kcalloc(IPR_NUM_CMD_BLKS
, sizeof(dma_addr_t
), GFP_KERNEL
);
9655 if (!ioa_cfg
->ipr_cmnd_list
|| !ioa_cfg
->ipr_cmnd_list_dma
) {
9656 ipr_free_cmd_blks(ioa_cfg
);
9660 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9661 if (ioa_cfg
->hrrq_num
> 1) {
9663 entries_each_hrrq
= IPR_NUM_INTERNAL_CMD_BLKS
;
9664 ioa_cfg
->hrrq
[i
].min_cmd_id
= 0;
9665 ioa_cfg
->hrrq
[i
].max_cmd_id
=
9666 (entries_each_hrrq
- 1);
9669 IPR_NUM_BASE_CMD_BLKS
/
9670 (ioa_cfg
->hrrq_num
- 1);
9671 ioa_cfg
->hrrq
[i
].min_cmd_id
=
9672 IPR_NUM_INTERNAL_CMD_BLKS
+
9673 (i
- 1) * entries_each_hrrq
;
9674 ioa_cfg
->hrrq
[i
].max_cmd_id
=
9675 (IPR_NUM_INTERNAL_CMD_BLKS
+
9676 i
* entries_each_hrrq
- 1);
9679 entries_each_hrrq
= IPR_NUM_CMD_BLKS
;
9680 ioa_cfg
->hrrq
[i
].min_cmd_id
= 0;
9681 ioa_cfg
->hrrq
[i
].max_cmd_id
= (entries_each_hrrq
- 1);
9683 ioa_cfg
->hrrq
[i
].size
= entries_each_hrrq
;
9686 BUG_ON(ioa_cfg
->hrrq_num
== 0);
9688 i
= IPR_NUM_CMD_BLKS
-
9689 ioa_cfg
->hrrq
[ioa_cfg
->hrrq_num
- 1].max_cmd_id
- 1;
9691 ioa_cfg
->hrrq
[ioa_cfg
->hrrq_num
- 1].size
+= i
;
9692 ioa_cfg
->hrrq
[ioa_cfg
->hrrq_num
- 1].max_cmd_id
+= i
;
9695 for (i
= 0; i
< IPR_NUM_CMD_BLKS
; i
++) {
9696 ipr_cmd
= dma_pool_zalloc(ioa_cfg
->ipr_cmd_pool
,
9697 GFP_KERNEL
, &dma_addr
);
9700 ipr_free_cmd_blks(ioa_cfg
);
9704 ioa_cfg
->ipr_cmnd_list
[i
] = ipr_cmd
;
9705 ioa_cfg
->ipr_cmnd_list_dma
[i
] = dma_addr
;
9707 ioarcb
= &ipr_cmd
->ioarcb
;
9708 ipr_cmd
->dma_addr
= dma_addr
;
9710 ioarcb
->a
.ioarcb_host_pci_addr64
= cpu_to_be64(dma_addr
);
9712 ioarcb
->a
.ioarcb_host_pci_addr
= cpu_to_be32(dma_addr
);
9714 ioarcb
->host_response_handle
= cpu_to_be32(i
<< 2);
9715 if (ioa_cfg
->sis64
) {
9716 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
9717 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl64
));
9718 ioarcb
->u
.sis64_addr_data
.ioasa_host_pci_addr
=
9719 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, s
.ioasa64
));
9721 ioarcb
->write_ioadl_addr
=
9722 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl
));
9723 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
9724 ioarcb
->ioasa_host_pci_addr
=
9725 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, s
.ioasa
));
9727 ioarcb
->ioasa_len
= cpu_to_be16(sizeof(struct ipr_ioasa
));
9728 ipr_cmd
->cmd_index
= i
;
9729 ipr_cmd
->ioa_cfg
= ioa_cfg
;
9730 ipr_cmd
->sense_buffer_dma
= dma_addr
+
9731 offsetof(struct ipr_cmnd
, sense_buffer
);
9733 ipr_cmd
->ioarcb
.cmd_pkt
.hrrq_id
= hrrq_id
;
9734 ipr_cmd
->hrrq
= &ioa_cfg
->hrrq
[hrrq_id
];
9735 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
9736 if (i
>= ioa_cfg
->hrrq
[hrrq_id
].max_cmd_id
)
9744 * ipr_alloc_mem - Allocate memory for an adapter
9745 * @ioa_cfg: ioa config struct
9748 * 0 on success / non-zero for error
9750 static int ipr_alloc_mem(struct ipr_ioa_cfg
*ioa_cfg
)
9752 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
9753 int i
, rc
= -ENOMEM
;
9756 ioa_cfg
->res_entries
= kcalloc(ioa_cfg
->max_devs_supported
,
9757 sizeof(struct ipr_resource_entry
),
9760 if (!ioa_cfg
->res_entries
)
9763 for (i
= 0; i
< ioa_cfg
->max_devs_supported
; i
++) {
9764 list_add_tail(&ioa_cfg
->res_entries
[i
].queue
, &ioa_cfg
->free_res_q
);
9765 ioa_cfg
->res_entries
[i
].ioa_cfg
= ioa_cfg
;
9768 ioa_cfg
->vpd_cbs
= dma_alloc_coherent(&pdev
->dev
,
9769 sizeof(struct ipr_misc_cbs
),
9770 &ioa_cfg
->vpd_cbs_dma
,
9773 if (!ioa_cfg
->vpd_cbs
)
9774 goto out_free_res_entries
;
9776 if (ipr_alloc_cmd_blks(ioa_cfg
))
9777 goto out_free_vpd_cbs
;
9779 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9780 ioa_cfg
->hrrq
[i
].host_rrq
= dma_alloc_coherent(&pdev
->dev
,
9781 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
9782 &ioa_cfg
->hrrq
[i
].host_rrq_dma
,
9785 if (!ioa_cfg
->hrrq
[i
].host_rrq
) {
9787 dma_free_coherent(&pdev
->dev
,
9788 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
9789 ioa_cfg
->hrrq
[i
].host_rrq
,
9790 ioa_cfg
->hrrq
[i
].host_rrq_dma
);
9791 goto out_ipr_free_cmd_blocks
;
9793 ioa_cfg
->hrrq
[i
].ioa_cfg
= ioa_cfg
;
9796 ioa_cfg
->u
.cfg_table
= dma_alloc_coherent(&pdev
->dev
,
9797 ioa_cfg
->cfg_table_size
,
9798 &ioa_cfg
->cfg_table_dma
,
9801 if (!ioa_cfg
->u
.cfg_table
)
9802 goto out_free_host_rrq
;
9804 for (i
= 0; i
< IPR_MAX_HCAMS
; i
++) {
9805 ioa_cfg
->hostrcb
[i
] = dma_alloc_coherent(&pdev
->dev
,
9806 sizeof(struct ipr_hostrcb
),
9807 &ioa_cfg
->hostrcb_dma
[i
],
9810 if (!ioa_cfg
->hostrcb
[i
])
9811 goto out_free_hostrcb_dma
;
9813 ioa_cfg
->hostrcb
[i
]->hostrcb_dma
=
9814 ioa_cfg
->hostrcb_dma
[i
] + offsetof(struct ipr_hostrcb
, hcam
);
9815 ioa_cfg
->hostrcb
[i
]->ioa_cfg
= ioa_cfg
;
9816 list_add_tail(&ioa_cfg
->hostrcb
[i
]->queue
, &ioa_cfg
->hostrcb_free_q
);
9819 ioa_cfg
->trace
= kcalloc(IPR_NUM_TRACE_ENTRIES
,
9820 sizeof(struct ipr_trace_entry
),
9823 if (!ioa_cfg
->trace
)
9824 goto out_free_hostrcb_dma
;
9831 out_free_hostrcb_dma
:
9833 dma_free_coherent(&pdev
->dev
, sizeof(struct ipr_hostrcb
),
9834 ioa_cfg
->hostrcb
[i
],
9835 ioa_cfg
->hostrcb_dma
[i
]);
9837 dma_free_coherent(&pdev
->dev
, ioa_cfg
->cfg_table_size
,
9838 ioa_cfg
->u
.cfg_table
, ioa_cfg
->cfg_table_dma
);
9840 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9841 dma_free_coherent(&pdev
->dev
,
9842 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
9843 ioa_cfg
->hrrq
[i
].host_rrq
,
9844 ioa_cfg
->hrrq
[i
].host_rrq_dma
);
9846 out_ipr_free_cmd_blocks
:
9847 ipr_free_cmd_blks(ioa_cfg
);
9849 dma_free_coherent(&pdev
->dev
, sizeof(struct ipr_misc_cbs
),
9850 ioa_cfg
->vpd_cbs
, ioa_cfg
->vpd_cbs_dma
);
9851 out_free_res_entries
:
9852 kfree(ioa_cfg
->res_entries
);
/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	none
 **/
static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}
9879 * ipr_init_regs - Initialize IOA registers
9880 * @ioa_cfg: ioa config struct
9885 static void ipr_init_regs(struct ipr_ioa_cfg
*ioa_cfg
)
9887 const struct ipr_interrupt_offsets
*p
;
9888 struct ipr_interrupts
*t
;
9891 p
= &ioa_cfg
->chip_cfg
->regs
;
9893 base
= ioa_cfg
->hdw_dma_regs
;
9895 t
->set_interrupt_mask_reg
= base
+ p
->set_interrupt_mask_reg
;
9896 t
->clr_interrupt_mask_reg
= base
+ p
->clr_interrupt_mask_reg
;
9897 t
->clr_interrupt_mask_reg32
= base
+ p
->clr_interrupt_mask_reg32
;
9898 t
->sense_interrupt_mask_reg
= base
+ p
->sense_interrupt_mask_reg
;
9899 t
->sense_interrupt_mask_reg32
= base
+ p
->sense_interrupt_mask_reg32
;
9900 t
->clr_interrupt_reg
= base
+ p
->clr_interrupt_reg
;
9901 t
->clr_interrupt_reg32
= base
+ p
->clr_interrupt_reg32
;
9902 t
->sense_interrupt_reg
= base
+ p
->sense_interrupt_reg
;
9903 t
->sense_interrupt_reg32
= base
+ p
->sense_interrupt_reg32
;
9904 t
->ioarrin_reg
= base
+ p
->ioarrin_reg
;
9905 t
->sense_uproc_interrupt_reg
= base
+ p
->sense_uproc_interrupt_reg
;
9906 t
->sense_uproc_interrupt_reg32
= base
+ p
->sense_uproc_interrupt_reg32
;
9907 t
->set_uproc_interrupt_reg
= base
+ p
->set_uproc_interrupt_reg
;
9908 t
->set_uproc_interrupt_reg32
= base
+ p
->set_uproc_interrupt_reg32
;
9909 t
->clr_uproc_interrupt_reg
= base
+ p
->clr_uproc_interrupt_reg
;
9910 t
->clr_uproc_interrupt_reg32
= base
+ p
->clr_uproc_interrupt_reg32
;
9912 if (ioa_cfg
->sis64
) {
9913 t
->init_feedback_reg
= base
+ p
->init_feedback_reg
;
9914 t
->dump_addr_reg
= base
+ p
->dump_addr_reg
;
9915 t
->dump_data_reg
= base
+ p
->dump_data_reg
;
9916 t
->endian_swap_reg
= base
+ p
->endian_swap_reg
;
9921 * ipr_init_ioa_cfg - Initialize IOA config struct
9922 * @ioa_cfg: ioa config struct
9923 * @host: scsi host struct
9924 * @pdev: PCI dev struct
9929 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg
*ioa_cfg
,
9930 struct Scsi_Host
*host
, struct pci_dev
*pdev
)
9934 ioa_cfg
->host
= host
;
9935 ioa_cfg
->pdev
= pdev
;
9936 ioa_cfg
->log_level
= ipr_log_level
;
9937 ioa_cfg
->doorbell
= IPR_DOORBELL
;
9938 sprintf(ioa_cfg
->eye_catcher
, IPR_EYECATCHER
);
9939 sprintf(ioa_cfg
->trace_start
, IPR_TRACE_START_LABEL
);
9940 sprintf(ioa_cfg
->cfg_table_start
, IPR_CFG_TBL_START
);
9941 sprintf(ioa_cfg
->resource_table_label
, IPR_RES_TABLE_LABEL
);
9942 sprintf(ioa_cfg
->ipr_hcam_label
, IPR_HCAM_LABEL
);
9943 sprintf(ioa_cfg
->ipr_cmd_label
, IPR_CMD_LABEL
);
9945 INIT_LIST_HEAD(&ioa_cfg
->hostrcb_free_q
);
9946 INIT_LIST_HEAD(&ioa_cfg
->hostrcb_pending_q
);
9947 INIT_LIST_HEAD(&ioa_cfg
->hostrcb_report_q
);
9948 INIT_LIST_HEAD(&ioa_cfg
->free_res_q
);
9949 INIT_LIST_HEAD(&ioa_cfg
->used_res_q
);
9950 INIT_WORK(&ioa_cfg
->work_q
, ipr_worker_thread
);
9951 INIT_WORK(&ioa_cfg
->scsi_add_work_q
, ipr_add_remove_thread
);
9952 init_waitqueue_head(&ioa_cfg
->reset_wait_q
);
9953 init_waitqueue_head(&ioa_cfg
->msi_wait_q
);
9954 init_waitqueue_head(&ioa_cfg
->eeh_wait_q
);
9955 ioa_cfg
->sdt_state
= INACTIVE
;
9957 ipr_initialize_bus_attr(ioa_cfg
);
9958 ioa_cfg
->max_devs_supported
= ipr_max_devs
;
9960 if (ioa_cfg
->sis64
) {
9961 host
->max_id
= IPR_MAX_SIS64_TARGETS_PER_BUS
;
9962 host
->max_lun
= IPR_MAX_SIS64_LUNS_PER_TARGET
;
9963 if (ipr_max_devs
> IPR_MAX_SIS64_DEVS
)
9964 ioa_cfg
->max_devs_supported
= IPR_MAX_SIS64_DEVS
;
9965 ioa_cfg
->cfg_table_size
= (sizeof(struct ipr_config_table_hdr64
)
9966 + ((sizeof(struct ipr_config_table_entry64
)
9967 * ioa_cfg
->max_devs_supported
)));
9969 host
->max_id
= IPR_MAX_NUM_TARGETS_PER_BUS
;
9970 host
->max_lun
= IPR_MAX_NUM_LUNS_PER_TARGET
;
9971 if (ipr_max_devs
> IPR_MAX_PHYSICAL_DEVS
)
9972 ioa_cfg
->max_devs_supported
= IPR_MAX_PHYSICAL_DEVS
;
9973 ioa_cfg
->cfg_table_size
= (sizeof(struct ipr_config_table_hdr
)
9974 + ((sizeof(struct ipr_config_table_entry
)
9975 * ioa_cfg
->max_devs_supported
)));
9978 host
->max_channel
= IPR_VSET_BUS
;
9979 host
->unique_id
= host
->host_no
;
9980 host
->max_cmd_len
= IPR_MAX_CDB_LEN
;
9981 host
->can_queue
= ioa_cfg
->max_cmds
;
9982 pci_set_drvdata(pdev
, ioa_cfg
);
9984 for (i
= 0; i
< ARRAY_SIZE(ioa_cfg
->hrrq
); i
++) {
9985 INIT_LIST_HEAD(&ioa_cfg
->hrrq
[i
].hrrq_free_q
);
9986 INIT_LIST_HEAD(&ioa_cfg
->hrrq
[i
].hrrq_pending_q
);
9987 spin_lock_init(&ioa_cfg
->hrrq
[i
]._lock
);
9989 ioa_cfg
->hrrq
[i
].lock
= ioa_cfg
->host
->host_lock
;
9991 ioa_cfg
->hrrq
[i
].lock
= &ioa_cfg
->hrrq
[i
]._lock
;
9996 * ipr_get_chip_info - Find adapter chip information
9997 * @dev_id: PCI device id struct
10000 * ptr to chip information on success / NULL on failure
10002 static const struct ipr_chip_t
*
10003 ipr_get_chip_info(const struct pci_device_id
*dev_id
)
10007 for (i
= 0; i
< ARRAY_SIZE(ipr_chip
); i
++)
10008 if (ipr_chip
[i
].vendor
== dev_id
->vendor
&&
10009 ipr_chip
[i
].device
== dev_id
->device
)
10010 return &ipr_chip
[i
];
10015 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10016 * during probe time
10017 * @ioa_cfg: ioa config struct
10022 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg
*ioa_cfg
)
10024 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
10026 if (pci_channel_offline(pdev
)) {
10027 wait_event_timeout(ioa_cfg
->eeh_wait_q
,
10028 !pci_channel_offline(pdev
),
10029 IPR_PCI_ERROR_RECOVERY_TIMEOUT
);
10030 pci_restore_state(pdev
);
10034 static void name_msi_vectors(struct ipr_ioa_cfg
*ioa_cfg
)
10036 int vec_idx
, n
= sizeof(ioa_cfg
->vectors_info
[0].desc
) - 1;
10038 for (vec_idx
= 0; vec_idx
< ioa_cfg
->nvectors
; vec_idx
++) {
10039 snprintf(ioa_cfg
->vectors_info
[vec_idx
].desc
, n
,
10040 "host%d-%d", ioa_cfg
->host
->host_no
, vec_idx
);
10041 ioa_cfg
->vectors_info
[vec_idx
].
10042 desc
[strlen(ioa_cfg
->vectors_info
[vec_idx
].desc
)] = 0;
10046 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg
*ioa_cfg
,
10047 struct pci_dev
*pdev
)
10051 for (i
= 1; i
< ioa_cfg
->nvectors
; i
++) {
10052 rc
= request_irq(pci_irq_vector(pdev
, i
),
10055 ioa_cfg
->vectors_info
[i
].desc
,
10056 &ioa_cfg
->hrrq
[i
]);
10059 free_irq(pci_irq_vector(pdev
, i
),
10060 &ioa_cfg
->hrrq
[i
]);
10068 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10069 * @pdev: PCI device struct
10071 * Description: Simply set the msi_received flag to 1 indicating that
10072 * Message Signaled Interrupts are supported.
10075 * 0 on success / non-zero on failure
10077 static irqreturn_t
ipr_test_intr(int irq
, void *devp
)
10079 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)devp
;
10080 unsigned long lock_flags
= 0;
10081 irqreturn_t rc
= IRQ_HANDLED
;
10083 dev_info(&ioa_cfg
->pdev
->dev
, "Received IRQ : %d\n", irq
);
10084 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
10086 ioa_cfg
->msi_received
= 1;
10087 wake_up(&ioa_cfg
->msi_wait_q
);
10089 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;
	int irq = pci_irq_vector(pdev, 0);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(irq, ioa_cfg);

	return rc;
}
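/*
 * Summary of the test sequence above (no additional behaviour implied):
 * ipr_test_intr() is registered on vector 0, a debug-acknowledge interrupt is
 * forced by writing IPR_PCII_IO_DEBUG_ACKNOWLEDGE, and the routine then waits
 * up to HZ jiffies (one second) for the handler to set msi_received.  If the
 * interrupt never arrives, the caller treats MSI/MSI-X as unusable and drops
 * back to a single legacy vector.
 */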
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags, driver_lock_flags;
	unsigned int irq_flag;

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	rc = pci_enable_device(pdev);

	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "Cannot enable adapter\n");
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			goto out_release_regions;
		}
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_regs(ioa_cfg);

	if (ioa_cfg->sis64) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
			rc = dma_set_mask_and_coherent(&pdev->dev,
						       DMA_BIT_MASK(32));
		}
	} else
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Issue MMIO read to ensure card is not in EEH */
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
	ipr_wait_for_pci_err_recovery(ioa_cfg);

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

	irq_flag = PCI_IRQ_LEGACY;
	if (ioa_cfg->ipr_chip->has_msi)
		irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
	rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
	if (rc < 0) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		goto cleanup_nomem;
	}
	ioa_cfg->nvectors = rc;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		ioa_cfg->clear_isr = 1;

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		pci_set_master(pdev);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

	if (pdev->msi_enabled || pdev->msix_enabled) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		switch (rc) {
		case 0:
			dev_info(&pdev->dev,
				 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
				 pdev->msix_enabled ? "-X" : "");
			break;
		case -EOPNOTSUPP:
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			pci_free_irq_vectors(pdev);

			ioa_cfg->nvectors = 1;
			ioa_cfg->clear_isr = 1;
			break;
		default:
			goto out_msi_disable;
		}
	}

	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				 (unsigned int)num_online_cpus(),
				 (unsigned int)IPR_MAX_HRRQ_NUM);

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nolog;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (pdev->msi_enabled || pdev->msix_enabled) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
				 ioa_cfg->vectors_info[0].desc,
				 &ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
				 IRQF_SHARED,
				 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;

		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
								WQ_MEM_RECLAIM, host->host_no);

		if (!ioa_cfg->reset_work_q) {
			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
			rc = -ENOMEM;
			goto out_free_irq;
		}
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

out:
	return rc;

out_free_irq:
	ipr_free_irqs(ioa_cfg);
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	ipr_wait_for_pci_err_recovery(ioa_cfg);
	pci_free_irq_vectors(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
	goto out;
}
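/*
 * A note on the unwind path above: the error labels release resources in the
 * reverse order of acquisition (registered IRQs, driver memory, IRQ vectors,
 * the mapped register BAR, the enabled PCI device, the requested regions, and
 * finally the Scsi_Host reference), so each failure point only jumps to the
 * label matching what has already been set up.
 */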
/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
}
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	if (ioa_cfg->reset_work_q)
		flush_workqueue(ioa_cfg->reset_work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);
}
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_ioa_async_err_log);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);
}
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_ioa_async_err_log);

	if (rc) {
		ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
				     &ipr_dump_attr);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_ioa_async_err_log);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	ioa_cfg->scan_enabled = 1;
	schedule_work(&ioa_cfg->work_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
				      ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}

	scsi_scan_host(ioa_cfg->host);

	return 0;
}
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
		shutdown_type = IPR_SHUTDOWN_QUIESCE;

	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
		ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}
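/*
 * Summary of the fast-reboot case above: when ipr_fast_reboot is set and the
 * system is restarting, a SIS-64 adapter is quiesced (IPR_SHUTDOWN_QUIESCE)
 * rather than put through the normal cache-flush shutdown, and its IRQs are
 * freed and the PCI device disabled once the reset has completed.
 */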
static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
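/*
 * MODULE_DEVICE_TABLE() above exports ipr_pci_table as module alias
 * information, so userspace (udev/modprobe) can autoload this driver when a
 * PCI function matching one of the listed vendor/device/subsystem IDs is
 * discovered.  The same table is handed to the PCI core through
 * ipr_driver.id_table below to decide which devices ipr_probe() is called for.
 */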
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	system state change event
 * @buf:	unused
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}
static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};
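/*
 * ipr_notifier is registered with register_reboot_notifier() in ipr_init()
 * below, which places ipr_halt() on the kernel's reboot notifier chain.  On
 * SYS_RESTART, SYS_HALT or SYS_POWER_OFF it issues an IOA shutdown (prepare
 * for normal) to every adapter that is still accepting commands, so the write
 * cache is flushed before the system goes down.
 */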
/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}
/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);