/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- SCSI device hot plug
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.cache_line_size = 0x20,
		.set_interrupt_mask_reg = 0x0022C,
		.clr_interrupt_mask_reg = 0x00230,
		.clr_interrupt_mask_reg32 = 0x00230,
		.sense_interrupt_mask_reg = 0x0022C,
		.sense_interrupt_mask_reg32 = 0x0022C,
		.clr_interrupt_reg = 0x00228,
		.clr_interrupt_reg32 = 0x00228,
		.sense_interrupt_reg = 0x00224,
		.sense_interrupt_reg32 = 0x00224,
		.ioarrin_reg = 0x00404,
		.sense_uproc_interrupt_reg = 0x00214,
		.sense_uproc_interrupt_reg32 = 0x00214,
		.set_uproc_interrupt_reg = 0x00214,
		.set_uproc_interrupt_reg32 = 0x00214,
		.clr_uproc_interrupt_reg = 0x00218,
		.clr_uproc_interrupt_reg32 = 0x00218
	},
	{ /* Snipe and Scamp */
		.cache_line_size = 0x20,
		.set_interrupt_mask_reg = 0x00288,
		.clr_interrupt_mask_reg = 0x0028C,
		.clr_interrupt_mask_reg32 = 0x0028C,
		.sense_interrupt_mask_reg = 0x00288,
		.sense_interrupt_mask_reg32 = 0x00288,
		.clr_interrupt_reg = 0x00284,
		.clr_interrupt_reg32 = 0x00284,
		.sense_interrupt_reg = 0x00280,
		.sense_interrupt_reg32 = 0x00280,
		.ioarrin_reg = 0x00504,
		.sense_uproc_interrupt_reg = 0x00290,
		.sense_uproc_interrupt_reg32 = 0x00290,
		.set_uproc_interrupt_reg = 0x00290,
		.set_uproc_interrupt_reg32 = 0x00290,
		.clr_uproc_interrupt_reg = 0x00294,
		.clr_uproc_interrupt_reg32 = 0x00294
	},
	{
		.cache_line_size = 0x20,
		.set_interrupt_mask_reg = 0x00010,
		.clr_interrupt_mask_reg = 0x00018,
		.clr_interrupt_mask_reg32 = 0x0001C,
		.sense_interrupt_mask_reg = 0x00010,
		.sense_interrupt_mask_reg32 = 0x00014,
		.clr_interrupt_reg = 0x00008,
		.clr_interrupt_reg32 = 0x0000C,
		.sense_interrupt_reg = 0x00000,
		.sense_interrupt_reg32 = 0x00004,
		.ioarrin_reg = 0x00070,
		.sense_uproc_interrupt_reg = 0x00020,
		.sense_uproc_interrupt_reg32 = 0x00024,
		.set_uproc_interrupt_reg = 0x00020,
		.set_uproc_interrupt_reg32 = 0x00024,
		.clr_uproc_interrupt_reg = 0x00028,
		.clr_uproc_interrupt_reg32 = 0x0002C,
		.init_feedback_reg = 0x0005C,
		.dump_addr_reg = 0x00064,
		.dump_data_reg = 0x00068,
		.endian_swap_reg = 0x00084
	}
};
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
203 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
204 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
205 module_param_named(max_speed
, ipr_max_speed
, uint
, 0);
206 MODULE_PARM_DESC(max_speed
, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
207 module_param_named(log_level
, ipr_log_level
, uint
, 0);
208 MODULE_PARM_DESC(log_level
, "Set to 0 - 4 for increasing verbosity of device driver");
209 module_param_named(testmode
, ipr_testmode
, int, 0);
210 MODULE_PARM_DESC(testmode
, "DANGEROUS!!! Allows unsupported configurations");
211 module_param_named(fastfail
, ipr_fastfail
, int, S_IRUGO
| S_IWUSR
);
212 MODULE_PARM_DESC(fastfail
, "Reduce timeouts and retries");
213 module_param_named(transop_timeout
, ipr_transop_timeout
, int, 0);
214 MODULE_PARM_DESC(transop_timeout
, "Time in seconds to wait for adapter to come operational (default: 300)");
215 module_param_named(debug
, ipr_debug
, int, S_IRUGO
| S_IWUSR
);
216 MODULE_PARM_DESC(debug
, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
217 module_param_named(dual_ioa_raid
, ipr_dual_ioa_raid
, int, 0);
218 MODULE_PARM_DESC(dual_ioa_raid
, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
219 module_param_named(max_devs
, ipr_max_devs
, int, 0);
220 MODULE_PARM_DESC(max_devs
, "Specify the maximum number of physical devices. "
221 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS
) "]");
222 module_param_named(number_of_msix
, ipr_number_of_msix
, int, 0);
223 MODULE_PARM_DESC(number_of_msix
, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(IPR_DRIVER_VERSION
);
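/*
 * Usage note (illustrative, not part of the parameter descriptions above):
 * these parameters can be given at module load time, for example
 *   modprobe ipr max_speed=2 log_level=2 number_of_msix=4
 * or on the kernel command line for a built-in driver, e.g.
 *   ipr.max_speed=2 ipr.log_level=2
 * The parameters registered with S_IRUGO | S_IWUSR permissions (fastfail and
 * debug) are additionally visible and writable at runtime through
 * /sys/module/ipr/parameters/.
 */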
/* A constant array of IOASCs/URCs/Error Messages */
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	"Logical Unit in process of becoming ready"},
	"Initializing command required"},
	"34FF: Disk device format in progress"},
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	"Synchronization required"},
	"IOA microcode download required"},
	"Device bus connection is prohibited by host"},
	"No ready, IOA shutdown"},
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	"ATA device status error"},
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, commands not allowed to this device"},
	"Illegal request, command not allowed to a secondary adapter"},
	"Illegal request, command not allowed to a non-optimized resource"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"Illegal request, dual adapter support not enabled"},
	"Illegal request, another cable connector was physically disabled"},
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	"Data protect, other volume set problem"},
	"Aborted command, invalid descriptor"},
	"Target operating conditions have changed, dual adapter takeover"},
	"Aborted command, medium removal prevented"},
	"Command terminated by host"},
	"Aborted command, command terminated by host"}
};
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
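/*
 * Note on the trace hook (summarizing the code above): when
 * CONFIG_SCSI_IPR_TRACE is not set, ipr_trc_hook() compiles away to an empty
 * statement, so the IPR_TRACE_START/IPR_TRACE_FINISH call sites in the hot
 * paths below add no overhead. When tracing is enabled, entries land in a
 * fixed-size buffer: the atomic index is masked with IPR_TRACE_INDEX_MASK,
 * so the buffer behaves as a ring and the newest entries overwrite the
 * oldest ones.
 */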
/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}
/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
				     struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}
/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					 temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
}
/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
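/*
 * Worked example (illustrative): the IOARCB size is encoded in the low bits
 * of the address written to the ioarrin register above. Assuming each
 * struct ipr_ioadl64_desc is 16 bytes (an assumption here, not stated in
 * this excerpt), a command with nine or more scatter/gather entries needs
 * more than 128 bytes of inline IOADL space, so 0x4 (512-byte IOARCB) is
 * or'ed in on top of the default 0x1 (256-byte IOARCB) before the writeq().
 */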
/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}
/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
		       sizeof(res->res_path));

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}

			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}
/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}
/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (buffer - p));

	return buffer;
}
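/*
 * Example output (illustrative): a res_path of { 0x00, 0x02, 0x5C, 0xff, ... }
 * is rendered by __ipr_format_res_path() as "00-02-5C"; ipr_format_res_path()
 * additionally prefixes the SCSI host number, e.g. "2/00-02-5C" for host 2.
 */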
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			       sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
		       sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);
	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;
	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 *	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}
/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_sis64_config_error - Log a device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err("Device %d : %s", i + 1,
			__ipr_format_res_path(dev_entry->res_path,
					      buffer, sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");
	}
}
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:		IOA error data
 * @len:		data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
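/*
 * Note on ipr_log_hex_data(): callers pass @len in bytes and @data as an
 * array of big-endian 32-bit words, so each printed line starts with the
 * byte offset of the first word on that line followed by four words. When
 * the log level is at or below IPR_DEFAULT_LOG_LEVEL the dump is capped at
 * IPR_DEFAULT_MAX_ERROR_DUMP to keep the kernel log manageable.
 */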
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
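/*
 * Note: SIS64 adapters report type 17 errors through the 64-bit error
 * layout (hcam.u.error64) while older adapters use the 32-bit layout
 * (hcam.u.error); ipr_log_enhanced_dual_ioa_error() above selects the
 * correct overlay based on ioa_cfg->sis64 before logging.
 */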
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:		fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
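/*
 * Note: a fabric descriptor's path_state byte encodes two independent
 * fields. In ipr_log_fabric_path() above, IPR_PATH_ACTIVE_MASK selects the
 * active/inactive bits matched against path_active_desc[] and
 * IPR_PATH_STATE_MASK selects the health bits matched against
 * path_state_desc[]; anything that matches neither table falls through to
 * the raw hex trace at the end of the function.
 */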
/**
 * ipr_log64_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:		fabric descriptor
 *
 * Return value:
 * 	none
 **/
static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
				  struct ipr_hostrcb64_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
				     path_active_desc[i].desc, path_state_desc[j].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
						fabric->res_path,
						buffer, sizeof(buffer)));
			return;
		}
	}

	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
				    buffer, sizeof(buffer)));
}
static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:		fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
 * ipr_log64_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:		fabric path element struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb64_config_element *cfg)
{
	int i, j;
	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
				     path_status_desc[j].desc, path_type_desc[i].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
					cfg->res_path, buffer, sizeof(buffer)),
					link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					be32_to_cpu(cfg->wwid[0]),
					be32_to_cpu(cfg->wwid[1]));
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status,
		     ipr_format_res_path(hostrcb->ioa_cfg,
			cfg->res_path, buffer, sizeof(buffer)),
			link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
			be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
 * ipr_log_sis64_array_error - Log a sis64 array error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
				      struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_24_error *error;
	struct ipr_hostrcb64_array_data_entry *array_entry;
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error64.u.type_24_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %s\n",
		error->protection_level,
		ipr_format_res_path(ioa_cfg, error->last_res_path,
			buffer, sizeof(buffer)));

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, error->num_entries,
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {

		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (error->exposed_mode_adn == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_err("Array Member %d:\n", i);
		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_err("Current Location: %s\n",
			ipr_format_res_path(ioa_cfg, array_entry->res_path,
				buffer, sizeof(buffer)));
		ipr_err("Expected Location: %s\n",
			ipr_format_res_path(ioa_cfg,
				array_entry->expected_res_path,
				buffer, sizeof(buffer)));

		ipr_err_separator;
	}
}
/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}
/**
 * ipr_log_sis64_device_error - Log a sis64 device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
					struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_21_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_21_error;

	ipr_err("-----Failing Device Information-----\n");
	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
		be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
	ipr_err("Device Resource Path: %s\n",
		__ipr_format_res_path(error->res_path,
				      buffer, sizeof(buffer)));
	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
	ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
	ipr_err("SCSI Sense Data:\n");
	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
	ipr_err("SCSI Command Descriptor Block: \n");
	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));

	ipr_err("Additional IOA Data:\n");
	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
}
/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}
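/*
 * Note: entry 0 of ipr_error_table is the catch-all "unknown error" entry,
 * so falling off the end of the lookup loop above returns index 0. The raw
 * IOASC is first masked with IPR_IOASC_IOASC_MASK so that only the
 * table-significant bits are compared.
 */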
/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;
	struct ipr_hostrcb_type_21_error *error;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
	    hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
		error = &hostrcb->hcam.u.error64.u.type_21_error;

		if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
			ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
				return;
	}

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_21:
		ipr_log_sis64_device_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}
/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 fd_ioasc;

	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
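/*
 * Note: the hostrcb used for this HCAM is handed straight back to the
 * adapter through ipr_send_hcam() at the end of ipr_process_error(), so
 * the same buffer is continuously recycled for future error notifications.
 */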
/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_oper_timeout -  Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
2653 * ipr_find_ses_entry - Find matching SES in SES table
2654 * @res: resource entry struct of SES
2657 * pointer to SES table entry / NULL on failure
2659 static const struct ipr_ses_table_entry
*
2660 ipr_find_ses_entry(struct ipr_resource_entry
*res
)
2663 struct ipr_std_inq_vpids
*vpids
;
2664 const struct ipr_ses_table_entry
*ste
= ipr_ses_table
;
2666 for (i
= 0; i
< ARRAY_SIZE(ipr_ses_table
); i
++, ste
++) {
2667 for (j
= 0, matches
= 0; j
< IPR_PROD_ID_LEN
; j
++) {
2668 if (ste
->compare_product_id_byte
[j
] == 'X') {
2669 vpids
= &res
->std_inq_data
.vpids
;
2670 if (vpids
->product_id
[j
] == ste
->product_id
[j
])
2678 if (matches
== IPR_PROD_ID_LEN
)
2686 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2687 * @ioa_cfg: ioa config struct
2689 * @bus_width: bus width
2692 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2693 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2694 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2695 * max 160MHz = max 320MB/sec).
2697 static u32
ipr_get_max_scsi_speed(struct ipr_ioa_cfg
*ioa_cfg
, u8 bus
, u8 bus_width
)
2699 struct ipr_resource_entry
*res
;
2700 const struct ipr_ses_table_entry
*ste
;
2701 u32 max_xfer_rate
= IPR_MAX_SCSI_RATE(bus_width
);
2703 /* Loop through each config table entry in the config table buffer */
2704 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
2705 if (!(IPR_IS_SES_DEVICE(res
->std_inq_data
)))
2708 if (bus
!= res
->bus
)
2711 if (!(ste
= ipr_find_ses_entry(res
)))
2714 max_xfer_rate
= (ste
->max_bus_speed_limit
* 10) / (bus_width
/ 8);
2717 return max_xfer_rate
;
2721 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2722 * @ioa_cfg: ioa config struct
2723 * @max_delay: max delay in micro-seconds to wait
2725 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2728 * 0 on success / other on failure
2730 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg
*ioa_cfg
, int max_delay
)
2732 volatile u32 pcii_reg
;
2735 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2736 while (delay
< max_delay
) {
2737 pcii_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
2739 if (pcii_reg
& IPR_PCII_IO_DEBUG_ACKNOWLEDGE
)
2742 /* udelay cannot be used if delay is more than a few milliseconds */
2743 if ((delay
/ 1000) > MAX_UDELAY_MS
)
2744 mdelay(delay
/ 1000);
2754 * ipr_get_sis64_dump_data_section - Dump IOA memory
2755 * @ioa_cfg: ioa config struct
2756 * @start_addr: adapter address to dump
2757 * @dest: destination kernel buffer
2758 * @length_in_words: length to dump in 4 byte words
2763 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg
*ioa_cfg
,
2765 __be32
*dest
, u32 length_in_words
)
2769 for (i
= 0; i
< length_in_words
; i
++) {
2770 writel(start_addr
+(i
*4), ioa_cfg
->regs
.dump_addr_reg
);
2771 *dest
= cpu_to_be32(readl(ioa_cfg
->regs
.dump_data_reg
));
2779 * ipr_get_ldump_data_section - Dump IOA memory
2780 * @ioa_cfg: ioa config struct
2781 * @start_addr: adapter address to dump
2782 * @dest: destination kernel buffer
2783 * @length_in_words: length to dump in 4 byte words
2786 * 0 on success / -EIO on failure
2788 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg
*ioa_cfg
,
2790 __be32
*dest
, u32 length_in_words
)
2792 volatile u32 temp_pcii_reg
;
2796 return ipr_get_sis64_dump_data_section(ioa_cfg
, start_addr
,
2797 dest
, length_in_words
);
2799 /* Write IOA interrupt reg starting LDUMP state */
2800 writel((IPR_UPROCI_RESET_ALERT
| IPR_UPROCI_IO_DEBUG_ALERT
),
2801 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2803 /* Wait for IO debug acknowledge */
2804 if (ipr_wait_iodbg_ack(ioa_cfg
,
2805 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC
)) {
2806 dev_err(&ioa_cfg
->pdev
->dev
,
2807 "IOA dump long data transfer timeout\n");
2811 /* Signal LDUMP interlocked - clear IO debug ack */
2812 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2813 ioa_cfg
->regs
.clr_interrupt_reg
);
2815 /* Write Mailbox with starting address */
2816 writel(start_addr
, ioa_cfg
->ioa_mailbox
);
2818 /* Signal address valid - clear IOA Reset alert */
2819 writel(IPR_UPROCI_RESET_ALERT
,
2820 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2822 for (i
= 0; i
< length_in_words
; i
++) {
2823 /* Wait for IO debug acknowledge */
2824 if (ipr_wait_iodbg_ack(ioa_cfg
,
2825 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
)) {
2826 dev_err(&ioa_cfg
->pdev
->dev
,
2827 "IOA dump short data transfer timeout\n");
2831 /* Read data from mailbox and increment destination pointer */
2832 *dest
= cpu_to_be32(readl(ioa_cfg
->ioa_mailbox
));
2835 /* For all but the last word of data, signal data received */
2836 if (i
< (length_in_words
- 1)) {
2837 /* Signal dump data received - Clear IO debug Ack */
2838 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2839 ioa_cfg
->regs
.clr_interrupt_reg
);
2843 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2844 writel(IPR_UPROCI_RESET_ALERT
,
2845 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2847 writel(IPR_UPROCI_IO_DEBUG_ALERT
,
2848 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2850 /* Signal dump data received - Clear IO debug Ack */
2851 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2852 ioa_cfg
->regs
.clr_interrupt_reg
);
2854 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2855 while (delay
< IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
) {
2857 readl(ioa_cfg
->regs
.sense_uproc_interrupt_reg32
);
2859 if (!(temp_pcii_reg
& IPR_UPROCI_RESET_ALERT
))
2869 #ifdef CONFIG_SCSI_IPR_DUMP
2871 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2872 * @ioa_cfg: ioa config struct
2873 * @pci_address: adapter address
2874 * @length: length of data to copy
2876 * Copy data from PCI adapter to kernel buffer.
2877 * Note: length MUST be a 4 byte multiple
2879 * 0 on success / other on failure
2881 static int ipr_sdt_copy(struct ipr_ioa_cfg
*ioa_cfg
,
2882 unsigned long pci_address
, u32 length
)
2884 int bytes_copied
= 0;
2885 int cur_len
, rc
, rem_len
, rem_page_len
, max_dump_size
;
2887 unsigned long lock_flags
= 0;
2888 struct ipr_ioa_dump
*ioa_dump
= &ioa_cfg
->dump
->ioa_dump
;
2891 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
2893 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
2895 while (bytes_copied
< length
&&
2896 (ioa_dump
->hdr
.len
+ bytes_copied
) < max_dump_size
) {
2897 if (ioa_dump
->page_offset
>= PAGE_SIZE
||
2898 ioa_dump
->page_offset
== 0) {
2899 page
= (__be32
*)__get_free_page(GFP_ATOMIC
);
2903 return bytes_copied
;
2906 ioa_dump
->page_offset
= 0;
2907 ioa_dump
->ioa_data
[ioa_dump
->next_page_index
] = page
;
2908 ioa_dump
->next_page_index
++;
2910 page
= ioa_dump
->ioa_data
[ioa_dump
->next_page_index
- 1];
2912 rem_len
= length
- bytes_copied
;
2913 rem_page_len
= PAGE_SIZE
- ioa_dump
->page_offset
;
2914 cur_len
= min(rem_len
, rem_page_len
);
2916 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
2917 if (ioa_cfg
->sdt_state
== ABORT_DUMP
) {
2920 rc
= ipr_get_ldump_data_section(ioa_cfg
,
2921 pci_address
+ bytes_copied
,
2922 &page
[ioa_dump
->page_offset
/ 4],
2923 (cur_len
/ sizeof(u32
)));
2925 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
2928 ioa_dump
->page_offset
+= cur_len
;
2929 bytes_copied
+= cur_len
;
2937 return bytes_copied
;
2941 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2942 * @hdr: dump entry header struct
2947 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header
*hdr
)
2949 hdr
->eye_catcher
= IPR_DUMP_EYE_CATCHER
;
2951 hdr
->offset
= sizeof(*hdr
);
2952 hdr
->status
= IPR_DUMP_STATUS_SUCCESS
;
2956 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2957 * @ioa_cfg: ioa config struct
2958 * @driver_dump: driver dump struct
2963 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg
*ioa_cfg
,
2964 struct ipr_driver_dump
*driver_dump
)
2966 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
2968 ipr_init_dump_entry_hdr(&driver_dump
->ioa_type_entry
.hdr
);
2969 driver_dump
->ioa_type_entry
.hdr
.len
=
2970 sizeof(struct ipr_dump_ioa_type_entry
) -
2971 sizeof(struct ipr_dump_entry_header
);
2972 driver_dump
->ioa_type_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
2973 driver_dump
->ioa_type_entry
.hdr
.id
= IPR_DUMP_DRIVER_TYPE_ID
;
2974 driver_dump
->ioa_type_entry
.type
= ioa_cfg
->type
;
2975 driver_dump
->ioa_type_entry
.fw_version
= (ucode_vpd
->major_release
<< 24) |
2976 (ucode_vpd
->card_type
<< 16) | (ucode_vpd
->minor_release
[0] << 8) |
2977 ucode_vpd
->minor_release
[1];
2978 driver_dump
->hdr
.num_entries
++;
2982 * ipr_dump_version_data - Fill in the driver version in the dump.
2983 * @ioa_cfg: ioa config struct
2984 * @driver_dump: driver dump struct
2989 static void ipr_dump_version_data(struct ipr_ioa_cfg
*ioa_cfg
,
2990 struct ipr_driver_dump
*driver_dump
)
2992 ipr_init_dump_entry_hdr(&driver_dump
->version_entry
.hdr
);
2993 driver_dump
->version_entry
.hdr
.len
=
2994 sizeof(struct ipr_dump_version_entry
) -
2995 sizeof(struct ipr_dump_entry_header
);
2996 driver_dump
->version_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_ASCII
;
2997 driver_dump
->version_entry
.hdr
.id
= IPR_DUMP_DRIVER_VERSION_ID
;
2998 strcpy(driver_dump
->version_entry
.version
, IPR_DRIVER_VERSION
);
2999 driver_dump
->hdr
.num_entries
++;
3003 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3004 * @ioa_cfg: ioa config struct
3005 * @driver_dump: driver dump struct
3010 static void ipr_dump_trace_data(struct ipr_ioa_cfg
*ioa_cfg
,
3011 struct ipr_driver_dump
*driver_dump
)
3013 ipr_init_dump_entry_hdr(&driver_dump
->trace_entry
.hdr
);
3014 driver_dump
->trace_entry
.hdr
.len
=
3015 sizeof(struct ipr_dump_trace_entry
) -
3016 sizeof(struct ipr_dump_entry_header
);
3017 driver_dump
->trace_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3018 driver_dump
->trace_entry
.hdr
.id
= IPR_DUMP_TRACE_ID
;
3019 memcpy(driver_dump
->trace_entry
.trace
, ioa_cfg
->trace
, IPR_TRACE_SIZE
);
3020 driver_dump
->hdr
.num_entries
++;
3024 * ipr_dump_location_data - Fill in the IOA location in the dump.
3025 * @ioa_cfg: ioa config struct
3026 * @driver_dump: driver dump struct
3031 static void ipr_dump_location_data(struct ipr_ioa_cfg
*ioa_cfg
,
3032 struct ipr_driver_dump
*driver_dump
)
3034 ipr_init_dump_entry_hdr(&driver_dump
->location_entry
.hdr
);
3035 driver_dump
->location_entry
.hdr
.len
=
3036 sizeof(struct ipr_dump_location_entry
) -
3037 sizeof(struct ipr_dump_entry_header
);
3038 driver_dump
->location_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_ASCII
;
3039 driver_dump
->location_entry
.hdr
.id
= IPR_DUMP_LOCATION_ID
;
3040 strcpy(driver_dump
->location_entry
.location
, dev_name(&ioa_cfg
->pdev
->dev
));
3041 driver_dump
->hdr
.num_entries
++;
3045 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3046 * @ioa_cfg: ioa config struct
3047 * @dump: dump struct
3052 static void ipr_get_ioa_dump(struct ipr_ioa_cfg
*ioa_cfg
, struct ipr_dump
*dump
)
3054 unsigned long start_addr
, sdt_word
;
3055 unsigned long lock_flags
= 0;
3056 struct ipr_driver_dump
*driver_dump
= &dump
->driver_dump
;
3057 struct ipr_ioa_dump
*ioa_dump
= &dump
->ioa_dump
;
3058 u32 num_entries
, max_num_entries
, start_off
, end_off
;
3059 u32 max_dump_size
, bytes_to_copy
, bytes_copied
, rc
;
3060 struct ipr_sdt
*sdt
;
3066 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3068 if (ioa_cfg
->sdt_state
!= READ_DUMP
) {
3069 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3073 if (ioa_cfg
->sis64
) {
3074 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3075 ssleep(IPR_DUMP_DELAY_SECONDS
);
3076 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3079 start_addr
= readl(ioa_cfg
->ioa_mailbox
);
3081 if (!ioa_cfg
->sis64
&& !ipr_sdt_is_fmt2(start_addr
)) {
3082 dev_err(&ioa_cfg
->pdev
->dev
,
3083 "Invalid dump table format: %lx\n", start_addr
);
3084 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3088 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA initiated\n");
3090 driver_dump
->hdr
.eye_catcher
= IPR_DUMP_EYE_CATCHER
;
3092 /* Initialize the overall dump header */
3093 driver_dump
->hdr
.len
= sizeof(struct ipr_driver_dump
);
3094 driver_dump
->hdr
.num_entries
= 1;
3095 driver_dump
->hdr
.first_entry_offset
= sizeof(struct ipr_dump_header
);
3096 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_SUCCESS
;
3097 driver_dump
->hdr
.os
= IPR_DUMP_OS_LINUX
;
3098 driver_dump
->hdr
.driver_name
= IPR_DUMP_DRIVER_NAME
;
3100 ipr_dump_version_data(ioa_cfg
, driver_dump
);
3101 ipr_dump_location_data(ioa_cfg
, driver_dump
);
3102 ipr_dump_ioa_type_data(ioa_cfg
, driver_dump
);
3103 ipr_dump_trace_data(ioa_cfg
, driver_dump
);
3105 /* Update dump_header */
3106 driver_dump
->hdr
.len
+= sizeof(struct ipr_dump_entry_header
);
3108 /* IOA Dump entry */
3109 ipr_init_dump_entry_hdr(&ioa_dump
->hdr
);
3110 ioa_dump
->hdr
.len
= 0;
3111 ioa_dump
->hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3112 ioa_dump
->hdr
.id
= IPR_DUMP_IOA_DUMP_ID
;
3114 /* First entries in sdt are actually a list of dump addresses and
3115 lengths to gather the real dump data. sdt represents the pointer
3116 to the ioa generated dump table. Dump data will be extracted based
3117 on entries in this table */
3118 sdt
= &ioa_dump
->sdt
;
3120 if (ioa_cfg
->sis64
) {
3121 max_num_entries
= IPR_FMT3_NUM_SDT_ENTRIES
;
3122 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
3124 max_num_entries
= IPR_FMT2_NUM_SDT_ENTRIES
;
3125 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
3128 bytes_to_copy
= offsetof(struct ipr_sdt
, entry
) +
3129 (max_num_entries
* sizeof(struct ipr_sdt_entry
));
3130 rc
= ipr_get_ldump_data_section(ioa_cfg
, start_addr
, (__be32
*)sdt
,
3131 bytes_to_copy
/ sizeof(__be32
));
3133 /* Smart Dump table is ready to use and the first entry is valid */
3134 if (rc
|| ((be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT3_SDT_READY_TO_USE
) &&
3135 (be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT2_SDT_READY_TO_USE
))) {
3136 dev_err(&ioa_cfg
->pdev
->dev
,
3137 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3138 rc
, be32_to_cpu(sdt
->hdr
.state
));
3139 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_FAILED
;
3140 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
3141 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3145 num_entries
= be32_to_cpu(sdt
->hdr
.num_entries_used
);
3147 if (num_entries
> max_num_entries
)
3148 num_entries
= max_num_entries
;
3150 /* Update dump length to the actual data to be copied */
3151 dump
->driver_dump
.hdr
.len
+= sizeof(struct ipr_sdt_header
);
3153 dump
->driver_dump
.hdr
.len
+= num_entries
* sizeof(struct ipr_sdt_entry
);
3155 dump
->driver_dump
.hdr
.len
+= max_num_entries
* sizeof(struct ipr_sdt_entry
);
3157 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3159 for (i
= 0; i
< num_entries
; i
++) {
3160 if (ioa_dump
->hdr
.len
> max_dump_size
) {
3161 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3165 if (sdt
->entry
[i
].flags
& IPR_SDT_VALID_ENTRY
) {
3166 sdt_word
= be32_to_cpu(sdt
->entry
[i
].start_token
);
3168 bytes_to_copy
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3170 start_off
= sdt_word
& IPR_FMT2_MBX_ADDR_MASK
;
3171 end_off
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3173 if (ipr_sdt_is_fmt2(sdt_word
) && sdt_word
)
3174 bytes_to_copy
= end_off
- start_off
;
3179 if (bytes_to_copy
> max_dump_size
) {
3180 sdt
->entry
[i
].flags
&= ~IPR_SDT_VALID_ENTRY
;
3184 /* Copy data from adapter to driver buffers */
3185 bytes_copied
= ipr_sdt_copy(ioa_cfg
, sdt_word
,
3188 ioa_dump
->hdr
.len
+= bytes_copied
;
3190 if (bytes_copied
!= bytes_to_copy
) {
3191 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3198 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA completed.\n");
3200 /* Update dump_header */
3201 driver_dump
->hdr
.len
+= ioa_dump
->hdr
.len
;
3203 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
3208 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3212 * ipr_release_dump - Free adapter dump memory
3213 * @kref: kref struct
3218 static void ipr_release_dump(struct kref
*kref
)
3220 struct ipr_dump
*dump
= container_of(kref
, struct ipr_dump
, kref
);
3221 struct ipr_ioa_cfg
*ioa_cfg
= dump
->ioa_cfg
;
3222 unsigned long lock_flags
= 0;
3226 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3227 ioa_cfg
->dump
= NULL
;
3228 ioa_cfg
->sdt_state
= INACTIVE
;
3229 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3231 for (i
= 0; i
< dump
->ioa_dump
.next_page_index
; i
++)
3232 free_page((unsigned long) dump
->ioa_dump
.ioa_data
[i
]);
3234 vfree(dump
->ioa_dump
.ioa_data
);
3240 * ipr_worker_thread - Worker thread
3241 * @work: ioa config struct
3243 * Called at task level from a work thread. This function takes care
3244 * of adding and removing device from the mid-layer as configuration
3245 * changes are detected by the adapter.
3250 static void ipr_worker_thread(struct work_struct
*work
)
3252 unsigned long lock_flags
;
3253 struct ipr_resource_entry
*res
;
3254 struct scsi_device
*sdev
;
3255 struct ipr_dump
*dump
;
3256 struct ipr_ioa_cfg
*ioa_cfg
=
3257 container_of(work
, struct ipr_ioa_cfg
, work_q
);
3258 u8 bus
, target
, lun
;
3262 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3264 if (ioa_cfg
->sdt_state
== READ_DUMP
) {
3265 dump
= ioa_cfg
->dump
;
3267 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3270 kref_get(&dump
->kref
);
3271 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3272 ipr_get_ioa_dump(ioa_cfg
, dump
);
3273 kref_put(&dump
->kref
, ipr_release_dump
);
3275 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3276 if (ioa_cfg
->sdt_state
== DUMP_OBTAINED
&& !ioa_cfg
->dump_timeout
)
3277 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3278 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3285 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
||
3286 !ioa_cfg
->allow_ml_add_del
) {
3287 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3291 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3292 if (res
->del_from_ml
&& res
->sdev
) {
3295 if (!scsi_device_get(sdev
)) {
3296 if (!res
->add_to_ml
)
3297 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
3299 res
->del_from_ml
= 0;
3300 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3301 scsi_remove_device(sdev
);
3302 scsi_device_put(sdev
);
3303 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3310 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3311 if (res
->add_to_ml
) {
3313 target
= res
->target
;
3316 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3317 scsi_add_device(ioa_cfg
->host
, bus
, target
, lun
);
3318 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3323 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3324 kobject_uevent(&ioa_cfg
->host
->shost_dev
.kobj
, KOBJ_CHANGE
);
3328 #ifdef CONFIG_SCSI_IPR_TRACE
3330 * ipr_read_trace - Dump the adapter trace
3331 * @filp: open sysfs file
3332 * @kobj: kobject struct
3333 * @bin_attr: bin_attribute struct
3336 * @count: buffer size
3339 * number of bytes printed to buffer
3341 static ssize_t
ipr_read_trace(struct file
*filp
, struct kobject
*kobj
,
3342 struct bin_attribute
*bin_attr
,
3343 char *buf
, loff_t off
, size_t count
)
3345 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
3346 struct Scsi_Host
*shost
= class_to_shost(dev
);
3347 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3348 unsigned long lock_flags
= 0;
3351 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3352 ret
= memory_read_from_buffer(buf
, count
, &off
, ioa_cfg
->trace
,
3354 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3359 static struct bin_attribute ipr_trace_attr
= {
3365 .read
= ipr_read_trace
,
3370 * ipr_show_fw_version - Show the firmware version
3371 * @dev: class device struct
3375 * number of bytes printed to buffer
3377 static ssize_t
ipr_show_fw_version(struct device
*dev
,
3378 struct device_attribute
*attr
, char *buf
)
3380 struct Scsi_Host
*shost
= class_to_shost(dev
);
3381 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3382 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
3383 unsigned long lock_flags
= 0;
3386 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3387 len
= snprintf(buf
, PAGE_SIZE
, "%02X%02X%02X%02X\n",
3388 ucode_vpd
->major_release
, ucode_vpd
->card_type
,
3389 ucode_vpd
->minor_release
[0],
3390 ucode_vpd
->minor_release
[1]);
3391 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3395 static struct device_attribute ipr_fw_version_attr
= {
3397 .name
= "fw_version",
3400 .show
= ipr_show_fw_version
,
3404 * ipr_show_log_level - Show the adapter's error logging level
3405 * @dev: class device struct
3409 * number of bytes printed to buffer
3411 static ssize_t
ipr_show_log_level(struct device
*dev
,
3412 struct device_attribute
*attr
, char *buf
)
3414 struct Scsi_Host
*shost
= class_to_shost(dev
);
3415 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3416 unsigned long lock_flags
= 0;
3419 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3420 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->log_level
);
3421 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3426 * ipr_store_log_level - Change the adapter's error logging level
3427 * @dev: class device struct
3431 * number of bytes printed to buffer
3433 static ssize_t
ipr_store_log_level(struct device
*dev
,
3434 struct device_attribute
*attr
,
3435 const char *buf
, size_t count
)
3437 struct Scsi_Host
*shost
= class_to_shost(dev
);
3438 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3439 unsigned long lock_flags
= 0;
3441 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3442 ioa_cfg
->log_level
= simple_strtoul(buf
, NULL
, 10);
3443 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3447 static struct device_attribute ipr_log_level_attr
= {
3449 .name
= "log_level",
3450 .mode
= S_IRUGO
| S_IWUSR
,
3452 .show
= ipr_show_log_level
,
3453 .store
= ipr_store_log_level
3457 * ipr_store_diagnostics - IOA Diagnostics interface
3458 * @dev: device struct
3460 * @count: buffer size
3462 * This function will reset the adapter and wait a reasonable
3463 * amount of time for any errors that the adapter might log.
3466 * count on success / other on failure
3468 static ssize_t
ipr_store_diagnostics(struct device
*dev
,
3469 struct device_attribute
*attr
,
3470 const char *buf
, size_t count
)
3472 struct Scsi_Host
*shost
= class_to_shost(dev
);
3473 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3474 unsigned long lock_flags
= 0;
3477 if (!capable(CAP_SYS_ADMIN
))
3480 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3481 while (ioa_cfg
->in_reset_reload
) {
3482 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3483 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3484 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3487 ioa_cfg
->errors_logged
= 0;
3488 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3490 if (ioa_cfg
->in_reset_reload
) {
3491 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3492 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3494 /* Wait for a second for any errors to be logged */
3497 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3501 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3502 if (ioa_cfg
->in_reset_reload
|| ioa_cfg
->errors_logged
)
3504 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3509 static struct device_attribute ipr_diagnostics_attr
= {
3511 .name
= "run_diagnostics",
3514 .store
= ipr_store_diagnostics
3518 * ipr_show_adapter_state - Show the adapter's state
3519 * @class_dev: device struct
3523 * number of bytes printed to buffer
3525 static ssize_t
ipr_show_adapter_state(struct device
*dev
,
3526 struct device_attribute
*attr
, char *buf
)
3528 struct Scsi_Host
*shost
= class_to_shost(dev
);
3529 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3530 unsigned long lock_flags
= 0;
3533 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3534 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
3535 len
= snprintf(buf
, PAGE_SIZE
, "offline\n");
3537 len
= snprintf(buf
, PAGE_SIZE
, "online\n");
3538 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3543 * ipr_store_adapter_state - Change adapter state
3544 * @dev: device struct
3546 * @count: buffer size
3548 * This function will change the adapter's state.
3551 * count on success / other on failure
3553 static ssize_t
ipr_store_adapter_state(struct device
*dev
,
3554 struct device_attribute
*attr
,
3555 const char *buf
, size_t count
)
3557 struct Scsi_Host
*shost
= class_to_shost(dev
);
3558 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3559 unsigned long lock_flags
;
3560 int result
= count
, i
;
3562 if (!capable(CAP_SYS_ADMIN
))
3565 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3566 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
&&
3567 !strncmp(buf
, "online", 6)) {
3568 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
3569 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
3570 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 0;
3571 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
3574 ioa_cfg
->reset_retries
= 0;
3575 ioa_cfg
->in_ioa_bringdown
= 0;
3576 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3578 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3579 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3584 static struct device_attribute ipr_ioa_state_attr
= {
3586 .name
= "online_state",
3587 .mode
= S_IRUGO
| S_IWUSR
,
3589 .show
= ipr_show_adapter_state
,
3590 .store
= ipr_store_adapter_state
3594 * ipr_store_reset_adapter - Reset the adapter
3595 * @dev: device struct
3597 * @count: buffer size
3599 * This function will reset the adapter.
3602 * count on success / other on failure
3604 static ssize_t
ipr_store_reset_adapter(struct device
*dev
,
3605 struct device_attribute
*attr
,
3606 const char *buf
, size_t count
)
3608 struct Scsi_Host
*shost
= class_to_shost(dev
);
3609 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3610 unsigned long lock_flags
;
3613 if (!capable(CAP_SYS_ADMIN
))
3616 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3617 if (!ioa_cfg
->in_reset_reload
)
3618 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3619 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3620 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3625 static struct device_attribute ipr_ioa_reset_attr
= {
3627 .name
= "reset_host",
3630 .store
= ipr_store_reset_adapter
3633 static int ipr_iopoll(struct blk_iopoll
*iop
, int budget
);
3635 * ipr_show_iopoll_weight - Show ipr polling mode
3636 * @dev: class device struct
3640 * number of bytes printed to buffer
3642 static ssize_t
ipr_show_iopoll_weight(struct device
*dev
,
3643 struct device_attribute
*attr
, char *buf
)
3645 struct Scsi_Host
*shost
= class_to_shost(dev
);
3646 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3647 unsigned long lock_flags
= 0;
3650 spin_lock_irqsave(shost
->host_lock
, lock_flags
);
3651 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->iopoll_weight
);
3652 spin_unlock_irqrestore(shost
->host_lock
, lock_flags
);
3658 * ipr_store_iopoll_weight - Change the adapter's polling mode
3659 * @dev: class device struct
3663 * number of bytes printed to buffer
3665 static ssize_t
ipr_store_iopoll_weight(struct device
*dev
,
3666 struct device_attribute
*attr
,
3667 const char *buf
, size_t count
)
3669 struct Scsi_Host
*shost
= class_to_shost(dev
);
3670 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3671 unsigned long user_iopoll_weight
;
3672 unsigned long lock_flags
= 0;
3675 if (!ioa_cfg
->sis64
) {
3676 dev_info(&ioa_cfg
->pdev
->dev
, "blk-iopoll not supported on this adapter\n");
3679 if (kstrtoul(buf
, 10, &user_iopoll_weight
))
3682 if (user_iopoll_weight
> 256) {
3683 dev_info(&ioa_cfg
->pdev
->dev
, "Invalid blk-iopoll weight. It must be less than 256\n");
3687 if (user_iopoll_weight
== ioa_cfg
->iopoll_weight
) {
3688 dev_info(&ioa_cfg
->pdev
->dev
, "Current blk-iopoll weight has the same weight\n");
3692 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
3693 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++)
3694 blk_iopoll_disable(&ioa_cfg
->hrrq
[i
].iopoll
);
3697 spin_lock_irqsave(shost
->host_lock
, lock_flags
);
3698 ioa_cfg
->iopoll_weight
= user_iopoll_weight
;
3699 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
3700 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++) {
3701 blk_iopoll_init(&ioa_cfg
->hrrq
[i
].iopoll
,
3702 ioa_cfg
->iopoll_weight
, ipr_iopoll
);
3703 blk_iopoll_enable(&ioa_cfg
->hrrq
[i
].iopoll
);
3706 spin_unlock_irqrestore(shost
->host_lock
, lock_flags
);
3711 static struct device_attribute ipr_iopoll_weight_attr
= {
3713 .name
= "iopoll_weight",
3714 .mode
= S_IRUGO
| S_IWUSR
,
3716 .show
= ipr_show_iopoll_weight
,
3717 .store
= ipr_store_iopoll_weight
3721 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3722 * @buf_len: buffer length
3724 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3725 * list to use for microcode download
3728 * pointer to sglist / NULL on failure
3730 static struct ipr_sglist
*ipr_alloc_ucode_buffer(int buf_len
)
3732 int sg_size
, order
, bsize_elem
, num_elem
, i
, j
;
3733 struct ipr_sglist
*sglist
;
3734 struct scatterlist
*scatterlist
;
3737 /* Get the minimum size per scatter/gather element */
3738 sg_size
= buf_len
/ (IPR_MAX_SGLIST
- 1);
3740 /* Get the actual size per element */
3741 order
= get_order(sg_size
);
3743 /* Determine the actual number of bytes per element */
3744 bsize_elem
= PAGE_SIZE
* (1 << order
);
3746 /* Determine the actual number of sg entries needed */
3747 if (buf_len
% bsize_elem
)
3748 num_elem
= (buf_len
/ bsize_elem
) + 1;
3750 num_elem
= buf_len
/ bsize_elem
;
3752 /* Allocate a scatter/gather list for the DMA */
3753 sglist
= kzalloc(sizeof(struct ipr_sglist
) +
3754 (sizeof(struct scatterlist
) * (num_elem
- 1)),
3757 if (sglist
== NULL
) {
3762 scatterlist
= sglist
->scatterlist
;
3763 sg_init_table(scatterlist
, num_elem
);
3765 sglist
->order
= order
;
3766 sglist
->num_sg
= num_elem
;
3768 /* Allocate a bunch of sg elements */
3769 for (i
= 0; i
< num_elem
; i
++) {
3770 page
= alloc_pages(GFP_KERNEL
, order
);
3774 /* Free up what we already allocated */
3775 for (j
= i
- 1; j
>= 0; j
--)
3776 __free_pages(sg_page(&scatterlist
[j
]), order
);
3781 sg_set_page(&scatterlist
[i
], page
, 0, 0);
3788 * ipr_free_ucode_buffer - Frees a microcode download buffer
3789 * @p_dnld: scatter/gather list pointer
3791 * Free a DMA'able ucode download buffer previously allocated with
3792 * ipr_alloc_ucode_buffer
3797 static void ipr_free_ucode_buffer(struct ipr_sglist
*sglist
)
3801 for (i
= 0; i
< sglist
->num_sg
; i
++)
3802 __free_pages(sg_page(&sglist
->scatterlist
[i
]), sglist
->order
);
3808 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3809 * @sglist: scatter/gather list pointer
3810 * @buffer: buffer pointer
3811 * @len: buffer length
3813 * Copy a microcode image from a user buffer into a buffer allocated by
3814 * ipr_alloc_ucode_buffer
3817 * 0 on success / other on failure
3819 static int ipr_copy_ucode_buffer(struct ipr_sglist
*sglist
,
3820 u8
*buffer
, u32 len
)
3822 int bsize_elem
, i
, result
= 0;
3823 struct scatterlist
*scatterlist
;
3826 /* Determine the actual number of bytes per element */
3827 bsize_elem
= PAGE_SIZE
* (1 << sglist
->order
);
3829 scatterlist
= sglist
->scatterlist
;
3831 for (i
= 0; i
< (len
/ bsize_elem
); i
++, buffer
+= bsize_elem
) {
3832 struct page
*page
= sg_page(&scatterlist
[i
]);
3835 memcpy(kaddr
, buffer
, bsize_elem
);
3838 scatterlist
[i
].length
= bsize_elem
;
3846 if (len
% bsize_elem
) {
3847 struct page
*page
= sg_page(&scatterlist
[i
]);
3850 memcpy(kaddr
, buffer
, len
% bsize_elem
);
3853 scatterlist
[i
].length
= len
% bsize_elem
;
3856 sglist
->buffer_len
= len
;
/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 * Return value:
 * 	nothing
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 * Return value:
 * 	nothing
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
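/*
 * Editorial note: the two builders above describe the same download buffer in
 * two on-the-wire formats. Elsewhere in the driver the 64-bit descriptor form
 * is used when ioa_cfg->sis64 is set and the 32-bit form otherwise, mirroring
 * the ioadl64/ioadl split seen in the normal I/O path later in this file.
 */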
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
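/*
 * Editorial note: ipr_update_ioa_ucode() does not transfer the image itself.
 * It stashes the DMA-mapped scatter/gather list in ioa_cfg->ucode_sglist and
 * kicks off a normal adapter reset; the reset job performs the download and
 * then wakes reset_wait_q, after which the pointer is cleared above.
 */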
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	u8 *src;
	int len, result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	len = snprintf(fname, 99, "%s", buf);
	fname[len-1] = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
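/*
 * Usage sketch (illustrative, names are examples only): with the image placed
 * where the firmware loader can find it (e.g. /lib/firmware), an administrator
 * can trigger the download through this attribute:
 *
 *	echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * CAP_SYS_ADMIN is required, per the capable() check above.
 */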
/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name =		"fw_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_type
};

static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	&ipr_iopoll_weight_attr,
	NULL,
};
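/*
 * Editorial note: this NULL-terminated array is the set of host attributes the
 * driver hands to the SCSI midlayer (through the host template), so each of
 * the attributes above appears under /sys/class/scsi_host/hostN/.
 */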
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	u8 *src;
	int len, sdt_end;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
	else
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sdt_end;

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}
/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	__be32 **ioa_data;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	if (ioa_cfg->sis64)
		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
	else
		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));

	if (!ioa_data) {
		ipr_err("Dump memory allocation failed\n");
		kfree(dump);
		return -ENOMEM;
	}

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	return 0;
}
/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
#endif
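/*
 * Usage sketch (illustrative, host number and file name are examples only):
 * when CONFIG_SCSI_IPR_DUMP is enabled, the "dump" binary attribute above
 * drives the adapter dump facility:
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump          # set up dump buffers
 *	cat /sys/class/scsi_host/host0/dump > ipr.dump    # read the dump back
 *	echo 0 > /sys/class/scsi_host/host0/dump          # free the dump memory
 */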
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 * @reason:	calling context
 *
 * Return value:
 * 	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
				  int reason)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:	scsi device struct
 * @tag_type:	type of tags to use
 *
 * Return value:
 * 	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res) {
		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
			scsi_set_tag_type(sdev, tag_type);

			if (tag_type)
				scsi_activate_tcq(sdev, sdev->queue_depth);
			else
				scsi_deactivate_tcq(sdev, sdev->queue_depth);
		} else
			tag_type = 0;
	} else
		tag_type = 0;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}
/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = 	"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};
/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       __ipr_format_res_path(res->res_path, buffer,
						     sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name = 	"resource_path",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_path
};
/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name =		"device_id",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_device_id
};
/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name =		"resource_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_type
};
static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	NULL,
};
/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
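/*
 * Worked example (editorial): with the fixed 128-head, 32-sector translation
 * above, one "cylinder" is 128 * 32 = 4096 sectors. With 512-byte sectors
 * that is 2 MiB per cylinder, so partitioning tools that align to cylinder
 * boundaries land on offsets that are multiples of 4 KB, which is what the
 * comment about 4k boundaries refers to.
 */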
/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id)) {
			return res;
		}
	}

	return NULL;
}
static struct ata_port_info sata_port_info;
4612 * ipr_target_alloc - Prepare for commands to a SCSI target
4613 * @starget: scsi target struct
4615 * If the device is a SATA device, this function allocates an
4616 * ATA port with libata, else it does nothing.
4619 * 0 on success / non-0 on failure
4621 static int ipr_target_alloc(struct scsi_target
*starget
)
4623 struct Scsi_Host
*shost
= dev_to_shost(&starget
->dev
);
4624 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) shost
->hostdata
;
4625 struct ipr_sata_port
*sata_port
;
4626 struct ata_port
*ap
;
4627 struct ipr_resource_entry
*res
;
4628 unsigned long lock_flags
;
4630 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4631 res
= ipr_find_starget(starget
);
4632 starget
->hostdata
= NULL
;
4634 if (res
&& ipr_is_gata(res
)) {
4635 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4636 sata_port
= kzalloc(sizeof(*sata_port
), GFP_KERNEL
);
4640 ap
= ata_sas_port_alloc(&ioa_cfg
->ata_host
, &sata_port_info
, shost
);
4642 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4643 sata_port
->ioa_cfg
= ioa_cfg
;
4645 sata_port
->res
= res
;
4647 res
->sata_port
= sata_port
;
4648 ap
->private_data
= sata_port
;
4649 starget
->hostdata
= sata_port
;
4655 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4661 * ipr_target_destroy - Destroy a SCSI target
4662 * @starget: scsi target struct
4664 * If the device was a SATA device, this function frees the libata
4665 * ATA port, else it does nothing.
4668 static void ipr_target_destroy(struct scsi_target
*starget
)
4670 struct ipr_sata_port
*sata_port
= starget
->hostdata
;
4671 struct Scsi_Host
*shost
= dev_to_shost(&starget
->dev
);
4672 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) shost
->hostdata
;
4674 if (ioa_cfg
->sis64
) {
4675 if (!ipr_find_starget(starget
)) {
4676 if (starget
->channel
== IPR_ARRAY_VIRTUAL_BUS
)
4677 clear_bit(starget
->id
, ioa_cfg
->array_ids
);
4678 else if (starget
->channel
== IPR_VSET_VIRTUAL_BUS
)
4679 clear_bit(starget
->id
, ioa_cfg
->vset_ids
);
4680 else if (starget
->channel
== 0)
4681 clear_bit(starget
->id
, ioa_cfg
->target_ids
);
4686 starget
->hostdata
= NULL
;
4687 ata_sas_port_destroy(sata_port
->ap
);
4693 * ipr_find_sdev - Find device based on bus/target/lun.
4694 * @sdev: scsi device struct
4697 * resource entry pointer if found / NULL if not found
4699 static struct ipr_resource_entry
*ipr_find_sdev(struct scsi_device
*sdev
)
4701 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4702 struct ipr_resource_entry
*res
;
4704 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
4705 if ((res
->bus
== sdev
->channel
) &&
4706 (res
->target
== sdev
->id
) &&
4707 (res
->lun
== sdev
->lun
))
4715 * ipr_slave_destroy - Unconfigure a SCSI device
4716 * @sdev: scsi device struct
4721 static void ipr_slave_destroy(struct scsi_device
*sdev
)
4723 struct ipr_resource_entry
*res
;
4724 struct ipr_ioa_cfg
*ioa_cfg
;
4725 unsigned long lock_flags
= 0;
4727 ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4729 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4730 res
= (struct ipr_resource_entry
*) sdev
->hostdata
;
4733 res
->sata_port
->ap
->link
.device
[0].class = ATA_DEV_NONE
;
4734 sdev
->hostdata
= NULL
;
4736 res
->sata_port
= NULL
;
4738 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4742 * ipr_slave_configure - Configure a SCSI device
4743 * @sdev: scsi device struct
4745 * This function configures the specified scsi device.
4750 static int ipr_slave_configure(struct scsi_device
*sdev
)
4752 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4753 struct ipr_resource_entry
*res
;
4754 struct ata_port
*ap
= NULL
;
4755 unsigned long lock_flags
= 0;
4756 char buffer
[IPR_MAX_RES_PATH_LENGTH
];
4758 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4759 res
= sdev
->hostdata
;
4761 if (ipr_is_af_dasd_device(res
))
4762 sdev
->type
= TYPE_RAID
;
4763 if (ipr_is_af_dasd_device(res
) || ipr_is_ioa_resource(res
)) {
4764 sdev
->scsi_level
= 4;
4765 sdev
->no_uld_attach
= 1;
4767 if (ipr_is_vset_device(res
)) {
4768 blk_queue_rq_timeout(sdev
->request_queue
,
4769 IPR_VSET_RW_TIMEOUT
);
4770 blk_queue_max_hw_sectors(sdev
->request_queue
, IPR_VSET_MAX_SECTORS
);
4772 if (ipr_is_gata(res
) && res
->sata_port
)
4773 ap
= res
->sata_port
->ap
;
4774 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4777 scsi_adjust_queue_depth(sdev
, 0, IPR_MAX_CMD_PER_ATA_LUN
);
4778 ata_sas_slave_configure(sdev
, ap
);
4780 scsi_adjust_queue_depth(sdev
, 0, sdev
->host
->cmd_per_lun
);
4782 sdev_printk(KERN_INFO
, sdev
, "Resource path: %s\n",
4783 ipr_format_res_path(ioa_cfg
,
4784 res
->res_path
, buffer
, sizeof(buffer
)));
4787 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4792 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4793 * @sdev: scsi device struct
4795 * This function initializes an ATA port so that future commands
4796 * sent through queuecommand will work.
4801 static int ipr_ata_slave_alloc(struct scsi_device
*sdev
)
4803 struct ipr_sata_port
*sata_port
= NULL
;
4807 if (sdev
->sdev_target
)
4808 sata_port
= sdev
->sdev_target
->hostdata
;
4810 rc
= ata_sas_port_init(sata_port
->ap
);
4812 rc
= ata_sas_sync_probe(sata_port
->ap
);
4816 ipr_slave_destroy(sdev
);
4823 * ipr_slave_alloc - Prepare for commands to a device.
4824 * @sdev: scsi device struct
4826 * This function saves a pointer to the resource entry
4827 * in the scsi device struct if the device exists. We
4828 * can then use this pointer in ipr_queuecommand when
4829 * handling new commands.
4832 * 0 on success / -ENXIO if device does not exist
4834 static int ipr_slave_alloc(struct scsi_device
*sdev
)
4836 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*) sdev
->host
->hostdata
;
4837 struct ipr_resource_entry
*res
;
4838 unsigned long lock_flags
;
4841 sdev
->hostdata
= NULL
;
4843 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4845 res
= ipr_find_sdev(sdev
);
4850 sdev
->hostdata
= res
;
4851 if (!ipr_is_naca_model(res
))
4852 res
->needs_sync_complete
= 1;
4854 if (ipr_is_gata(res
)) {
4855 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4856 return ipr_ata_slave_alloc(sdev
);
4860 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4866 * ipr_match_lun - Match function for specified LUN
4867 * @ipr_cmd: ipr command struct
4868 * @device: device to match (sdev)
4871 * 1 if command matches sdev / 0 if command does not match sdev
4873 static int ipr_match_lun(struct ipr_cmnd
*ipr_cmd
, void *device
)
4875 if (ipr_cmd
->scsi_cmd
&& ipr_cmd
->scsi_cmd
->device
== device
)
4881 * ipr_wait_for_ops - Wait for matching commands to complete
4882 * @ipr_cmd: ipr command struct
4883 * @device: device to match (sdev)
4884 * @match: match function to use
4889 static int ipr_wait_for_ops(struct ipr_ioa_cfg
*ioa_cfg
, void *device
,
4890 int (*match
)(struct ipr_cmnd
*, void *))
4892 struct ipr_cmnd
*ipr_cmd
;
4894 unsigned long flags
;
4895 struct ipr_hrr_queue
*hrrq
;
4896 signed long timeout
= IPR_ABORT_TASK_TIMEOUT
;
4897 DECLARE_COMPLETION_ONSTACK(comp
);
4903 for_each_hrrq(hrrq
, ioa_cfg
) {
4904 spin_lock_irqsave(hrrq
->lock
, flags
);
4905 list_for_each_entry(ipr_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
4906 if (match(ipr_cmd
, device
)) {
4907 ipr_cmd
->eh_comp
= &comp
;
4911 spin_unlock_irqrestore(hrrq
->lock
, flags
);
4915 timeout
= wait_for_completion_timeout(&comp
, timeout
);
4920 for_each_hrrq(hrrq
, ioa_cfg
) {
4921 spin_lock_irqsave(hrrq
->lock
, flags
);
4922 list_for_each_entry(ipr_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
4923 if (match(ipr_cmd
, device
)) {
4924 ipr_cmd
->eh_comp
= NULL
;
4928 spin_unlock_irqrestore(hrrq
->lock
, flags
);
4932 dev_err(&ioa_cfg
->pdev
->dev
, "Timed out waiting for aborted commands\n");
4934 return wait
? FAILED
: SUCCESS
;
4943 static int ipr_eh_host_reset(struct scsi_cmnd
*cmd
)
4945 struct ipr_ioa_cfg
*ioa_cfg
;
4946 unsigned long lock_flags
= 0;
4950 ioa_cfg
= (struct ipr_ioa_cfg
*) cmd
->device
->host
->hostdata
;
4951 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4953 if (!ioa_cfg
->in_reset_reload
&& !ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
4954 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_ABBREV
);
4955 dev_err(&ioa_cfg
->pdev
->dev
,
4956 "Adapter being reset as a result of error recovery.\n");
4958 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
4959 ioa_cfg
->sdt_state
= GET_DUMP
;
4962 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4963 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
4964 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
4966 /* If we got hit with a host reset while we were already resetting
4967 the adapter for some reason, and the reset failed. */
4968 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
4973 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
4979 * ipr_device_reset - Reset the device
4980 * @ioa_cfg: ioa config struct
4981 * @res: resource entry struct
4983 * This function issues a device reset to the affected device.
4984 * If the device is a SCSI device, a LUN reset will be sent
4985 * to the device first. If that does not work, a target reset
4986 * will be sent. If the device is a SATA device, a PHY reset will
4990 * 0 on success / non-zero on failure
4992 static int ipr_device_reset(struct ipr_ioa_cfg
*ioa_cfg
,
4993 struct ipr_resource_entry
*res
)
4995 struct ipr_cmnd
*ipr_cmd
;
4996 struct ipr_ioarcb
*ioarcb
;
4997 struct ipr_cmd_pkt
*cmd_pkt
;
4998 struct ipr_ioarcb_ata_regs
*regs
;
5002 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
5003 ioarcb
= &ipr_cmd
->ioarcb
;
5004 cmd_pkt
= &ioarcb
->cmd_pkt
;
5006 if (ipr_cmd
->ioa_cfg
->sis64
) {
5007 regs
= &ipr_cmd
->i
.ata_ioadl
.regs
;
5008 ioarcb
->add_cmd_parms_offset
= cpu_to_be16(sizeof(*ioarcb
));
5010 regs
= &ioarcb
->u
.add_data
.u
.regs
;
5012 ioarcb
->res_handle
= res
->res_handle
;
5013 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5014 cmd_pkt
->cdb
[0] = IPR_RESET_DEVICE
;
5015 if (ipr_is_gata(res
)) {
5016 cmd_pkt
->cdb
[2] = IPR_ATA_PHY_RESET
;
5017 ioarcb
->add_cmd_parms_len
= cpu_to_be16(sizeof(regs
->flags
));
5018 regs
->flags
|= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION
;
5021 ipr_send_blocking_cmd(ipr_cmd
, ipr_timeout
, IPR_DEVICE_RESET_TIMEOUT
);
5022 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5023 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5024 if (ipr_is_gata(res
) && res
->sata_port
&& ioasc
!= IPR_IOASC_IOA_WAS_RESET
) {
5025 if (ipr_cmd
->ioa_cfg
->sis64
)
5026 memcpy(&res
->sata_port
->ioasa
, &ipr_cmd
->s
.ioasa64
.u
.gata
,
5027 sizeof(struct ipr_ioasa_gata
));
5029 memcpy(&res
->sata_port
->ioasa
, &ipr_cmd
->s
.ioasa
.u
.gata
,
5030 sizeof(struct ipr_ioasa_gata
));
5034 return IPR_IOASC_SENSE_KEY(ioasc
) ? -EIO
: 0;
5038 * ipr_sata_reset - Reset the SATA port
5039 * @link: SATA link to reset
5040 * @classes: class of the attached device
5042 * This function issues a SATA phy reset to the affected ATA link.
5045 * 0 on success / non-zero on failure
5047 static int ipr_sata_reset(struct ata_link
*link
, unsigned int *classes
,
5048 unsigned long deadline
)
5050 struct ipr_sata_port
*sata_port
= link
->ap
->private_data
;
5051 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
5052 struct ipr_resource_entry
*res
;
5053 unsigned long lock_flags
= 0;
5057 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5058 while (ioa_cfg
->in_reset_reload
) {
5059 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5060 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
5061 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5064 res
= sata_port
->res
;
5066 rc
= ipr_device_reset(ioa_cfg
, res
);
5067 *classes
= res
->ata_class
;
5070 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5076 * ipr_eh_dev_reset - Reset the device
5077 * @scsi_cmd: scsi command struct
5079 * This function issues a device reset to the affected device.
5080 * A LUN reset will be sent to the device first. If that does
5081 * not work, a target reset will be sent.
5086 static int __ipr_eh_dev_reset(struct scsi_cmnd
*scsi_cmd
)
5088 struct ipr_cmnd
*ipr_cmd
;
5089 struct ipr_ioa_cfg
*ioa_cfg
;
5090 struct ipr_resource_entry
*res
;
5091 struct ata_port
*ap
;
5093 struct ipr_hrr_queue
*hrrq
;
5096 ioa_cfg
= (struct ipr_ioa_cfg
*) scsi_cmd
->device
->host
->hostdata
;
5097 res
= scsi_cmd
->device
->hostdata
;
5103 * If we are currently going through reset/reload, return failed. This will force the
5104 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5107 if (ioa_cfg
->in_reset_reload
)
5109 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
5112 for_each_hrrq(hrrq
, ioa_cfg
) {
5113 spin_lock(&hrrq
->_lock
);
5114 list_for_each_entry(ipr_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
5115 if (ipr_cmd
->ioarcb
.res_handle
== res
->res_handle
) {
5116 if (ipr_cmd
->scsi_cmd
)
5117 ipr_cmd
->done
= ipr_scsi_eh_done
;
5119 ipr_cmd
->done
= ipr_sata_eh_done
;
5121 !(ipr_cmd
->qc
->flags
& ATA_QCFLAG_FAILED
)) {
5122 ipr_cmd
->qc
->err_mask
|= AC_ERR_TIMEOUT
;
5123 ipr_cmd
->qc
->flags
|= ATA_QCFLAG_FAILED
;
5127 spin_unlock(&hrrq
->_lock
);
5129 res
->resetting_device
= 1;
5130 scmd_printk(KERN_ERR
, scsi_cmd
, "Resetting device\n");
5132 if (ipr_is_gata(res
) && res
->sata_port
) {
5133 ap
= res
->sata_port
->ap
;
5134 spin_unlock_irq(scsi_cmd
->device
->host
->host_lock
);
5135 ata_std_error_handler(ap
);
5136 spin_lock_irq(scsi_cmd
->device
->host
->host_lock
);
5138 for_each_hrrq(hrrq
, ioa_cfg
) {
5139 spin_lock(&hrrq
->_lock
);
5140 list_for_each_entry(ipr_cmd
,
5141 &hrrq
->hrrq_pending_q
, queue
) {
5142 if (ipr_cmd
->ioarcb
.res_handle
==
5148 spin_unlock(&hrrq
->_lock
);
5151 rc
= ipr_device_reset(ioa_cfg
, res
);
5152 res
->resetting_device
= 0;
5153 res
->reset_occurred
= 1;
5156 return rc
? FAILED
: SUCCESS
;
5159 static int ipr_eh_dev_reset(struct scsi_cmnd
*cmd
)
5162 struct ipr_ioa_cfg
*ioa_cfg
;
5164 ioa_cfg
= (struct ipr_ioa_cfg
*) cmd
->device
->host
->hostdata
;
5166 spin_lock_irq(cmd
->device
->host
->host_lock
);
5167 rc
= __ipr_eh_dev_reset(cmd
);
5168 spin_unlock_irq(cmd
->device
->host
->host_lock
);
5171 rc
= ipr_wait_for_ops(ioa_cfg
, cmd
->device
, ipr_match_lun
);
5177 * ipr_bus_reset_done - Op done function for bus reset.
5178 * @ipr_cmd: ipr command struct
5180 * This function is the op done function for a bus reset
5185 static void ipr_bus_reset_done(struct ipr_cmnd
*ipr_cmd
)
5187 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
5188 struct ipr_resource_entry
*res
;
5191 if (!ioa_cfg
->sis64
)
5192 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
5193 if (res
->res_handle
== ipr_cmd
->ioarcb
.res_handle
) {
5194 scsi_report_bus_reset(ioa_cfg
->host
, res
->bus
);
5200 * If abort has not completed, indicate the reset has, else call the
5201 * abort's done function to wake the sleeping eh thread
5203 if (ipr_cmd
->sibling
->sibling
)
5204 ipr_cmd
->sibling
->sibling
= NULL
;
5206 ipr_cmd
->sibling
->done(ipr_cmd
->sibling
);
5208 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5213 * ipr_abort_timeout - An abort task has timed out
5214 * @ipr_cmd: ipr command struct
5216 * This function handles when an abort task times out. If this
5217 * happens we issue a bus reset since we have resources tied
5218 * up that must be freed before returning to the midlayer.
5223 static void ipr_abort_timeout(struct ipr_cmnd
*ipr_cmd
)
5225 struct ipr_cmnd
*reset_cmd
;
5226 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
5227 struct ipr_cmd_pkt
*cmd_pkt
;
5228 unsigned long lock_flags
= 0;
5231 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
5232 if (ipr_cmd
->completion
.done
|| ioa_cfg
->in_reset_reload
) {
5233 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5237 sdev_printk(KERN_ERR
, ipr_cmd
->u
.sdev
, "Abort timed out. Resetting bus.\n");
5238 reset_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
5239 ipr_cmd
->sibling
= reset_cmd
;
5240 reset_cmd
->sibling
= ipr_cmd
;
5241 reset_cmd
->ioarcb
.res_handle
= ipr_cmd
->ioarcb
.res_handle
;
5242 cmd_pkt
= &reset_cmd
->ioarcb
.cmd_pkt
;
5243 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5244 cmd_pkt
->cdb
[0] = IPR_RESET_DEVICE
;
5245 cmd_pkt
->cdb
[2] = IPR_RESET_TYPE_SELECT
| IPR_BUS_RESET
;
5247 ipr_do_req(reset_cmd
, ipr_bus_reset_done
, ipr_timeout
, IPR_DEVICE_RESET_TIMEOUT
);
5248 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
5253 * ipr_cancel_op - Cancel specified op
5254 * @scsi_cmd: scsi command struct
5256 * This function cancels specified op.
5261 static int ipr_cancel_op(struct scsi_cmnd
*scsi_cmd
)
5263 struct ipr_cmnd
*ipr_cmd
;
5264 struct ipr_ioa_cfg
*ioa_cfg
;
5265 struct ipr_resource_entry
*res
;
5266 struct ipr_cmd_pkt
*cmd_pkt
;
5269 struct ipr_hrr_queue
*hrrq
;
5272 ioa_cfg
= (struct ipr_ioa_cfg
*)scsi_cmd
->device
->host
->hostdata
;
5273 res
= scsi_cmd
->device
->hostdata
;
5275 /* If we are currently going through reset/reload, return failed.
5276 * This will force the mid-layer to call ipr_eh_host_reset,
5277 * which will then go to sleep and wait for the reset to complete
5279 if (ioa_cfg
->in_reset_reload
||
5280 ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
5286 * If we are aborting a timed out op, chances are that the timeout was caused
5287 * by a still not detected EEH error. In such cases, reading a register will
5288 * trigger the EEH recovery infrastructure.
5290 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
5292 if (!ipr_is_gscsi(res
))
5295 for_each_hrrq(hrrq
, ioa_cfg
) {
5296 spin_lock(&hrrq
->_lock
);
5297 list_for_each_entry(ipr_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
5298 if (ipr_cmd
->scsi_cmd
== scsi_cmd
) {
5299 ipr_cmd
->done
= ipr_scsi_eh_done
;
5304 spin_unlock(&hrrq
->_lock
);
5310 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
5311 ipr_cmd
->ioarcb
.res_handle
= res
->res_handle
;
5312 cmd_pkt
= &ipr_cmd
->ioarcb
.cmd_pkt
;
5313 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5314 cmd_pkt
->cdb
[0] = IPR_CANCEL_ALL_REQUESTS
;
5315 ipr_cmd
->u
.sdev
= scsi_cmd
->device
;
5317 scmd_printk(KERN_ERR
, scsi_cmd
, "Aborting command: %02X\n",
5319 ipr_send_blocking_cmd(ipr_cmd
, ipr_abort_timeout
, IPR_CANCEL_ALL_TIMEOUT
);
5320 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5323 * If the abort task timed out and we sent a bus reset, we will get
5324 * one the following responses to the abort
5326 if (ioasc
== IPR_IOASC_BUS_WAS_RESET
|| ioasc
== IPR_IOASC_SYNC_REQUIRED
) {
5331 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5332 if (!ipr_is_naca_model(res
))
5333 res
->needs_sync_complete
= 1;
5336 return IPR_IOASC_SENSE_KEY(ioasc
) ? FAILED
: SUCCESS
;
5340 * ipr_eh_abort - Abort a single op
5341 * @scsi_cmd: scsi command struct
5346 static int ipr_eh_abort(struct scsi_cmnd
*scsi_cmd
)
5348 unsigned long flags
;
5350 struct ipr_ioa_cfg
*ioa_cfg
;
5354 ioa_cfg
= (struct ipr_ioa_cfg
*) scsi_cmd
->device
->host
->hostdata
;
5356 spin_lock_irqsave(scsi_cmd
->device
->host
->host_lock
, flags
);
5357 rc
= ipr_cancel_op(scsi_cmd
);
5358 spin_unlock_irqrestore(scsi_cmd
->device
->host
->host_lock
, flags
);
5361 rc
= ipr_wait_for_ops(ioa_cfg
, scsi_cmd
->device
, ipr_match_lun
);
5367 * ipr_handle_other_interrupt - Handle "other" interrupts
5368 * @ioa_cfg: ioa config struct
5369 * @int_reg: interrupt register
5372 * IRQ_NONE / IRQ_HANDLED
5374 static irqreturn_t
ipr_handle_other_interrupt(struct ipr_ioa_cfg
*ioa_cfg
,
5377 irqreturn_t rc
= IRQ_HANDLED
;
5380 int_mask_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg32
);
5381 int_reg
&= ~int_mask_reg
;
5383 /* If an interrupt on the adapter did not occur, ignore it.
5384 * Or in the case of SIS 64, check for a stage change interrupt.
5386 if ((int_reg
& IPR_PCII_OPER_INTERRUPTS
) == 0) {
5387 if (ioa_cfg
->sis64
) {
5388 int_mask_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
5389 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
) & ~int_mask_reg
;
5390 if (int_reg
& IPR_PCII_IPL_STAGE_CHANGE
) {
5392 /* clear stage change */
5393 writel(IPR_PCII_IPL_STAGE_CHANGE
, ioa_cfg
->regs
.clr_interrupt_reg
);
5394 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
) & ~int_mask_reg
;
5395 list_del(&ioa_cfg
->reset_cmd
->queue
);
5396 del_timer(&ioa_cfg
->reset_cmd
->timer
);
5397 ipr_reset_ioa_job(ioa_cfg
->reset_cmd
);
5405 if (int_reg
& IPR_PCII_IOA_TRANS_TO_OPER
) {
5406 /* Mask the interrupt */
5407 writel(IPR_PCII_IOA_TRANS_TO_OPER
, ioa_cfg
->regs
.set_interrupt_mask_reg
);
5409 /* Clear the interrupt */
5410 writel(IPR_PCII_IOA_TRANS_TO_OPER
, ioa_cfg
->regs
.clr_interrupt_reg
);
5411 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
5413 list_del(&ioa_cfg
->reset_cmd
->queue
);
5414 del_timer(&ioa_cfg
->reset_cmd
->timer
);
5415 ipr_reset_ioa_job(ioa_cfg
->reset_cmd
);
5416 } else if ((int_reg
& IPR_PCII_HRRQ_UPDATED
) == int_reg
) {
5417 if (ioa_cfg
->clear_isr
) {
5418 if (ipr_debug
&& printk_ratelimit())
5419 dev_err(&ioa_cfg
->pdev
->dev
,
5420 "Spurious interrupt detected. 0x%08X\n", int_reg
);
5421 writel(IPR_PCII_HRRQ_UPDATED
, ioa_cfg
->regs
.clr_interrupt_reg32
);
5422 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
5426 if (int_reg
& IPR_PCII_IOA_UNIT_CHECKED
)
5427 ioa_cfg
->ioa_unit_checked
= 1;
5428 else if (int_reg
& IPR_PCII_NO_HOST_RRQ
)
5429 dev_err(&ioa_cfg
->pdev
->dev
,
5430 "No Host RRQ. 0x%08X\n", int_reg
);
5432 dev_err(&ioa_cfg
->pdev
->dev
,
5433 "Permanent IOA failure. 0x%08X\n", int_reg
);
5435 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
5436 ioa_cfg
->sdt_state
= GET_DUMP
;
5438 ipr_mask_and_clear_interrupts(ioa_cfg
, ~0);
5439 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
5446 * ipr_isr_eh - Interrupt service routine error handler
5447 * @ioa_cfg: ioa config struct
5448 * @msg: message to log
5453 static void ipr_isr_eh(struct ipr_ioa_cfg
*ioa_cfg
, char *msg
, u16 number
)
5455 ioa_cfg
->errors_logged
++;
5456 dev_err(&ioa_cfg
->pdev
->dev
, "%s %d\n", msg
, number
);
5458 if (WAIT_FOR_DUMP
== ioa_cfg
->sdt_state
)
5459 ioa_cfg
->sdt_state
= GET_DUMP
;
5461 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
5464 static int ipr_process_hrrq(struct ipr_hrr_queue
*hrr_queue
, int budget
,
5465 struct list_head
*doneq
)
5469 struct ipr_cmnd
*ipr_cmd
;
5470 struct ipr_ioa_cfg
*ioa_cfg
= hrr_queue
->ioa_cfg
;
5473 /* If interrupts are disabled, ignore the interrupt */
5474 if (!hrr_queue
->allow_interrupts
)
5477 while ((be32_to_cpu(*hrr_queue
->hrrq_curr
) & IPR_HRRQ_TOGGLE_BIT
) ==
5478 hrr_queue
->toggle_bit
) {
5480 cmd_index
= (be32_to_cpu(*hrr_queue
->hrrq_curr
) &
5481 IPR_HRRQ_REQ_RESP_HANDLE_MASK
) >>
5482 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT
;
5484 if (unlikely(cmd_index
> hrr_queue
->max_cmd_id
||
5485 cmd_index
< hrr_queue
->min_cmd_id
)) {
5487 "Invalid response handle from IOA: ",
5492 ipr_cmd
= ioa_cfg
->ipr_cmnd_list
[cmd_index
];
5493 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5495 ipr_trc_hook(ipr_cmd
, IPR_TRACE_FINISH
, ioasc
);
5497 list_move_tail(&ipr_cmd
->queue
, doneq
);
5499 if (hrr_queue
->hrrq_curr
< hrr_queue
->hrrq_end
) {
5500 hrr_queue
->hrrq_curr
++;
5502 hrr_queue
->hrrq_curr
= hrr_queue
->hrrq_start
;
5503 hrr_queue
->toggle_bit
^= 1u;
5506 if (budget
> 0 && num_hrrq
>= budget
)
5513 static int ipr_iopoll(struct blk_iopoll
*iop
, int budget
)
5515 struct ipr_ioa_cfg
*ioa_cfg
;
5516 struct ipr_hrr_queue
*hrrq
;
5517 struct ipr_cmnd
*ipr_cmd
, *temp
;
5518 unsigned long hrrq_flags
;
5522 hrrq
= container_of(iop
, struct ipr_hrr_queue
, iopoll
);
5523 ioa_cfg
= hrrq
->ioa_cfg
;
5525 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
5526 completed_ops
= ipr_process_hrrq(hrrq
, budget
, &doneq
);
5528 if (completed_ops
< budget
)
5529 blk_iopoll_complete(iop
);
5530 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5532 list_for_each_entry_safe(ipr_cmd
, temp
, &doneq
, queue
) {
5533 list_del(&ipr_cmd
->queue
);
5534 del_timer(&ipr_cmd
->timer
);
5535 ipr_cmd
->fast_done(ipr_cmd
);
5538 return completed_ops
;
5542 * ipr_isr - Interrupt service routine
5544 * @devp: pointer to ioa config struct
5547 * IRQ_NONE / IRQ_HANDLED
5549 static irqreturn_t
ipr_isr(int irq
, void *devp
)
5551 struct ipr_hrr_queue
*hrrq
= (struct ipr_hrr_queue
*)devp
;
5552 struct ipr_ioa_cfg
*ioa_cfg
= hrrq
->ioa_cfg
;
5553 unsigned long hrrq_flags
= 0;
5557 struct ipr_cmnd
*ipr_cmd
, *temp
;
5558 irqreturn_t rc
= IRQ_NONE
;
5561 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
5562 /* If interrupts are disabled, ignore the interrupt */
5563 if (!hrrq
->allow_interrupts
) {
5564 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5569 if (ipr_process_hrrq(hrrq
, -1, &doneq
)) {
5572 if (!ioa_cfg
->clear_isr
)
5575 /* Clear the PCI interrupt */
5578 writel(IPR_PCII_HRRQ_UPDATED
,
5579 ioa_cfg
->regs
.clr_interrupt_reg32
);
5580 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
5581 } while (int_reg
& IPR_PCII_HRRQ_UPDATED
&&
5582 num_hrrq
++ < IPR_MAX_HRRQ_RETRIES
);
5584 } else if (rc
== IRQ_NONE
&& irq_none
== 0) {
5585 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
5587 } else if (num_hrrq
== IPR_MAX_HRRQ_RETRIES
&&
5588 int_reg
& IPR_PCII_HRRQ_UPDATED
) {
5590 "Error clearing HRRQ: ", num_hrrq
);
5597 if (unlikely(rc
== IRQ_NONE
))
5598 rc
= ipr_handle_other_interrupt(ioa_cfg
, int_reg
);
5600 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5601 list_for_each_entry_safe(ipr_cmd
, temp
, &doneq
, queue
) {
5602 list_del(&ipr_cmd
->queue
);
5603 del_timer(&ipr_cmd
->timer
);
5604 ipr_cmd
->fast_done(ipr_cmd
);
5610 * ipr_isr_mhrrq - Interrupt service routine
5612 * @devp: pointer to ioa config struct
5615 * IRQ_NONE / IRQ_HANDLED
5617 static irqreturn_t
ipr_isr_mhrrq(int irq
, void *devp
)
5619 struct ipr_hrr_queue
*hrrq
= (struct ipr_hrr_queue
*)devp
;
5620 struct ipr_ioa_cfg
*ioa_cfg
= hrrq
->ioa_cfg
;
5621 unsigned long hrrq_flags
= 0;
5622 struct ipr_cmnd
*ipr_cmd
, *temp
;
5623 irqreturn_t rc
= IRQ_NONE
;
5626 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
5628 /* If interrupts are disabled, ignore the interrupt */
5629 if (!hrrq
->allow_interrupts
) {
5630 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5634 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
5635 if ((be32_to_cpu(*hrrq
->hrrq_curr
) & IPR_HRRQ_TOGGLE_BIT
) ==
5637 if (!blk_iopoll_sched_prep(&hrrq
->iopoll
))
5638 blk_iopoll_sched(&hrrq
->iopoll
);
5639 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5643 if ((be32_to_cpu(*hrrq
->hrrq_curr
) & IPR_HRRQ_TOGGLE_BIT
) ==
5646 if (ipr_process_hrrq(hrrq
, -1, &doneq
))
5650 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
5652 list_for_each_entry_safe(ipr_cmd
, temp
, &doneq
, queue
) {
5653 list_del(&ipr_cmd
->queue
);
5654 del_timer(&ipr_cmd
->timer
);
5655 ipr_cmd
->fast_done(ipr_cmd
);
5661 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5662 * @ioa_cfg: ioa config struct
5663 * @ipr_cmd: ipr command struct
5666 * 0 on success / -1 on failure
5668 static int ipr_build_ioadl64(struct ipr_ioa_cfg
*ioa_cfg
,
5669 struct ipr_cmnd
*ipr_cmd
)
5672 struct scatterlist
*sg
;
5674 u32 ioadl_flags
= 0;
5675 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5676 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
5677 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ioadl64
;
5679 length
= scsi_bufflen(scsi_cmd
);
5683 nseg
= scsi_dma_map(scsi_cmd
);
5685 if (printk_ratelimit())
5686 dev_err(&ioa_cfg
->pdev
->dev
, "pci_map_sg failed!\n");
5690 ipr_cmd
->dma_use_sg
= nseg
;
5692 ioarcb
->data_transfer_length
= cpu_to_be32(length
);
5694 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
) * ipr_cmd
->dma_use_sg
);
5696 if (scsi_cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
5697 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
5698 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
5699 } else if (scsi_cmd
->sc_data_direction
== DMA_FROM_DEVICE
)
5700 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
5702 scsi_for_each_sg(scsi_cmd
, sg
, ipr_cmd
->dma_use_sg
, i
) {
5703 ioadl64
[i
].flags
= cpu_to_be32(ioadl_flags
);
5704 ioadl64
[i
].data_len
= cpu_to_be32(sg_dma_len(sg
));
5705 ioadl64
[i
].address
= cpu_to_be64(sg_dma_address(sg
));
5708 ioadl64
[i
-1].flags
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
5713 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5714 * @ioa_cfg: ioa config struct
5715 * @ipr_cmd: ipr command struct
5718 * 0 on success / -1 on failure
5720 static int ipr_build_ioadl(struct ipr_ioa_cfg
*ioa_cfg
,
5721 struct ipr_cmnd
*ipr_cmd
)
5724 struct scatterlist
*sg
;
5726 u32 ioadl_flags
= 0;
5727 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5728 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
5729 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
5731 length
= scsi_bufflen(scsi_cmd
);
5735 nseg
= scsi_dma_map(scsi_cmd
);
5737 dev_err(&ioa_cfg
->pdev
->dev
, "pci_map_sg failed!\n");
5741 ipr_cmd
->dma_use_sg
= nseg
;
5743 if (scsi_cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
5744 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
5745 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
5746 ioarcb
->data_transfer_length
= cpu_to_be32(length
);
5748 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
5749 } else if (scsi_cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
5750 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
5751 ioarcb
->read_data_transfer_length
= cpu_to_be32(length
);
5752 ioarcb
->read_ioadl_len
=
5753 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
5756 if (ipr_cmd
->dma_use_sg
<= ARRAY_SIZE(ioarcb
->u
.add_data
.u
.ioadl
)) {
5757 ioadl
= ioarcb
->u
.add_data
.u
.ioadl
;
5758 ioarcb
->write_ioadl_addr
= cpu_to_be32((ipr_cmd
->dma_addr
) +
5759 offsetof(struct ipr_ioarcb
, u
.add_data
));
5760 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
5763 scsi_for_each_sg(scsi_cmd
, sg
, ipr_cmd
->dma_use_sg
, i
) {
5764 ioadl
[i
].flags_and_data_len
=
5765 cpu_to_be32(ioadl_flags
| sg_dma_len(sg
));
5766 ioadl
[i
].address
= cpu_to_be32(sg_dma_address(sg
));
5769 ioadl
[i
-1].flags_and_data_len
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
5774 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5775 * @scsi_cmd: scsi command struct
5780 static u8
ipr_get_task_attributes(struct scsi_cmnd
*scsi_cmd
)
5783 u8 rc
= IPR_FLAGS_LO_UNTAGGED_TASK
;
5785 if (scsi_populate_tag_msg(scsi_cmd
, tag
)) {
5787 case MSG_SIMPLE_TAG
:
5788 rc
= IPR_FLAGS_LO_SIMPLE_TASK
;
5791 rc
= IPR_FLAGS_LO_HEAD_OF_Q_TASK
;
5793 case MSG_ORDERED_TAG
:
5794 rc
= IPR_FLAGS_LO_ORDERED_TASK
;
5803 * ipr_erp_done - Process completion of ERP for a device
5804 * @ipr_cmd: ipr command struct
5806 * This function copies the sense buffer into the scsi_cmd
5807 * struct and pushes the scsi_done function.
5812 static void ipr_erp_done(struct ipr_cmnd
*ipr_cmd
)
5814 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5815 struct ipr_resource_entry
*res
= scsi_cmd
->device
->hostdata
;
5816 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5818 if (IPR_IOASC_SENSE_KEY(ioasc
) > 0) {
5819 scsi_cmd
->result
|= (DID_ERROR
<< 16);
5820 scmd_printk(KERN_ERR
, scsi_cmd
,
5821 "Request Sense failed with IOASC: 0x%08X\n", ioasc
);
5823 memcpy(scsi_cmd
->sense_buffer
, ipr_cmd
->sense_buffer
,
5824 SCSI_SENSE_BUFFERSIZE
);
5828 if (!ipr_is_naca_model(res
))
5829 res
->needs_sync_complete
= 1;
5832 scsi_dma_unmap(ipr_cmd
->scsi_cmd
);
5833 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
5834 scsi_cmd
->scsi_done(scsi_cmd
);
5838 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5839 * @ipr_cmd: ipr command struct
5844 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd
*ipr_cmd
)
5846 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
5847 struct ipr_ioasa
*ioasa
= &ipr_cmd
->s
.ioasa
;
5848 dma_addr_t dma_addr
= ipr_cmd
->dma_addr
;
5850 memset(&ioarcb
->cmd_pkt
, 0, sizeof(struct ipr_cmd_pkt
));
5851 ioarcb
->data_transfer_length
= 0;
5852 ioarcb
->read_data_transfer_length
= 0;
5853 ioarcb
->ioadl_len
= 0;
5854 ioarcb
->read_ioadl_len
= 0;
5855 ioasa
->hdr
.ioasc
= 0;
5856 ioasa
->hdr
.residual_data_len
= 0;
5858 if (ipr_cmd
->ioa_cfg
->sis64
)
5859 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
5860 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl64
));
5862 ioarcb
->write_ioadl_addr
=
5863 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl
));
5864 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
5869 * ipr_erp_request_sense - Send request sense to a device
5870 * @ipr_cmd: ipr command struct
5872 * This function sends a request sense to a device as a result
5873 * of a check condition.
5878 static void ipr_erp_request_sense(struct ipr_cmnd
*ipr_cmd
)
5880 struct ipr_cmd_pkt
*cmd_pkt
= &ipr_cmd
->ioarcb
.cmd_pkt
;
5881 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
5883 if (IPR_IOASC_SENSE_KEY(ioasc
) > 0) {
5884 ipr_erp_done(ipr_cmd
);
5888 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd
);
5890 cmd_pkt
->request_type
= IPR_RQTYPE_SCSICDB
;
5891 cmd_pkt
->cdb
[0] = REQUEST_SENSE
;
5892 cmd_pkt
->cdb
[4] = SCSI_SENSE_BUFFERSIZE
;
5893 cmd_pkt
->flags_hi
|= IPR_FLAGS_HI_SYNC_OVERRIDE
;
5894 cmd_pkt
->flags_hi
|= IPR_FLAGS_HI_NO_ULEN_CHK
;
5895 cmd_pkt
->timeout
= cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT
/ HZ
);
5897 ipr_init_ioadl(ipr_cmd
, ipr_cmd
->sense_buffer_dma
,
5898 SCSI_SENSE_BUFFERSIZE
, IPR_IOADL_FLAGS_READ_LAST
);
5900 ipr_do_req(ipr_cmd
, ipr_erp_done
, ipr_timeout
,
5901 IPR_REQUEST_SENSE_TIMEOUT
* 2);
5905 * ipr_erp_cancel_all - Send cancel all to a device
5906 * @ipr_cmd: ipr command struct
5908 * This function sends a cancel all to a device to clear the
5909 * queue. If we are running TCQ on the device, QERR is set to 1,
5910 * which means all outstanding ops have been dropped on the floor.
5911 * Cancel all will return them to us.
5916 static void ipr_erp_cancel_all(struct ipr_cmnd
*ipr_cmd
)
5918 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
5919 struct ipr_resource_entry
*res
= scsi_cmd
->device
->hostdata
;
5920 struct ipr_cmd_pkt
*cmd_pkt
;
5924 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd
);
5926 if (!scsi_get_tag_type(scsi_cmd
->device
)) {
5927 ipr_erp_request_sense(ipr_cmd
);
5931 cmd_pkt
= &ipr_cmd
->ioarcb
.cmd_pkt
;
5932 cmd_pkt
->request_type
= IPR_RQTYPE_IOACMD
;
5933 cmd_pkt
->cdb
[0] = IPR_CANCEL_ALL_REQUESTS
;
5935 ipr_do_req(ipr_cmd
, ipr_erp_request_sense
, ipr_timeout
,
5936 IPR_CANCEL_ALL_TIMEOUT
);
5940 * ipr_dump_ioasa - Dump contents of IOASA
5941 * @ioa_cfg: ioa config struct
5942 * @ipr_cmd: ipr command struct
5943 * @res: resource entry struct
5945 * This function is invoked by the interrupt handler when ops
5946 * fail. It will log the IOASA if appropriate. Only called
5952 static void ipr_dump_ioasa(struct ipr_ioa_cfg
*ioa_cfg
,
5953 struct ipr_cmnd
*ipr_cmd
, struct ipr_resource_entry
*res
)
5957 u32 ioasc
, fd_ioasc
;
5958 struct ipr_ioasa
*ioasa
= &ipr_cmd
->s
.ioasa
;
5959 __be32
*ioasa_data
= (__be32
*)ioasa
;
5962 ioasc
= be32_to_cpu(ioasa
->hdr
.ioasc
) & IPR_IOASC_IOASC_MASK
;
5963 fd_ioasc
= be32_to_cpu(ioasa
->hdr
.fd_ioasc
) & IPR_IOASC_IOASC_MASK
;
5968 if (ioa_cfg
->log_level
< IPR_DEFAULT_LOG_LEVEL
)
5971 if (ioasc
== IPR_IOASC_BUS_WAS_RESET
&& fd_ioasc
)
5972 error_index
= ipr_get_error(fd_ioasc
);
5974 error_index
= ipr_get_error(ioasc
);
5976 if (ioa_cfg
->log_level
< IPR_MAX_LOG_LEVEL
) {
5977 /* Don't log an error if the IOA already logged one */
5978 if (ioasa
->hdr
.ilid
!= 0)
5981 if (!ipr_is_gscsi(res
))
5984 if (ipr_error_table
[error_index
].log_ioasa
== 0)
5988 ipr_res_err(ioa_cfg
, res
, "%s\n", ipr_error_table
[error_index
].error
);
5990 data_len
= be16_to_cpu(ioasa
->hdr
.ret_stat_len
);
5991 if (ioa_cfg
->sis64
&& sizeof(struct ipr_ioasa64
) < data_len
)
5992 data_len
= sizeof(struct ipr_ioasa64
);
5993 else if (!ioa_cfg
->sis64
&& sizeof(struct ipr_ioasa
) < data_len
)
5994 data_len
= sizeof(struct ipr_ioasa
);
5996 ipr_err("IOASA Dump:\n");
5998 for (i
= 0; i
< data_len
/ 4; i
+= 4) {
5999 ipr_err("%08X: %08X %08X %08X %08X\n", i
*4,
6000 be32_to_cpu(ioasa_data
[i
]),
6001 be32_to_cpu(ioasa_data
[i
+1]),
6002 be32_to_cpu(ioasa_data
[i
+2]),
6003 be32_to_cpu(ioasa_data
[i
+3]));
6008 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6010 * @sense_buf: sense data buffer
6015 static void ipr_gen_sense(struct ipr_cmnd
*ipr_cmd
)
6018 u8
*sense_buf
= ipr_cmd
->scsi_cmd
->sense_buffer
;
6019 struct ipr_resource_entry
*res
= ipr_cmd
->scsi_cmd
->device
->hostdata
;
6020 struct ipr_ioasa
*ioasa
= &ipr_cmd
->s
.ioasa
;
6021 u32 ioasc
= be32_to_cpu(ioasa
->hdr
.ioasc
);
6023 memset(sense_buf
, 0, SCSI_SENSE_BUFFERSIZE
);
6025 if (ioasc
>= IPR_FIRST_DRIVER_IOASC
)
6028 ipr_cmd
->scsi_cmd
->result
= SAM_STAT_CHECK_CONDITION
;
6030 if (ipr_is_vset_device(res
) &&
6031 ioasc
== IPR_IOASC_MED_DO_NOT_REALLOC
&&
6032 ioasa
->u
.vset
.failing_lba_hi
!= 0) {
6033 sense_buf
[0] = 0x72;
6034 sense_buf
[1] = IPR_IOASC_SENSE_KEY(ioasc
);
6035 sense_buf
[2] = IPR_IOASC_SENSE_CODE(ioasc
);
6036 sense_buf
[3] = IPR_IOASC_SENSE_QUAL(ioasc
);
6040 sense_buf
[9] = 0x0A;
6041 sense_buf
[10] = 0x80;
6043 failing_lba
= be32_to_cpu(ioasa
->u
.vset
.failing_lba_hi
);
6045 sense_buf
[12] = (failing_lba
& 0xff000000) >> 24;
6046 sense_buf
[13] = (failing_lba
& 0x00ff0000) >> 16;
6047 sense_buf
[14] = (failing_lba
& 0x0000ff00) >> 8;
6048 sense_buf
[15] = failing_lba
& 0x000000ff;
6050 failing_lba
= be32_to_cpu(ioasa
->u
.vset
.failing_lba_lo
);
6052 sense_buf
[16] = (failing_lba
& 0xff000000) >> 24;
6053 sense_buf
[17] = (failing_lba
& 0x00ff0000) >> 16;
6054 sense_buf
[18] = (failing_lba
& 0x0000ff00) >> 8;
6055 sense_buf
[19] = failing_lba
& 0x000000ff;
6057 sense_buf
[0] = 0x70;
6058 sense_buf
[2] = IPR_IOASC_SENSE_KEY(ioasc
);
6059 sense_buf
[12] = IPR_IOASC_SENSE_CODE(ioasc
);
6060 sense_buf
[13] = IPR_IOASC_SENSE_QUAL(ioasc
);
6062 /* Illegal request */
6063 if ((IPR_IOASC_SENSE_KEY(ioasc
) == 0x05) &&
6064 (be32_to_cpu(ioasa
->hdr
.ioasc_specific
) & IPR_FIELD_POINTER_VALID
)) {
6065 sense_buf
[7] = 10; /* additional length */
6067 /* IOARCB was in error */
6068 if (IPR_IOASC_SENSE_CODE(ioasc
) == 0x24)
6069 sense_buf
[15] = 0xC0;
6070 else /* Parameter data was invalid */
6071 sense_buf
[15] = 0x80;
6074 ((IPR_FIELD_POINTER_MASK
&
6075 be32_to_cpu(ioasa
->hdr
.ioasc_specific
)) >> 8) & 0xff;
6077 (IPR_FIELD_POINTER_MASK
&
6078 be32_to_cpu(ioasa
->hdr
.ioasc_specific
)) & 0xff;
6080 if (ioasc
== IPR_IOASC_MED_DO_NOT_REALLOC
) {
6081 if (ipr_is_vset_device(res
))
6082 failing_lba
= be32_to_cpu(ioasa
->u
.vset
.failing_lba_lo
);
6084 failing_lba
= be32_to_cpu(ioasa
->u
.dasd
.failing_lba
);
6086 sense_buf
[0] |= 0x80; /* Or in the Valid bit */
6087 sense_buf
[3] = (failing_lba
& 0xff000000) >> 24;
6088 sense_buf
[4] = (failing_lba
& 0x00ff0000) >> 16;
6089 sense_buf
[5] = (failing_lba
& 0x0000ff00) >> 8;
6090 sense_buf
[6] = failing_lba
& 0x000000ff;
6093 sense_buf
[7] = 6; /* additional length */
/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;

	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	else
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	return 1;
}
6128 * ipr_erp_start - Process an error response for a SCSI op
6129 * @ioa_cfg: ioa config struct
6130 * @ipr_cmd: ipr command struct
6132 * This function determines whether or not to initiate ERP
6133 * on the affected device.
6138 static void ipr_erp_start(struct ipr_ioa_cfg
*ioa_cfg
,
6139 struct ipr_cmnd
*ipr_cmd
)
6141 struct scsi_cmnd
*scsi_cmd
= ipr_cmd
->scsi_cmd
;
6142 struct ipr_resource_entry
*res
= scsi_cmd
->device
->hostdata
;
6143 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
6144 u32 masked_ioasc
= ioasc
& IPR_IOASC_IOASC_MASK
;
6147 ipr_scsi_eh_done(ipr_cmd
);
6151 if (!ipr_is_gscsi(res
) && masked_ioasc
!= IPR_IOASC_HW_DEV_BUS_STATUS
)
6152 ipr_gen_sense(ipr_cmd
);
6154 ipr_dump_ioasa(ioa_cfg
, ipr_cmd
, res
);
6156 switch (masked_ioasc
) {
6157 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST
:
6158 if (ipr_is_naca_model(res
))
6159 scsi_cmd
->result
|= (DID_ABORT
<< 16);
6161 scsi_cmd
->result
|= (DID_IMM_RETRY
<< 16);
6163 case IPR_IOASC_IR_RESOURCE_HANDLE
:
6164 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA
:
6165 scsi_cmd
->result
|= (DID_NO_CONNECT
<< 16);
6167 case IPR_IOASC_HW_SEL_TIMEOUT
:
6168 scsi_cmd
->result
|= (DID_NO_CONNECT
<< 16);
6169 if (!ipr_is_naca_model(res
))
6170 res
->needs_sync_complete
= 1;
6172 case IPR_IOASC_SYNC_REQUIRED
:
6174 res
->needs_sync_complete
= 1;
6175 scsi_cmd
->result
|= (DID_IMM_RETRY
<< 16);
6177 case IPR_IOASC_MED_DO_NOT_REALLOC
: /* prevent retries */
6178 case IPR_IOASA_IR_DUAL_IOA_DISABLED
:
6179 scsi_cmd
->result
|= (DID_PASSTHROUGH
<< 16);
6181 case IPR_IOASC_BUS_WAS_RESET
:
6182 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER
:
6184 * Report the bus reset and ask for a retry. The device
6185 * will give CC/UA the next command.
6187 if (!res
->resetting_device
)
6188 scsi_report_bus_reset(ioa_cfg
->host
, scsi_cmd
->device
->channel
);
6189 scsi_cmd
->result
|= (DID_ERROR
<< 16);
6190 if (!ipr_is_naca_model(res
))
6191 res
->needs_sync_complete
= 1;
6193 case IPR_IOASC_HW_DEV_BUS_STATUS
:
6194 scsi_cmd
->result
|= IPR_IOASC_SENSE_STATUS(ioasc
);
6195 if (IPR_IOASC_SENSE_STATUS(ioasc
) == SAM_STAT_CHECK_CONDITION
) {
6196 if (!ipr_get_autosense(ipr_cmd
)) {
6197 if (!ipr_is_naca_model(res
)) {
6198 ipr_erp_cancel_all(ipr_cmd
);
6203 if (!ipr_is_naca_model(res
))
6204 res
->needs_sync_complete
= 1;
6206 case IPR_IOASC_NR_INIT_CMD_REQUIRED
:
6209 if (IPR_IOASC_SENSE_KEY(ioasc
) > RECOVERED_ERROR
)
6210 scsi_cmd
->result
|= (DID_ERROR
<< 16);
6211 if (!ipr_is_vset_device(res
) && !ipr_is_naca_model(res
))
6212 res
->needs_sync_complete
= 1;
6216 scsi_dma_unmap(ipr_cmd
->scsi_cmd
);
6217 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6218 scsi_cmd
->scsi_done(scsi_cmd
);
/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	unsigned long lock_flags;

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(scsi_cmd);

		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		scsi_cmd->scsi_done(scsi_cmd);
		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
	} else {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		spin_lock(&ipr_cmd->hrrq->_lock);
		ipr_erp_start(ioa_cfg, ipr_cmd);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	}
}
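/*
 * Commentary on the locking above (hedged, not from the original source):
 * the good-path completion takes only hrrq->lock to return the command block
 * to hrrq_free_q and complete the scsi_cmnd, while the error path takes the
 * Scsi_Host lock and then the per-queue _lock before running ipr_erp_start(),
 * since ERP may touch per-device resource state and report bus resets to the
 * midlayer.
 */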
6257 * ipr_queuecommand - Queue a mid-layer request
6258 * @shost: scsi host struct
6259 * @scsi_cmd: scsi command struct
6261 * This function queues a request generated by the mid-layer.
6265 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6266 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6268 static int ipr_queuecommand(struct Scsi_Host
*shost
,
6269 struct scsi_cmnd
*scsi_cmd
)
6271 struct ipr_ioa_cfg
*ioa_cfg
;
6272 struct ipr_resource_entry
*res
;
6273 struct ipr_ioarcb
*ioarcb
;
6274 struct ipr_cmnd
*ipr_cmd
;
6275 unsigned long hrrq_flags
, lock_flags
;
6277 struct ipr_hrr_queue
*hrrq
;
6280 ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
6282 scsi_cmd
->result
= (DID_OK
<< 16);
6283 res
= scsi_cmd
->device
->hostdata
;
6285 if (ipr_is_gata(res
) && res
->sata_port
) {
6286 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
6287 rc
= ata_sas_queuecmd(scsi_cmd
, res
->sata_port
->ap
);
6288 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
6292 hrrq_id
= ipr_get_hrrq_index(ioa_cfg
);
6293 hrrq
= &ioa_cfg
->hrrq
[hrrq_id
];
6295 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
6297 * We are currently blocking all devices due to a host reset
6298 * We have told the host to stop giving us new requests, but
6299 * ERP ops don't count. FIXME
6301 if (unlikely(!hrrq
->allow_cmds
&& !hrrq
->ioa_is_dead
&& !hrrq
->removing_ioa
)) {
6302 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6303 return SCSI_MLQUEUE_HOST_BUSY
;
6307 * FIXME - Create scsi_set_host_offline interface
6308 * and the ioa_is_dead check can be removed
6310 if (unlikely(hrrq
->ioa_is_dead
|| hrrq
->removing_ioa
|| !res
)) {
6311 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6315 ipr_cmd
= __ipr_get_free_ipr_cmnd(hrrq
);
6316 if (ipr_cmd
== NULL
) {
6317 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6318 return SCSI_MLQUEUE_HOST_BUSY
;
6320 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6322 ipr_init_ipr_cmnd(ipr_cmd
, ipr_scsi_done
);
6323 ioarcb
= &ipr_cmd
->ioarcb
;
6325 memcpy(ioarcb
->cmd_pkt
.cdb
, scsi_cmd
->cmnd
, scsi_cmd
->cmd_len
);
6326 ipr_cmd
->scsi_cmd
= scsi_cmd
;
6327 ipr_cmd
->done
= ipr_scsi_eh_done
;
6329 if (ipr_is_gscsi(res
) || ipr_is_vset_device(res
)) {
6330 if (scsi_cmd
->underflow
== 0)
6331 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_ULEN_CHK
;
6333 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_LINK_DESC
;
6334 if (ipr_is_gscsi(res
) && res
->reset_occurred
) {
6335 res
->reset_occurred
= 0;
6336 ioarcb
->cmd_pkt
.flags_lo
|= IPR_FLAGS_LO_DELAY_AFTER_RST
;
6338 ioarcb
->cmd_pkt
.flags_lo
|= IPR_FLAGS_LO_ALIGNED_BFR
;
6339 ioarcb
->cmd_pkt
.flags_lo
|= ipr_get_task_attributes(scsi_cmd
);
6342 if (scsi_cmd
->cmnd
[0] >= 0xC0 &&
6343 (!ipr_is_gscsi(res
) || scsi_cmd
->cmnd
[0] == IPR_QUERY_RSRC_STATE
)) {
6344 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
6348 rc
= ipr_build_ioadl64(ioa_cfg
, ipr_cmd
);
6350 rc
= ipr_build_ioadl(ioa_cfg
, ipr_cmd
);
6352 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
6353 if (unlikely(rc
|| (!hrrq
->allow_cmds
&& !hrrq
->ioa_is_dead
))) {
6354 list_add_tail(&ipr_cmd
->queue
, &hrrq
->hrrq_free_q
);
6355 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6357 scsi_dma_unmap(scsi_cmd
);
6358 return SCSI_MLQUEUE_HOST_BUSY
;
6361 if (unlikely(hrrq
->ioa_is_dead
)) {
6362 list_add_tail(&ipr_cmd
->queue
, &hrrq
->hrrq_free_q
);
6363 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6364 scsi_dma_unmap(scsi_cmd
);
6368 ioarcb
->res_handle
= res
->res_handle
;
6369 if (res
->needs_sync_complete
) {
6370 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_SYNC_COMPLETE
;
6371 res
->needs_sync_complete
= 0;
6373 list_add_tail(&ipr_cmd
->queue
, &hrrq
->hrrq_pending_q
);
6374 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, IPR_GET_RES_PHYS_LOC(res
));
6375 ipr_send_command(ipr_cmd
);
6376 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
6380 spin_lock_irqsave(hrrq
->lock
, hrrq_flags
);
6381 memset(scsi_cmd
->sense_buffer
, 0, SCSI_SENSE_BUFFERSIZE
);
6382 scsi_cmd
->result
= (DID_NO_CONNECT
<< 16);
6383 scsi_cmd
->scsi_done(scsi_cmd
);
6384 spin_unlock_irqrestore(hrrq
->lock
, hrrq_flags
);
/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res)) {
		if (cmd == HDIO_GET_IDENTITY)
			return -ENOTTY;
		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
	}

	return -EINVAL;
}
/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 *	pointer to buffer with description string
 **/
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME,
};
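/*
 * Illustrative sketch (not part of this driver): a scsi_host_template such as
 * driver_template above is normally bound to a Scsi_Host during PCI probe.
 * The function below is hypothetical and only shows the generic SCSI midlayer
 * calls involved; the real probe path does considerably more adapter setup.
 */
static int ipr_sketch_register_host(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;

	shost = scsi_host_alloc(&driver_template, sizeof(struct ipr_ioa_cfg));
	if (!shost)
		return -ENOMEM;

	/* adapter-specific initialization of shost->hostdata would go here */

	if (scsi_add_host(shost, &pdev->dev)) {
		scsi_host_put(shost);
		return -EIO;
	}

	scsi_scan_host(shost);
	return 0;
}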
6463 * ipr_ata_phy_reset - libata phy_reset handler
6464 * @ap: ata port to reset
6467 static void ipr_ata_phy_reset(struct ata_port
*ap
)
6469 unsigned long flags
;
6470 struct ipr_sata_port
*sata_port
= ap
->private_data
;
6471 struct ipr_resource_entry
*res
= sata_port
->res
;
6472 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6476 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6477 while (ioa_cfg
->in_reset_reload
) {
6478 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6479 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
6480 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6483 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
)
6486 rc
= ipr_device_reset(ioa_cfg
, res
);
6489 ap
->link
.device
[0].class = ATA_DEV_NONE
;
6493 ap
->link
.device
[0].class = res
->ata_class
;
6494 if (ap
->link
.device
[0].class == ATA_DEV_UNKNOWN
)
6495 ap
->link
.device
[0].class = ATA_DEV_NONE
;
6498 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6503 * ipr_ata_post_internal - Cleanup after an internal command
6504 * @qc: ATA queued command
6509 static void ipr_ata_post_internal(struct ata_queued_cmd
*qc
)
6511 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
6512 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6513 struct ipr_cmnd
*ipr_cmd
;
6514 struct ipr_hrr_queue
*hrrq
;
6515 unsigned long flags
;
6517 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6518 while (ioa_cfg
->in_reset_reload
) {
6519 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6520 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
6521 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6524 for_each_hrrq(hrrq
, ioa_cfg
) {
6525 spin_lock(&hrrq
->_lock
);
6526 list_for_each_entry(ipr_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
6527 if (ipr_cmd
->qc
== qc
) {
6528 ipr_device_reset(ioa_cfg
, sata_port
->res
);
6532 spin_unlock(&hrrq
->_lock
);
6534 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
/**
 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
 * @regs:	destination
 * @tf:	source ATA taskfile
 *
 * Return value:
 *	none
 **/
static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
			     struct ata_taskfile *tf)
{
	regs->feature = tf->feature;
	regs->nsect = tf->nsect;
	regs->lbal = tf->lbal;
	regs->lbam = tf->lbam;
	regs->lbah = tf->lbah;
	regs->device = tf->device;
	regs->command = tf->command;
	regs->hob_feature = tf->hob_feature;
	regs->hob_nsect = tf->hob_nsect;
	regs->hob_lbal = tf->hob_lbal;
	regs->hob_lbam = tf->hob_lbam;
	regs->hob_lbah = tf->hob_lbah;
	regs->ctl = tf->ctl;
}
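/*
 * Illustrative sketch (not part of the driver): the taskfile fields copied
 * above carry a 48-bit LBA split across the low-order and high-order-byte
 * (HOB) registers. The hypothetical helper below shows how those six bytes
 * compose the full LBA.
 */
static inline u64 ipr_sketch_tf_to_lba48(const struct ata_taskfile *tf)
{
	return ((u64)tf->hob_lbah << 40) | ((u64)tf->hob_lbam << 32) |
	       ((u64)tf->hob_lbal << 24) | ((u64)tf->lbah << 16) |
	       ((u64)tf->lbam << 8) | tf->lbal;
}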
6564 * ipr_sata_done - done function for SATA commands
6565 * @ipr_cmd: ipr command struct
6567 * This function is invoked by the interrupt handler for
6568 * ops generated by the SCSI mid-layer to SATA devices
6573 static void ipr_sata_done(struct ipr_cmnd
*ipr_cmd
)
6575 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
6576 struct ata_queued_cmd
*qc
= ipr_cmd
->qc
;
6577 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
6578 struct ipr_resource_entry
*res
= sata_port
->res
;
6579 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
6581 spin_lock(&ipr_cmd
->hrrq
->_lock
);
6582 if (ipr_cmd
->ioa_cfg
->sis64
)
6583 memcpy(&sata_port
->ioasa
, &ipr_cmd
->s
.ioasa64
.u
.gata
,
6584 sizeof(struct ipr_ioasa_gata
));
6586 memcpy(&sata_port
->ioasa
, &ipr_cmd
->s
.ioasa
.u
.gata
,
6587 sizeof(struct ipr_ioasa_gata
));
6588 ipr_dump_ioasa(ioa_cfg
, ipr_cmd
, res
);
6590 if (be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc_specific
) & IPR_ATA_DEVICE_WAS_RESET
)
6591 scsi_report_device_reset(ioa_cfg
->host
, res
->bus
, res
->target
);
6593 if (IPR_IOASC_SENSE_KEY(ioasc
) > RECOVERED_ERROR
)
6594 qc
->err_mask
|= __ac_err_mask(sata_port
->ioasa
.status
);
6596 qc
->err_mask
|= ac_err_mask(sata_port
->ioasa
.status
);
6597 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6598 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
6599 ata_qc_complete(qc
);
6603 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6604 * @ipr_cmd: ipr command struct
6605 * @qc: ATA queued command
6608 static void ipr_build_ata_ioadl64(struct ipr_cmnd
*ipr_cmd
,
6609 struct ata_queued_cmd
*qc
)
6611 u32 ioadl_flags
= 0;
6612 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6613 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ata_ioadl
.ioadl64
;
6614 struct ipr_ioadl64_desc
*last_ioadl64
= NULL
;
6615 int len
= qc
->nbytes
;
6616 struct scatterlist
*sg
;
6618 dma_addr_t dma_addr
= ipr_cmd
->dma_addr
;
6623 if (qc
->dma_dir
== DMA_TO_DEVICE
) {
6624 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
6625 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6626 } else if (qc
->dma_dir
== DMA_FROM_DEVICE
)
6627 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
6629 ioarcb
->data_transfer_length
= cpu_to_be32(len
);
6631 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
) * ipr_cmd
->dma_use_sg
);
6632 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
6633 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ata_ioadl
.ioadl64
));
6635 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
6636 ioadl64
->flags
= cpu_to_be32(ioadl_flags
);
6637 ioadl64
->data_len
= cpu_to_be32(sg_dma_len(sg
));
6638 ioadl64
->address
= cpu_to_be64(sg_dma_address(sg
));
6640 last_ioadl64
= ioadl64
;
6644 if (likely(last_ioadl64
))
6645 last_ioadl64
->flags
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
6649 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6650 * @ipr_cmd: ipr command struct
6651 * @qc: ATA queued command
6654 static void ipr_build_ata_ioadl(struct ipr_cmnd
*ipr_cmd
,
6655 struct ata_queued_cmd
*qc
)
6657 u32 ioadl_flags
= 0;
6658 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6659 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
6660 struct ipr_ioadl_desc
*last_ioadl
= NULL
;
6661 int len
= qc
->nbytes
;
6662 struct scatterlist
*sg
;
6668 if (qc
->dma_dir
== DMA_TO_DEVICE
) {
6669 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
6670 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6671 ioarcb
->data_transfer_length
= cpu_to_be32(len
);
6673 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6674 } else if (qc
->dma_dir
== DMA_FROM_DEVICE
) {
6675 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
6676 ioarcb
->read_data_transfer_length
= cpu_to_be32(len
);
6677 ioarcb
->read_ioadl_len
=
6678 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6681 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
6682 ioadl
->flags_and_data_len
= cpu_to_be32(ioadl_flags
| sg_dma_len(sg
));
6683 ioadl
->address
= cpu_to_be32(sg_dma_address(sg
));
6689 if (likely(last_ioadl
))
6690 last_ioadl
->flags_and_data_len
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
/**
 * ipr_qc_defer - Get a free ipr_cmd
 * @qc:	queued command
 **/
static int ipr_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_hrr_queue *hrrq;
	int hrrq_id;

	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
	hrrq = &ioa_cfg->hrrq[hrrq_id];

	qc->lldd_task = NULL;
	spin_lock(&hrrq->_lock);
	if (unlikely(hrrq->ioa_is_dead)) {
		spin_unlock(&hrrq->_lock);
		return 0;
	}

	if (unlikely(!hrrq->allow_cmds)) {
		spin_unlock(&hrrq->_lock);
		return ATA_DEFER_LINK;
	}

	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
	if (ipr_cmd == NULL) {
		spin_unlock(&hrrq->_lock);
		return ATA_DEFER_LINK;
	}

	qc->lldd_task = ipr_cmd;
	spin_unlock(&hrrq->_lock);
	return 0;
}
6736 * ipr_qc_issue - Issue a SATA qc to a device
6737 * @qc: queued command
6742 static unsigned int ipr_qc_issue(struct ata_queued_cmd
*qc
)
6744 struct ata_port
*ap
= qc
->ap
;
6745 struct ipr_sata_port
*sata_port
= ap
->private_data
;
6746 struct ipr_resource_entry
*res
= sata_port
->res
;
6747 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6748 struct ipr_cmnd
*ipr_cmd
;
6749 struct ipr_ioarcb
*ioarcb
;
6750 struct ipr_ioarcb_ata_regs
*regs
;
6752 if (qc
->lldd_task
== NULL
)
6755 ipr_cmd
= qc
->lldd_task
;
6756 if (ipr_cmd
== NULL
)
6757 return AC_ERR_SYSTEM
;
6759 qc
->lldd_task
= NULL
;
6760 spin_lock(&ipr_cmd
->hrrq
->_lock
);
6761 if (unlikely(!ipr_cmd
->hrrq
->allow_cmds
||
6762 ipr_cmd
->hrrq
->ioa_is_dead
)) {
6763 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6764 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
6765 return AC_ERR_SYSTEM
;
6768 ipr_init_ipr_cmnd(ipr_cmd
, ipr_lock_and_done
);
6769 ioarcb
= &ipr_cmd
->ioarcb
;
6771 if (ioa_cfg
->sis64
) {
6772 regs
= &ipr_cmd
->i
.ata_ioadl
.regs
;
6773 ioarcb
->add_cmd_parms_offset
= cpu_to_be16(sizeof(*ioarcb
));
6775 regs
= &ioarcb
->u
.add_data
.u
.regs
;
6777 memset(regs
, 0, sizeof(*regs
));
6778 ioarcb
->add_cmd_parms_len
= cpu_to_be16(sizeof(*regs
));
6780 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
6782 ipr_cmd
->done
= ipr_sata_done
;
6783 ipr_cmd
->ioarcb
.res_handle
= res
->res_handle
;
6784 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_ATA_PASSTHRU
;
6785 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_LINK_DESC
;
6786 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_ULEN_CHK
;
6787 ipr_cmd
->dma_use_sg
= qc
->n_elem
;
6790 ipr_build_ata_ioadl64(ipr_cmd
, qc
);
6792 ipr_build_ata_ioadl(ipr_cmd
, qc
);
6794 regs
->flags
|= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION
;
6795 ipr_copy_sata_tf(regs
, &qc
->tf
);
6796 memcpy(ioarcb
->cmd_pkt
.cdb
, qc
->cdb
, IPR_MAX_CDB_LEN
);
6797 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, IPR_GET_RES_PHYS_LOC(res
));
6799 switch (qc
->tf
.protocol
) {
6800 case ATA_PROT_NODATA
:
6805 regs
->flags
|= IPR_ATA_FLAG_XFER_TYPE_DMA
;
6808 case ATAPI_PROT_PIO
:
6809 case ATAPI_PROT_NODATA
:
6810 regs
->flags
|= IPR_ATA_FLAG_PACKET_CMD
;
6813 case ATAPI_PROT_DMA
:
6814 regs
->flags
|= IPR_ATA_FLAG_PACKET_CMD
;
6815 regs
->flags
|= IPR_ATA_FLAG_XFER_TYPE_DMA
;
6820 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
6821 return AC_ERR_INVALID
;
6824 ipr_send_command(ipr_cmd
);
6825 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
/**
 * ipr_qc_fill_rtf - Read result TF
 * @qc:	ATA queued command
 **/
static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioasa_gata *g = &sata_port->ioasa;
	struct ata_taskfile *tf = &qc->result_tf;

	tf->feature = g->error;
	tf->nsect = g->nsect;
	tf->lbal = g->lbal;
	tf->lbam = g->lbam;
	tf->lbah = g->lbah;
	tf->device = g->device;
	tf->command = g->status;
	tf->hob_nsect = g->hob_nsect;
	tf->hob_lbal = g->hob_lbal;
	tf->hob_lbam = g->hob_lbam;
	tf->hob_lbah = g->hob_lbah;

	return true;
}

static struct ata_port_operations ipr_sata_ops = {
	.phy_reset = ipr_ata_phy_reset,
	.hardreset = ipr_sata_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.qc_prep = ata_noop_qc_prep,
	.qc_defer = ipr_qc_defer,
	.qc_issue = ipr_qc_issue,
	.qc_fill_rtf = ipr_qc_fill_rtf,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};

static struct ata_port_info sata_port_info = {
	.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask = ATA_PIO4_ONLY,
	.mwdma_mask = ATA_MWDMA2,
	.udma_mask = ATA_UDMA6,
	.port_ops = &ipr_sata_ops
};
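/*
 * Commentary (hedged, not from the original source): ipr_sata_ops and
 * sata_port_info above are what the driver hands to libata when it brings up
 * a SATA resource through the libata SAS helpers (note port_start/port_stop).
 * Because the IOA proxies the taskfile, qc_issue/qc_fill_rtf stand in for the
 * usual SFF register accessors, and qc_defer reserves an ipr_cmnd before
 * libata issues the qc.
 */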
6878 #ifdef CONFIG_PPC_PSERIES
6879 static const u16 ipr_blocked_processors
[] = {
6891 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6892 * @ioa_cfg: ioa cfg struct
6894 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6895 * certain pSeries hardware. This function determines if the given
6896 * adapter is in one of these confgurations or not.
6899 * 1 if adapter is not supported / 0 if adapter is supported
6901 static int ipr_invalid_adapter(struct ipr_ioa_cfg
*ioa_cfg
)
6905 if ((ioa_cfg
->type
== 0x5702) && (ioa_cfg
->pdev
->revision
< 4)) {
6906 for (i
= 0; i
< ARRAY_SIZE(ipr_blocked_processors
); i
++) {
6907 if (pvr_version_is(ipr_blocked_processors
[i
]))
6914 #define ipr_invalid_adapter(ioa_cfg) 0
6918 * ipr_ioa_bringdown_done - IOA bring down completion.
6919 * @ipr_cmd: ipr command struct
6921 * This function processes the completion of an adapter bring down.
6922 * It wakes any reset sleepers.
6927 static int ipr_ioa_bringdown_done(struct ipr_cmnd
*ipr_cmd
)
6929 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
6933 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].removing_ioa
) {
6935 spin_unlock_irq(ioa_cfg
->host
->host_lock
);
6936 scsi_unblock_requests(ioa_cfg
->host
);
6937 spin_lock_irq(ioa_cfg
->host
->host_lock
);
6940 ioa_cfg
->in_reset_reload
= 0;
6941 ioa_cfg
->reset_retries
= 0;
6942 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
6943 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
6944 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 1;
6945 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
6949 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6950 wake_up_all(&ioa_cfg
->reset_wait_q
);
6953 return IPR_RC_JOB_RETURN
;
6957 * ipr_ioa_reset_done - IOA reset completion.
6958 * @ipr_cmd: ipr command struct
6960 * This function processes the completion of an adapter reset.
6961 * It schedules any necessary mid-layer add/removes and
6962 * wakes any reset sleepers.
6967 static int ipr_ioa_reset_done(struct ipr_cmnd
*ipr_cmd
)
6969 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
6970 struct ipr_resource_entry
*res
;
6971 struct ipr_hostrcb
*hostrcb
, *temp
;
6975 ioa_cfg
->in_reset_reload
= 0;
6976 for (j
= 0; j
< ioa_cfg
->hrrq_num
; j
++) {
6977 spin_lock(&ioa_cfg
->hrrq
[j
]._lock
);
6978 ioa_cfg
->hrrq
[j
].allow_cmds
= 1;
6979 spin_unlock(&ioa_cfg
->hrrq
[j
]._lock
);
6982 ioa_cfg
->reset_cmd
= NULL
;
6983 ioa_cfg
->doorbell
|= IPR_RUNTIME_RESET
;
6985 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
6986 if (ioa_cfg
->allow_ml_add_del
&& (res
->add_to_ml
|| res
->del_from_ml
)) {
6991 schedule_work(&ioa_cfg
->work_q
);
6993 list_for_each_entry_safe(hostrcb
, temp
, &ioa_cfg
->hostrcb_free_q
, queue
) {
6994 list_del(&hostrcb
->queue
);
6995 if (i
++ < IPR_NUM_LOG_HCAMS
)
6996 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_LOG_DATA
, hostrcb
);
6998 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
, hostrcb
);
7001 scsi_report_bus_reset(ioa_cfg
->host
, IPR_VSET_BUS
);
7002 dev_info(&ioa_cfg
->pdev
->dev
, "IOA initialized.\n");
7004 ioa_cfg
->reset_retries
= 0;
7005 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
7006 wake_up_all(&ioa_cfg
->reset_wait_q
);
7008 spin_unlock(ioa_cfg
->host
->host_lock
);
7009 scsi_unblock_requests(ioa_cfg
->host
);
7010 spin_lock(ioa_cfg
->host
->host_lock
);
7012 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
)
7013 scsi_block_requests(ioa_cfg
->host
);
7016 return IPR_RC_JOB_RETURN
;
/**
 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
 * @supported_dev:	supported device struct
 * @vpids:		vendor product id struct
 *
 * Return value:
 *	none
 **/
static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
				 struct ipr_std_inq_vpids *vpids)
{
	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
	supported_dev->num_records = 1;
	supported_dev->data_length =
		cpu_to_be16(sizeof(struct ipr_supported_device));
	supported_dev->reserved = 0;
}
7039 * ipr_set_supported_devs - Send Set Supported Devices for a device
7040 * @ipr_cmd: ipr command struct
7042 * This function sends a Set Supported Devices to the adapter
7045 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7047 static int ipr_set_supported_devs(struct ipr_cmnd
*ipr_cmd
)
7049 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7050 struct ipr_supported_device
*supp_dev
= &ioa_cfg
->vpd_cbs
->supp_dev
;
7051 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7052 struct ipr_resource_entry
*res
= ipr_cmd
->u
.res
;
7054 ipr_cmd
->job_step
= ipr_ioa_reset_done
;
7056 list_for_each_entry_continue(res
, &ioa_cfg
->used_res_q
, queue
) {
7057 if (!ipr_is_scsi_disk(res
))
7060 ipr_cmd
->u
.res
= res
;
7061 ipr_set_sup_dev_dflt(supp_dev
, &res
->std_inq_data
.vpids
);
7063 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7064 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
7065 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7067 ioarcb
->cmd_pkt
.cdb
[0] = IPR_SET_SUPPORTED_DEVICES
;
7068 ioarcb
->cmd_pkt
.cdb
[1] = IPR_SET_ALL_SUPPORTED_DEVICES
;
7069 ioarcb
->cmd_pkt
.cdb
[7] = (sizeof(struct ipr_supported_device
) >> 8) & 0xff;
7070 ioarcb
->cmd_pkt
.cdb
[8] = sizeof(struct ipr_supported_device
) & 0xff;
7072 ipr_init_ioadl(ipr_cmd
,
7073 ioa_cfg
->vpd_cbs_dma
+
7074 offsetof(struct ipr_misc_cbs
, supp_dev
),
7075 sizeof(struct ipr_supported_device
),
7076 IPR_IOADL_FLAGS_WRITE_LAST
);
7078 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
7079 IPR_SET_SUP_DEVICE_TIMEOUT
);
7081 if (!ioa_cfg
->sis64
)
7082 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7084 return IPR_RC_JOB_RETURN
;
7088 return IPR_RC_JOB_CONTINUE
;
7092 * ipr_get_mode_page - Locate specified mode page
7093 * @mode_pages: mode page buffer
7094 * @page_code: page code to find
7095 * @len: minimum required length for mode page
7098 * pointer to mode page / NULL on failure
7100 static void *ipr_get_mode_page(struct ipr_mode_pages
*mode_pages
,
7101 u32 page_code
, u32 len
)
7103 struct ipr_mode_page_hdr
*mode_hdr
;
7107 if (!mode_pages
|| (mode_pages
->hdr
.length
== 0))
7110 length
= (mode_pages
->hdr
.length
+ 1) - 4 - mode_pages
->hdr
.block_desc_len
;
7111 mode_hdr
= (struct ipr_mode_page_hdr
*)
7112 (mode_pages
->data
+ mode_pages
->hdr
.block_desc_len
);
7115 if (IPR_GET_MODE_PAGE_CODE(mode_hdr
) == page_code
) {
7116 if (mode_hdr
->page_length
>= (len
- sizeof(struct ipr_mode_page_hdr
)))
7120 page_length
= (sizeof(struct ipr_mode_page_hdr
) +
7121 mode_hdr
->page_length
);
7122 length
-= page_length
;
7123 mode_hdr
= (struct ipr_mode_page_hdr
*)
7124 ((unsigned long)mode_hdr
+ page_length
);
7131 * ipr_check_term_power - Check for term power errors
7132 * @ioa_cfg: ioa config struct
7133 * @mode_pages: IOAFP mode pages buffer
7135 * Check the IOAFP's mode page 28 for term power errors
7140 static void ipr_check_term_power(struct ipr_ioa_cfg
*ioa_cfg
,
7141 struct ipr_mode_pages
*mode_pages
)
7145 struct ipr_dev_bus_entry
*bus
;
7146 struct ipr_mode_page28
*mode_page
;
7148 mode_page
= ipr_get_mode_page(mode_pages
, 0x28,
7149 sizeof(struct ipr_mode_page28
));
7151 entry_length
= mode_page
->entry_length
;
7153 bus
= mode_page
->bus
;
7155 for (i
= 0; i
< mode_page
->num_entries
; i
++) {
7156 if (bus
->flags
& IPR_SCSI_ATTR_NO_TERM_PWR
) {
7157 dev_err(&ioa_cfg
->pdev
->dev
,
7158 "Term power is absent on scsi bus %d\n",
7162 bus
= (struct ipr_dev_bus_entry
*)((char *)bus
+ entry_length
);
7167 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7168 * @ioa_cfg: ioa config struct
7170 * Looks through the config table checking for SES devices. If
7171 * the SES device is in the SES table indicating a maximum SCSI
7172 * bus speed, the speed is limited for the bus.
7177 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg
*ioa_cfg
)
7182 for (i
= 0; i
< IPR_MAX_NUM_BUSES
; i
++) {
7183 max_xfer_rate
= ipr_get_max_scsi_speed(ioa_cfg
, i
,
7184 ioa_cfg
->bus_attr
[i
].bus_width
);
7186 if (max_xfer_rate
< ioa_cfg
->bus_attr
[i
].max_xfer_rate
)
7187 ioa_cfg
->bus_attr
[i
].max_xfer_rate
= max_xfer_rate
;
7192 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7193 * @ioa_cfg: ioa config struct
7194 * @mode_pages: mode page 28 buffer
7196 * Updates mode page 28 based on driver configuration
7201 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg
*ioa_cfg
,
7202 struct ipr_mode_pages
*mode_pages
)
7204 int i
, entry_length
;
7205 struct ipr_dev_bus_entry
*bus
;
7206 struct ipr_bus_attributes
*bus_attr
;
7207 struct ipr_mode_page28
*mode_page
;
7209 mode_page
= ipr_get_mode_page(mode_pages
, 0x28,
7210 sizeof(struct ipr_mode_page28
));
7212 entry_length
= mode_page
->entry_length
;
7214 /* Loop for each device bus entry */
7215 for (i
= 0, bus
= mode_page
->bus
;
7216 i
< mode_page
->num_entries
;
7217 i
++, bus
= (struct ipr_dev_bus_entry
*)((u8
*)bus
+ entry_length
)) {
7218 if (bus
->res_addr
.bus
> IPR_MAX_NUM_BUSES
) {
7219 dev_err(&ioa_cfg
->pdev
->dev
,
7220 "Invalid resource address reported: 0x%08X\n",
7221 IPR_GET_PHYS_LOC(bus
->res_addr
));
7225 bus_attr
= &ioa_cfg
->bus_attr
[i
];
7226 bus
->extended_reset_delay
= IPR_EXTENDED_RESET_DELAY
;
7227 bus
->bus_width
= bus_attr
->bus_width
;
7228 bus
->max_xfer_rate
= cpu_to_be32(bus_attr
->max_xfer_rate
);
7229 bus
->flags
&= ~IPR_SCSI_ATTR_QAS_MASK
;
7230 if (bus_attr
->qas_enabled
)
7231 bus
->flags
|= IPR_SCSI_ATTR_ENABLE_QAS
;
7233 bus
->flags
|= IPR_SCSI_ATTR_DISABLE_QAS
;
/**
 * ipr_build_mode_select - Build a mode select command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 2 of Mode Sense command
 * @dma_addr:	DMA buffer address
 * @xfer_len:	data transfer length
 *
 * Return value:
 *	none
 **/
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
				  __be32 res_handle, u8 parm,
				  dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
	ioarcb->cmd_pkt.cdb[1] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
}
7265 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7266 * @ipr_cmd: ipr command struct
7268 * This function sets up the SCSI bus attributes and sends
7269 * a Mode Select for Page 28 to activate them.
7274 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd
*ipr_cmd
)
7276 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7277 struct ipr_mode_pages
*mode_pages
= &ioa_cfg
->vpd_cbs
->mode_pages
;
7281 ipr_scsi_bus_speed_limit(ioa_cfg
);
7282 ipr_check_term_power(ioa_cfg
, mode_pages
);
7283 ipr_modify_ioafp_mode_page_28(ioa_cfg
, mode_pages
);
7284 length
= mode_pages
->hdr
.length
+ 1;
7285 mode_pages
->hdr
.length
= 0;
7287 ipr_build_mode_select(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
), 0x11,
7288 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, mode_pages
),
7291 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7292 ipr_cmd
->u
.res
= list_entry(ioa_cfg
->used_res_q
.next
,
7293 struct ipr_resource_entry
, queue
);
7294 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7297 return IPR_RC_JOB_RETURN
;
/**
 * ipr_build_mode_sense - Builds a mode sense command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 2 of mode sense command
 * @dma_addr:	DMA address of mode sense buffer
 * @xfer_len:	Size of DMA buffer
 *
 * Return value:
 *	none
 **/
static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
				 __be32 res_handle,
				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
	ioarcb->cmd_pkt.cdb[2] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
}
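/*
 * Illustrative sketch (not part of the driver): the 6-byte MODE SENSE CDB
 * built above places the page code in byte 2 and the allocation length in
 * byte 4, so a request for page 0x28 amounts to:
 *
 *	u8 cdb[6] = { MODE_SENSE, 0, 0x28, 0, xfer_len, 0 };
 */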
/**
 * ipr_reset_cmd_failed - Handle failure of IOA reset command
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of an IOA bringup command.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	dev_err(&ioa_cfg->pdev->dev,
		"0x%02X failed with IOASC: 0x%08X\n",
		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_set_supported_devs;
		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
					    struct ipr_resource_entry, queue);
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}
/**
 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 28 mode sense to the IOA to
 * retrieve SCSI bus attributes.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x28, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
7404 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7405 * @ipr_cmd: ipr command struct
7407 * This function enables dual IOA RAID support if possible.
7412 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd
*ipr_cmd
)
7414 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7415 struct ipr_mode_pages
*mode_pages
= &ioa_cfg
->vpd_cbs
->mode_pages
;
7416 struct ipr_mode_page24
*mode_page
;
7420 mode_page
= ipr_get_mode_page(mode_pages
, 0x24,
7421 sizeof(struct ipr_mode_page24
));
7424 mode_page
->flags
|= IPR_ENABLE_DUAL_IOA_AF
;
7426 length
= mode_pages
->hdr
.length
+ 1;
7427 mode_pages
->hdr
.length
= 0;
7429 ipr_build_mode_select(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
), 0x11,
7430 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, mode_pages
),
7433 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7434 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7437 return IPR_RC_JOB_RETURN
;
7441 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7442 * @ipr_cmd: ipr command struct
7444 * This function handles the failure of a Mode Sense to the IOAFP.
7445 * Some adapters do not handle all mode pages.
7448 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7450 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd
*ipr_cmd
)
7452 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7454 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
) {
7455 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7456 return IPR_RC_JOB_CONTINUE
;
7459 return ipr_reset_cmd_failed(ipr_cmd
);
7463 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7464 * @ipr_cmd: ipr command struct
7466 * This function send a mode sense to the IOA to retrieve
7467 * the IOA Advanced Function Control mode page.
7472 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd
*ipr_cmd
)
7474 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7477 ipr_build_mode_sense(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
),
7478 0x24, ioa_cfg
->vpd_cbs_dma
+
7479 offsetof(struct ipr_misc_cbs
, mode_pages
),
7480 sizeof(struct ipr_mode_pages
));
7482 ipr_cmd
->job_step
= ipr_ioafp_mode_select_page24
;
7483 ipr_cmd
->job_step_failed
= ipr_reset_mode_sense_page24_failed
;
7485 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7488 return IPR_RC_JOB_RETURN
;
7492 * ipr_init_res_table - Initialize the resource table
7493 * @ipr_cmd: ipr command struct
7495 * This function looks through the existing resource table, comparing
7496 * it with the config table. This function will take care of old/new
7497 * devices and schedule adding/removing them from the mid-layer
7501 * IPR_RC_JOB_CONTINUE
7503 static int ipr_init_res_table(struct ipr_cmnd
*ipr_cmd
)
7505 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7506 struct ipr_resource_entry
*res
, *temp
;
7507 struct ipr_config_table_entry_wrapper cfgtew
;
7508 int entries
, found
, flag
, i
;
7513 flag
= ioa_cfg
->u
.cfg_table64
->hdr64
.flags
;
7515 flag
= ioa_cfg
->u
.cfg_table
->hdr
.flags
;
7517 if (flag
& IPR_UCODE_DOWNLOAD_REQ
)
7518 dev_err(&ioa_cfg
->pdev
->dev
, "Microcode download required\n");
7520 list_for_each_entry_safe(res
, temp
, &ioa_cfg
->used_res_q
, queue
)
7521 list_move_tail(&res
->queue
, &old_res
);
7524 entries
= be16_to_cpu(ioa_cfg
->u
.cfg_table64
->hdr64
.num_entries
);
7526 entries
= ioa_cfg
->u
.cfg_table
->hdr
.num_entries
;
7528 for (i
= 0; i
< entries
; i
++) {
7530 cfgtew
.u
.cfgte64
= &ioa_cfg
->u
.cfg_table64
->dev
[i
];
7532 cfgtew
.u
.cfgte
= &ioa_cfg
->u
.cfg_table
->dev
[i
];
7535 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7536 if (ipr_is_same_device(res
, &cfgtew
)) {
7537 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7544 if (list_empty(&ioa_cfg
->free_res_q
)) {
7545 dev_err(&ioa_cfg
->pdev
->dev
, "Too many devices attached\n");
7550 res
= list_entry(ioa_cfg
->free_res_q
.next
,
7551 struct ipr_resource_entry
, queue
);
7552 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7553 ipr_init_res_entry(res
, &cfgtew
);
7555 } else if (res
->sdev
&& (ipr_is_vset_device(res
) || ipr_is_scsi_disk(res
)))
7556 res
->sdev
->allow_restart
= 1;
7559 ipr_update_res_entry(res
, &cfgtew
);
7562 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7564 res
->del_from_ml
= 1;
7565 res
->res_handle
= IPR_INVALID_RES_HANDLE
;
7566 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7570 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7571 ipr_clear_res_target(res
);
7572 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
7575 if (ioa_cfg
->dual_raid
&& ipr_dual_ioa_raid
)
7576 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page24
;
7578 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7581 return IPR_RC_JOB_CONTINUE
;
7585 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7586 * @ipr_cmd: ipr command struct
7588 * This function sends a Query IOA Configuration command
7589 * to the adapter to retrieve the IOA configuration table.
7594 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd
*ipr_cmd
)
7596 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7597 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7598 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
7599 struct ipr_inquiry_cap
*cap
= &ioa_cfg
->vpd_cbs
->cap
;
7602 if (cap
->cap
& IPR_CAP_DUAL_IOA_RAID
)
7603 ioa_cfg
->dual_raid
= 1;
7604 dev_info(&ioa_cfg
->pdev
->dev
, "Adapter firmware version: %02X%02X%02X%02X\n",
7605 ucode_vpd
->major_release
, ucode_vpd
->card_type
,
7606 ucode_vpd
->minor_release
[0], ucode_vpd
->minor_release
[1]);
7607 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7608 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7610 ioarcb
->cmd_pkt
.cdb
[0] = IPR_QUERY_IOA_CONFIG
;
7611 ioarcb
->cmd_pkt
.cdb
[6] = (ioa_cfg
->cfg_table_size
>> 16) & 0xff;
7612 ioarcb
->cmd_pkt
.cdb
[7] = (ioa_cfg
->cfg_table_size
>> 8) & 0xff;
7613 ioarcb
->cmd_pkt
.cdb
[8] = ioa_cfg
->cfg_table_size
& 0xff;
7615 ipr_init_ioadl(ipr_cmd
, ioa_cfg
->cfg_table_dma
, ioa_cfg
->cfg_table_size
,
7616 IPR_IOADL_FLAGS_READ_LAST
);
7618 ipr_cmd
->job_step
= ipr_init_res_table
;
7620 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7623 return IPR_RC_JOB_RETURN
;
7627 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7628 * @ipr_cmd: ipr command struct
7630 * This utility function sends an inquiry to the adapter.
7635 static void ipr_ioafp_inquiry(struct ipr_cmnd
*ipr_cmd
, u8 flags
, u8 page
,
7636 dma_addr_t dma_addr
, u8 xfer_len
)
7638 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7641 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
7642 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7644 ioarcb
->cmd_pkt
.cdb
[0] = INQUIRY
;
7645 ioarcb
->cmd_pkt
.cdb
[1] = flags
;
7646 ioarcb
->cmd_pkt
.cdb
[2] = page
;
7647 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
7649 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_READ_LAST
);
7651 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:	inquiry page 0 buffer
 * @page:	page code
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
			return 1;

	return 0;
}
/**
 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0xD0 inquiry to the adapter
 * to retrieve adapter capabilities.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
	memset(cap, 0, sizeof(*cap));

	if (ipr_inquiry_page_supported(page0, 0xD0)) {
		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
				  ioa_cfg->vpd_cbs_dma +
				  offsetof(struct ipr_misc_cbs, cap),
				  sizeof(struct ipr_inquiry_cap));
		return IPR_RC_JOB_RETURN;
	}

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma +
			  offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5];

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma +
			  offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma +
			  offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	return IPR_RC_JOB_RETURN;
}
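/*
 * Illustrative sketch (not part of the driver): the INQUIRY CDBs issued by
 * ipr_ioafp_inquiry() follow the standard 6-byte layout, with the EVPD flag
 * in byte 1, the page code in byte 2 and the allocation length in byte 4.
 * For example, the Page 3 request built above amounts to:
 *
 *	u8 cdb[6] = { INQUIRY, 0x01, 0x03, 0, sizeof(struct ipr_inquiry_page3), 0 };
 */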
7790 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7791 * @ipr_cmd: ipr command struct
7793 * This function send an Identify Host Request Response Queue
7794 * command to establish the HRRQ with the adapter.
7799 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd
*ipr_cmd
)
7801 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7802 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7803 struct ipr_hrr_queue
*hrrq
;
7806 ipr_cmd
->job_step
= ipr_ioafp_std_inquiry
;
7807 dev_info(&ioa_cfg
->pdev
->dev
, "Starting IOA initialization sequence.\n");
7809 if (ioa_cfg
->identify_hrrq_index
< ioa_cfg
->hrrq_num
) {
7810 hrrq
= &ioa_cfg
->hrrq
[ioa_cfg
->identify_hrrq_index
];
7812 ioarcb
->cmd_pkt
.cdb
[0] = IPR_ID_HOST_RR_Q
;
7813 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7815 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7817 ioarcb
->cmd_pkt
.cdb
[1] = 0x1;
7819 if (ioa_cfg
->nvectors
== 1)
7820 ioarcb
->cmd_pkt
.cdb
[1] &= ~IPR_ID_HRRQ_SELE_ENABLE
;
7822 ioarcb
->cmd_pkt
.cdb
[1] |= IPR_ID_HRRQ_SELE_ENABLE
;
7824 ioarcb
->cmd_pkt
.cdb
[2] =
7825 ((u64
) hrrq
->host_rrq_dma
>> 24) & 0xff;
7826 ioarcb
->cmd_pkt
.cdb
[3] =
7827 ((u64
) hrrq
->host_rrq_dma
>> 16) & 0xff;
7828 ioarcb
->cmd_pkt
.cdb
[4] =
7829 ((u64
) hrrq
->host_rrq_dma
>> 8) & 0xff;
7830 ioarcb
->cmd_pkt
.cdb
[5] =
7831 ((u64
) hrrq
->host_rrq_dma
) & 0xff;
7832 ioarcb
->cmd_pkt
.cdb
[7] =
7833 ((sizeof(u32
) * hrrq
->size
) >> 8) & 0xff;
7834 ioarcb
->cmd_pkt
.cdb
[8] =
7835 (sizeof(u32
) * hrrq
->size
) & 0xff;
7837 if (ioarcb
->cmd_pkt
.cdb
[1] & IPR_ID_HRRQ_SELE_ENABLE
)
7838 ioarcb
->cmd_pkt
.cdb
[9] =
7839 ioa_cfg
->identify_hrrq_index
;
7841 if (ioa_cfg
->sis64
) {
7842 ioarcb
->cmd_pkt
.cdb
[10] =
7843 ((u64
) hrrq
->host_rrq_dma
>> 56) & 0xff;
7844 ioarcb
->cmd_pkt
.cdb
[11] =
7845 ((u64
) hrrq
->host_rrq_dma
>> 48) & 0xff;
7846 ioarcb
->cmd_pkt
.cdb
[12] =
7847 ((u64
) hrrq
->host_rrq_dma
>> 40) & 0xff;
7848 ioarcb
->cmd_pkt
.cdb
[13] =
7849 ((u64
) hrrq
->host_rrq_dma
>> 32) & 0xff;
7852 if (ioarcb
->cmd_pkt
.cdb
[1] & IPR_ID_HRRQ_SELE_ENABLE
)
7853 ioarcb
->cmd_pkt
.cdb
[14] =
7854 ioa_cfg
->identify_hrrq_index
;
7856 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
7857 IPR_INTERNAL_TIMEOUT
);
7859 if (++ioa_cfg
->identify_hrrq_index
< ioa_cfg
->hrrq_num
)
7860 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
7863 return IPR_RC_JOB_RETURN
;
7867 return IPR_RC_JOB_CONTINUE
;
7871 * ipr_reset_timer_done - Adapter reset timer function
7872 * @ipr_cmd: ipr command struct
7874 * Description: This function is used in adapter reset processing
7875 * for timing events. If the reset_cmd pointer in the IOA
7876 * config struct is not this adapter's we are doing nested
7877 * resets and fail_all_ops will take care of freeing the
7883 static void ipr_reset_timer_done(struct ipr_cmnd
*ipr_cmd
)
7885 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7886 unsigned long lock_flags
= 0;
7888 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
7890 if (ioa_cfg
->reset_cmd
== ipr_cmd
) {
7891 list_del(&ipr_cmd
->queue
);
7892 ipr_cmd
->done(ipr_cmd
);
7895 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
7899 * ipr_reset_start_timer - Start a timer for adapter reset job
7900 * @ipr_cmd: ipr command struct
7901 * @timeout: timeout value
7903 * Description: This function is used in adapter reset processing
7904 * for timing events. If the reset_cmd pointer in the IOA
7905 * config struct is not this adapter's we are doing nested
7906 * resets and fail_all_ops will take care of freeing the
7912 static void ipr_reset_start_timer(struct ipr_cmnd
*ipr_cmd
,
7913 unsigned long timeout
)
7917 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
7918 ipr_cmd
->done
= ipr_reset_ioa_job
;
7920 ipr_cmd
->timer
.data
= (unsigned long) ipr_cmd
;
7921 ipr_cmd
->timer
.expires
= jiffies
+ timeout
;
7922 ipr_cmd
->timer
.function
= (void (*)(unsigned long))ipr_reset_timer_done
;
7923 add_timer(&ipr_cmd
->timer
);
7927 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7928 * @ioa_cfg: ioa cfg struct
7933 static void ipr_init_ioa_mem(struct ipr_ioa_cfg
*ioa_cfg
)
7935 struct ipr_hrr_queue
*hrrq
;
7937 for_each_hrrq(hrrq
, ioa_cfg
) {
7938 spin_lock(&hrrq
->_lock
);
7939 memset(hrrq
->host_rrq
, 0, sizeof(u32
) * hrrq
->size
);
7941 /* Initialize Host RRQ pointers */
7942 hrrq
->hrrq_start
= hrrq
->host_rrq
;
7943 hrrq
->hrrq_end
= &hrrq
->host_rrq
[hrrq
->size
- 1];
7944 hrrq
->hrrq_curr
= hrrq
->hrrq_start
;
7945 hrrq
->toggle_bit
= 1;
7946 spin_unlock(&hrrq
->_lock
);
7950 ioa_cfg
->identify_hrrq_index
= 0;
7951 if (ioa_cfg
->hrrq_num
== 1)
7952 atomic_set(&ioa_cfg
->hrrq_index
, 0);
7954 atomic_set(&ioa_cfg
->hrrq_index
, 1);
7956 /* Zero out config table */
7957 memset(ioa_cfg
->u
.cfg_table
, 0, ioa_cfg
->cfg_table_size
);
7961 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7962 * @ipr_cmd: ipr command struct
7965 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7967 static int ipr_reset_next_stage(struct ipr_cmnd
*ipr_cmd
)
7969 unsigned long stage
, stage_time
;
7971 volatile u32 int_reg
;
7972 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7975 feedback
= readl(ioa_cfg
->regs
.init_feedback_reg
);
7976 stage
= feedback
& IPR_IPL_INIT_STAGE_MASK
;
7977 stage_time
= feedback
& IPR_IPL_INIT_STAGE_TIME_MASK
;
7979 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage
, stage_time
);
7981 /* sanity check the stage_time value */
7982 if (stage_time
== 0)
7983 stage_time
= IPR_IPL_INIT_DEFAULT_STAGE_TIME
;
7984 else if (stage_time
< IPR_IPL_INIT_MIN_STAGE_TIME
)
7985 stage_time
= IPR_IPL_INIT_MIN_STAGE_TIME
;
7986 else if (stage_time
> IPR_LONG_OPERATIONAL_TIMEOUT
)
7987 stage_time
= IPR_LONG_OPERATIONAL_TIMEOUT
;
7989 if (stage
== IPR_IPL_INIT_STAGE_UNKNOWN
) {
7990 writel(IPR_PCII_IPL_STAGE_CHANGE
, ioa_cfg
->regs
.set_interrupt_mask_reg
);
7991 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
7992 stage_time
= ioa_cfg
->transop_timeout
;
7993 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
7994 } else if (stage
== IPR_IPL_INIT_STAGE_TRANSOP
) {
7995 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
7996 if (int_reg
& IPR_PCII_IOA_TRANS_TO_OPER
) {
7997 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
7998 maskval
= IPR_PCII_IPL_STAGE_CHANGE
;
7999 maskval
= (maskval
<< 32) | IPR_PCII_IOA_TRANS_TO_OPER
;
8000 writeq(maskval
, ioa_cfg
->regs
.set_interrupt_mask_reg
);
8001 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
8002 return IPR_RC_JOB_CONTINUE
;
8006 ipr_cmd
->timer
.data
= (unsigned long) ipr_cmd
;
8007 ipr_cmd
->timer
.expires
= jiffies
+ stage_time
* HZ
;
8008 ipr_cmd
->timer
.function
= (void (*)(unsigned long))ipr_oper_timeout
;
8009 ipr_cmd
->done
= ipr_reset_ioa_job
;
8010 add_timer(&ipr_cmd
->timer
);
8012 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
8014 return IPR_RC_JOB_RETURN
;
8018 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8019 * @ipr_cmd: ipr command struct
8021 * This function reinitializes some control blocks and
8022 * enables destructive diagnostics on the adapter.
8027 static int ipr_reset_enable_ioa(struct ipr_cmnd
*ipr_cmd
)
8029 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8030 volatile u32 int_reg
;
8031 volatile u64 maskval
;
8035 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
8036 ipr_init_ioa_mem(ioa_cfg
);
8038 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8039 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
8040 ioa_cfg
->hrrq
[i
].allow_interrupts
= 1;
8041 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
8044 if (ioa_cfg
->sis64
) {
8045 /* Set the adapter to the correct endian mode. */
8046 writel(IPR_ENDIAN_SWAP_KEY
, ioa_cfg
->regs
.endian_swap_reg
);
8047 int_reg
= readl(ioa_cfg
->regs
.endian_swap_reg
);
8050 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
8052 if (int_reg
& IPR_PCII_IOA_TRANS_TO_OPER
) {
8053 writel((IPR_PCII_ERROR_INTERRUPTS
| IPR_PCII_HRRQ_UPDATED
),
8054 ioa_cfg
->regs
.clr_interrupt_mask_reg32
);
8055 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
8056 return IPR_RC_JOB_CONTINUE
;
8059 /* Enable destructive diagnostics on IOA */
8060 writel(ioa_cfg
->doorbell
, ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
8062 if (ioa_cfg
->sis64
) {
8063 maskval
= IPR_PCII_IPL_STAGE_CHANGE
;
8064 maskval
= (maskval
<< 32) | IPR_PCII_OPER_INTERRUPTS
;
8065 writeq(maskval
, ioa_cfg
->regs
.clr_interrupt_mask_reg
);
8067 writel(IPR_PCII_OPER_INTERRUPTS
, ioa_cfg
->regs
.clr_interrupt_mask_reg32
);
8069 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
8071 dev_info(&ioa_cfg
->pdev
->dev
, "Initializing IOA.\n");
8073 if (ioa_cfg
->sis64
) {
8074 ipr_cmd
->job_step
= ipr_reset_next_stage
;
8075 return IPR_RC_JOB_CONTINUE
;
8078 ipr_cmd
->timer
.data
= (unsigned long) ipr_cmd
;
8079 ipr_cmd
->timer
.expires
= jiffies
+ (ioa_cfg
->transop_timeout
* HZ
);
8080 ipr_cmd
->timer
.function
= (void (*)(unsigned long))ipr_oper_timeout
;
8081 ipr_cmd
->done
= ipr_reset_ioa_job
;
8082 add_timer(&ipr_cmd
->timer
);
8083 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
8086 return IPR_RC_JOB_RETURN
;
/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	else if (ioa_cfg->sdt_state == READ_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ioa_cfg->dump_timeout = 1;
	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:	ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 *	nothing
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}
8131 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8132 * @ioa_cfg: ioa config struct
8134 * Fetches the unit check buffer from the adapter by clocking the data
8135 * through the mailbox register.
8140 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg
*ioa_cfg
)
8142 unsigned long mailbox
;
8143 struct ipr_hostrcb
*hostrcb
;
8144 struct ipr_uc_sdt sdt
;
8148 mailbox
= readl(ioa_cfg
->ioa_mailbox
);
8150 if (!ioa_cfg
->sis64
&& !ipr_sdt_is_fmt2(mailbox
)) {
8151 ipr_unit_check_no_data(ioa_cfg
);
8155 memset(&sdt
, 0, sizeof(struct ipr_uc_sdt
));
8156 rc
= ipr_get_ldump_data_section(ioa_cfg
, mailbox
, (__be32
*) &sdt
,
8157 (sizeof(struct ipr_uc_sdt
)) / sizeof(__be32
));
8159 if (rc
|| !(sdt
.entry
[0].flags
& IPR_SDT_VALID_ENTRY
) ||
8160 ((be32_to_cpu(sdt
.hdr
.state
) != IPR_FMT3_SDT_READY_TO_USE
) &&
8161 (be32_to_cpu(sdt
.hdr
.state
) != IPR_FMT2_SDT_READY_TO_USE
))) {
8162 ipr_unit_check_no_data(ioa_cfg
);
8166 /* Find length of the first sdt entry (UC buffer) */
8167 if (be32_to_cpu(sdt
.hdr
.state
) == IPR_FMT3_SDT_READY_TO_USE
)
8168 length
= be32_to_cpu(sdt
.entry
[0].end_token
);
8170 length
= (be32_to_cpu(sdt
.entry
[0].end_token
) -
8171 be32_to_cpu(sdt
.entry
[0].start_token
)) &
8172 IPR_FMT2_MBX_ADDR_MASK
;
8174 hostrcb
= list_entry(ioa_cfg
->hostrcb_free_q
.next
,
8175 struct ipr_hostrcb
, queue
);
8176 list_del(&hostrcb
->queue
);
8177 memset(&hostrcb
->hcam
, 0, sizeof(hostrcb
->hcam
));
8179 rc
= ipr_get_ldump_data_section(ioa_cfg
,
8180 be32_to_cpu(sdt
.entry
[0].start_token
),
8181 (__be32
*)&hostrcb
->hcam
,
8182 min(length
, (int)sizeof(hostrcb
->hcam
)) / sizeof(__be32
));
8185 ipr_handle_log_data(ioa_cfg
, hostrcb
);
8186 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
8187 if (ioasc
== IPR_IOASC_NR_IOA_RESET_REQUIRED
&&
8188 ioa_cfg
->sdt_state
== GET_DUMP
)
8189 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
8191 ipr_unit_check_no_data(ioa_cfg
);
8193 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_free_q
);
8197 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8198 * @ipr_cmd: ipr command struct
8200 * Description: This function will call to get the unit check buffer.
8205 static int ipr_reset_get_unit_check_job(struct ipr_cmnd
*ipr_cmd
)
8207 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8210 ioa_cfg
->ioa_unit_checked
= 0;
8211 ipr_get_unit_check_buffer(ioa_cfg
);
8212 ipr_cmd
->job_step
= ipr_reset_alert
;
8213 ipr_reset_start_timer(ipr_cmd
, 0);
8216 return IPR_RC_JOB_RETURN
;
8220 * ipr_reset_restore_cfg_space - Restore PCI config space.
8221 * @ipr_cmd: ipr command struct
8223 * Description: This function restores the saved PCI config space of
8224 * the adapter, fails all outstanding ops back to the callers, and
8225 * fetches the dump/unit check if applicable to this reset.
8228 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8230 static int ipr_reset_restore_cfg_space(struct ipr_cmnd
*ipr_cmd
)
8232 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8236 ioa_cfg
->pdev
->state_saved
= true;
8237 pci_restore_state(ioa_cfg
->pdev
);
8239 if (ipr_set_pcix_cmd_reg(ioa_cfg
)) {
8240 ipr_cmd
->s
.ioasa
.hdr
.ioasc
= cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR
);
8241 return IPR_RC_JOB_CONTINUE
;
8244 ipr_fail_all_ops(ioa_cfg
);
8246 if (ioa_cfg
->sis64
) {
8247 /* Set the adapter to the correct endian mode. */
8248 writel(IPR_ENDIAN_SWAP_KEY
, ioa_cfg
->regs
.endian_swap_reg
);
8249 int_reg
= readl(ioa_cfg
->regs
.endian_swap_reg
);
8252 if (ioa_cfg
->ioa_unit_checked
) {
8253 if (ioa_cfg
->sis64
) {
8254 ipr_cmd
->job_step
= ipr_reset_get_unit_check_job
;
8255 ipr_reset_start_timer(ipr_cmd
, IPR_DUMP_DELAY_TIMEOUT
);
8256 return IPR_RC_JOB_RETURN
;
8258 ioa_cfg
->ioa_unit_checked
= 0;
8259 ipr_get_unit_check_buffer(ioa_cfg
);
8260 ipr_cmd
->job_step
= ipr_reset_alert
;
8261 ipr_reset_start_timer(ipr_cmd
, 0);
8262 return IPR_RC_JOB_RETURN
;
8266 if (ioa_cfg
->in_ioa_bringdown
) {
8267 ipr_cmd
->job_step
= ipr_ioa_bringdown_done
;
8269 ipr_cmd
->job_step
= ipr_reset_enable_ioa
;
8271 if (GET_DUMP
== ioa_cfg
->sdt_state
) {
8272 ioa_cfg
->sdt_state
= READ_DUMP
;
8273 ioa_cfg
->dump_timeout
= 0;
8275 ipr_reset_start_timer(ipr_cmd
, IPR_SIS64_DUMP_TIMEOUT
);
8277 ipr_reset_start_timer(ipr_cmd
, IPR_SIS32_DUMP_TIMEOUT
);
8278 ipr_cmd
->job_step
= ipr_reset_wait_for_dump
;
8279 schedule_work(&ioa_cfg
->work_q
);
8280 return IPR_RC_JOB_RETURN
;
8285 return IPR_RC_JOB_CONTINUE
;
8289 * ipr_reset_bist_done - BIST has completed on the adapter.
8290 * @ipr_cmd: ipr command struct
8292 * Description: Unblock config space and resume the reset process.
8295 * IPR_RC_JOB_CONTINUE
8297 static int ipr_reset_bist_done(struct ipr_cmnd
*ipr_cmd
)
8299 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8302 if (ioa_cfg
->cfg_locked
)
8303 pci_cfg_access_unlock(ioa_cfg
->pdev
);
8304 ioa_cfg
->cfg_locked
= 0;
8305 ipr_cmd
->job_step
= ipr_reset_restore_cfg_space
;
8307 return IPR_RC_JOB_CONTINUE
;
8311 * ipr_reset_start_bist - Run BIST on the adapter.
8312 * @ipr_cmd: ipr command struct
8314 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8317 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8319 static int ipr_reset_start_bist(struct ipr_cmnd
*ipr_cmd
)
8321 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8322 int rc
= PCIBIOS_SUCCESSFUL
;
8325 if (ioa_cfg
->ipr_chip
->bist_method
== IPR_MMIO
)
8326 writel(IPR_UPROCI_SIS64_START_BIST
,
8327 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
8329 rc
= pci_write_config_byte(ioa_cfg
->pdev
, PCI_BIST
, PCI_BIST_START
);
8331 if (rc
== PCIBIOS_SUCCESSFUL
) {
8332 ipr_cmd
->job_step
= ipr_reset_bist_done
;
8333 ipr_reset_start_timer(ipr_cmd
, IPR_WAIT_FOR_BIST_TIMEOUT
);
8334 rc
= IPR_RC_JOB_RETURN
;
8336 if (ioa_cfg
->cfg_locked
)
8337 pci_cfg_access_unlock(ipr_cmd
->ioa_cfg
->pdev
);
8338 ioa_cfg
->cfg_locked
= 0;
8339 ipr_cmd
->s
.ioasa
.hdr
.ioasc
= cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR
);
8340 rc
= IPR_RC_JOB_CONTINUE
;
8348 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8349 * @ipr_cmd: ipr command struct
8351 * Description: This clears PCI reset to the adapter and delays two seconds.
8356 static int ipr_reset_slot_reset_done(struct ipr_cmnd
*ipr_cmd
)
8359 pci_set_pcie_reset_state(ipr_cmd
->ioa_cfg
->pdev
, pcie_deassert_reset
);
8360 ipr_cmd
->job_step
= ipr_reset_bist_done
;
8361 ipr_reset_start_timer(ipr_cmd
, IPR_WAIT_FOR_BIST_TIMEOUT
);
8363 return IPR_RC_JOB_RETURN
;
8367 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8368 * @ipr_cmd: ipr command struct
8370 * Description: This asserts PCI reset to the adapter.
8375 static int ipr_reset_slot_reset(struct ipr_cmnd
*ipr_cmd
)
8377 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8378 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
8381 pci_set_pcie_reset_state(pdev
, pcie_warm_reset
);
8382 ipr_cmd
->job_step
= ipr_reset_slot_reset_done
;
8383 ipr_reset_start_timer(ipr_cmd
, IPR_PCI_RESET_TIMEOUT
);
8385 return IPR_RC_JOB_RETURN
;
8389 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8390 * @ipr_cmd: ipr command struct
8392 * Description: This attempts to block config access to the IOA.
8395 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8397 static int ipr_reset_block_config_access_wait(struct ipr_cmnd
*ipr_cmd
)
8399 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8400 int rc
= IPR_RC_JOB_CONTINUE
;
8402 if (pci_cfg_access_trylock(ioa_cfg
->pdev
)) {
8403 ioa_cfg
->cfg_locked
= 1;
8404 ipr_cmd
->job_step
= ioa_cfg
->reset
;
8406 if (ipr_cmd
->u
.time_left
) {
8407 rc
= IPR_RC_JOB_RETURN
;
8408 ipr_cmd
->u
.time_left
-= IPR_CHECK_FOR_RESET_TIMEOUT
;
8409 ipr_reset_start_timer(ipr_cmd
,
8410 IPR_CHECK_FOR_RESET_TIMEOUT
);
8412 ipr_cmd
->job_step
= ioa_cfg
->reset
;
8413 dev_err(&ioa_cfg
->pdev
->dev
,
8414 "Timed out waiting to lock config access. Resetting anyway.\n");
8422 * ipr_reset_block_config_access - Block config access to the IOA
8423 * @ipr_cmd: ipr command struct
8425 * Description: This attempts to block config access to the IOA
8428 * IPR_RC_JOB_CONTINUE
8430 static int ipr_reset_block_config_access(struct ipr_cmnd
*ipr_cmd
)
8432 ipr_cmd
->ioa_cfg
->cfg_locked
= 0;
8433 ipr_cmd
->job_step
= ipr_reset_block_config_access_wait
;
8434 ipr_cmd
->u
.time_left
= IPR_WAIT_FOR_RESET_TIMEOUT
;
8435 return IPR_RC_JOB_CONTINUE
;
8439 * ipr_reset_allowed - Query whether or not IOA can be reset
8440 * @ioa_cfg: ioa config struct
8443 * 0 if reset not allowed / non-zero if reset is allowed
8445 static int ipr_reset_allowed(struct ipr_ioa_cfg
*ioa_cfg
)
8447 volatile u32 temp_reg
;
8449 temp_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
8450 return ((temp_reg
& IPR_PCII_CRITICAL_OPERATION
) == 0);
8454 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8455 * @ipr_cmd: ipr command struct
8457 * Description: This function waits for adapter permission to run BIST,
8458 * then runs BIST. If the adapter does not give permission after a
8459 * reasonable time, we will reset the adapter anyway. The impact of
8460 * resetting the adapter without warning the adapter is the risk of
8461 * losing the persistent error log on the adapter. If the adapter is
8462 * reset while it is writing to the flash on the adapter, the flash
8463 * segment will have bad ECC and be zeroed.
8466 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8468 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd
*ipr_cmd
)
8470 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8471 int rc
= IPR_RC_JOB_RETURN
;
8473 if (!ipr_reset_allowed(ioa_cfg
) && ipr_cmd
->u
.time_left
) {
8474 ipr_cmd
->u
.time_left
-= IPR_CHECK_FOR_RESET_TIMEOUT
;
8475 ipr_reset_start_timer(ipr_cmd
, IPR_CHECK_FOR_RESET_TIMEOUT
);
8477 ipr_cmd
->job_step
= ipr_reset_block_config_access
;
8478 rc
= IPR_RC_JOB_CONTINUE
;
8485 * ipr_reset_alert - Alert the adapter of a pending reset
8486 * @ipr_cmd: ipr command struct
8488 * Description: This function alerts the adapter that it will be reset.
8489 * If memory space is not currently enabled, proceed directly
8490 * to running BIST on the adapter. The timer must always be started
8491 * so we guarantee we do not run BIST from ipr_isr.
8496 static int ipr_reset_alert(struct ipr_cmnd
*ipr_cmd
)
8498 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8503 rc
= pci_read_config_word(ioa_cfg
->pdev
, PCI_COMMAND
, &cmd_reg
);
8505 if ((rc
== PCIBIOS_SUCCESSFUL
) && (cmd_reg
& PCI_COMMAND_MEMORY
)) {
8506 ipr_mask_and_clear_interrupts(ioa_cfg
, ~0);
8507 writel(IPR_UPROCI_RESET_ALERT
, ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
8508 ipr_cmd
->job_step
= ipr_reset_wait_to_start_bist
;
8510 ipr_cmd
->job_step
= ipr_reset_block_config_access
;
8513 ipr_cmd
->u
.time_left
= IPR_WAIT_FOR_RESET_TIMEOUT
;
8514 ipr_reset_start_timer(ipr_cmd
, IPR_CHECK_FOR_RESET_TIMEOUT
);
8517 return IPR_RC_JOB_RETURN
;
8521 * ipr_reset_ucode_download_done - Microcode download completion
8522 * @ipr_cmd: ipr command struct
8524 * Description: This function unmaps the microcode download buffer.
8527 * IPR_RC_JOB_CONTINUE
8529 static int ipr_reset_ucode_download_done(struct ipr_cmnd
*ipr_cmd
)
8531 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8532 struct ipr_sglist
*sglist
= ioa_cfg
->ucode_sglist
;
8534 pci_unmap_sg(ioa_cfg
->pdev
, sglist
->scatterlist
,
8535 sglist
->num_sg
, DMA_TO_DEVICE
);
8537 ipr_cmd
->job_step
= ipr_reset_alert
;
8538 return IPR_RC_JOB_CONTINUE
;
8542 * ipr_reset_ucode_download - Download microcode to the adapter
8543 * @ipr_cmd: ipr command struct
8545 * Description: This function checks to see if it there is microcode
8546 * to download to the adapter. If there is, a download is performed.
8549 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8551 static int ipr_reset_ucode_download(struct ipr_cmnd
*ipr_cmd
)
8553 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8554 struct ipr_sglist
*sglist
= ioa_cfg
->ucode_sglist
;
8557 ipr_cmd
->job_step
= ipr_reset_alert
;
8560 return IPR_RC_JOB_CONTINUE
;
8562 ipr_cmd
->ioarcb
.res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
8563 ipr_cmd
->ioarcb
.cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
8564 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[0] = WRITE_BUFFER
;
8565 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE
;
8566 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[6] = (sglist
->buffer_len
& 0xff0000) >> 16;
8567 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[7] = (sglist
->buffer_len
& 0x00ff00) >> 8;
8568 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[8] = sglist
->buffer_len
& 0x0000ff;
8571 ipr_build_ucode_ioadl64(ipr_cmd
, sglist
);
8573 ipr_build_ucode_ioadl(ipr_cmd
, sglist
);
8574 ipr_cmd
->job_step
= ipr_reset_ucode_download_done
;
8576 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
8577 IPR_WRITE_BUFFER_TIMEOUT
);
8580 return IPR_RC_JOB_RETURN
;
8584 * ipr_reset_shutdown_ioa - Shutdown the adapter
8585 * @ipr_cmd: ipr command struct
8587 * Description: This function issues an adapter shutdown of the
8588 * specified type to the specified adapter as part of the
8589 * adapter reset job.
8592 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8594 static int ipr_reset_shutdown_ioa(struct ipr_cmnd
*ipr_cmd
)
8596 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8597 enum ipr_shutdown_type shutdown_type
= ipr_cmd
->u
.shutdown_type
;
8598 unsigned long timeout
;
8599 int rc
= IPR_RC_JOB_CONTINUE
;
8602 if (shutdown_type
!= IPR_SHUTDOWN_NONE
&&
8603 !ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
8604 ipr_cmd
->ioarcb
.res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
8605 ipr_cmd
->ioarcb
.cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
8606 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[0] = IPR_IOA_SHUTDOWN
;
8607 ipr_cmd
->ioarcb
.cmd_pkt
.cdb
[1] = shutdown_type
;
8609 if (shutdown_type
== IPR_SHUTDOWN_NORMAL
)
8610 timeout
= IPR_SHUTDOWN_TIMEOUT
;
8611 else if (shutdown_type
== IPR_SHUTDOWN_PREPARE_FOR_NORMAL
)
8612 timeout
= IPR_INTERNAL_TIMEOUT
;
8613 else if (ioa_cfg
->dual_raid
&& ipr_dual_ioa_raid
)
8614 timeout
= IPR_DUAL_IOA_ABBR_SHUTDOWN_TO
;
8616 timeout
= IPR_ABBREV_SHUTDOWN_TIMEOUT
;
8618 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, timeout
);
8620 rc
= IPR_RC_JOB_RETURN
;
8621 ipr_cmd
->job_step
= ipr_reset_ucode_download
;
8623 ipr_cmd
->job_step
= ipr_reset_alert
;
8630 * ipr_reset_ioa_job - Adapter reset job
8631 * @ipr_cmd: ipr command struct
8633 * Description: This function is the job router for the adapter reset job.
8638 static void ipr_reset_ioa_job(struct ipr_cmnd
*ipr_cmd
)
8641 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8644 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
8646 if (ioa_cfg
->reset_cmd
!= ipr_cmd
) {
8648 * We are doing nested adapter resets and this is
8649 * not the current reset job.
8651 list_add_tail(&ipr_cmd
->queue
,
8652 &ipr_cmd
->hrrq
->hrrq_free_q
);
8656 if (IPR_IOASC_SENSE_KEY(ioasc
)) {
8657 rc
= ipr_cmd
->job_step_failed(ipr_cmd
);
8658 if (rc
== IPR_RC_JOB_RETURN
)
8662 ipr_reinit_ipr_cmnd(ipr_cmd
);
8663 ipr_cmd
->job_step_failed
= ipr_reset_cmd_failed
;
8664 rc
= ipr_cmd
->job_step(ipr_cmd
);
8665 } while (rc
== IPR_RC_JOB_CONTINUE
);
8669 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8670 * @ioa_cfg: ioa config struct
8671 * @job_step: first job step of reset job
8672 * @shutdown_type: shutdown type
8674 * Description: This function will initiate the reset of the given adapter
8675 * starting at the selected job step.
8676 * If the caller needs to wait on the completion of the reset,
8677 * the caller must sleep on the reset_wait_q.
8682 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg
*ioa_cfg
,
8683 int (*job_step
) (struct ipr_cmnd
*),
8684 enum ipr_shutdown_type shutdown_type
)
8686 struct ipr_cmnd
*ipr_cmd
;
8689 ioa_cfg
->in_reset_reload
= 1;
8690 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8691 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
8692 ioa_cfg
->hrrq
[i
].allow_cmds
= 0;
8693 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
8696 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].removing_ioa
)
8697 scsi_block_requests(ioa_cfg
->host
);
8699 ipr_cmd
= ipr_get_free_ipr_cmnd(ioa_cfg
);
8700 ioa_cfg
->reset_cmd
= ipr_cmd
;
8701 ipr_cmd
->job_step
= job_step
;
8702 ipr_cmd
->u
.shutdown_type
= shutdown_type
;
8704 ipr_reset_ioa_job(ipr_cmd
);
8708 * ipr_initiate_ioa_reset - Initiate an adapter reset
8709 * @ioa_cfg: ioa config struct
8710 * @shutdown_type: shutdown type
8712 * Description: This function will initiate the reset of the given adapter.
8713 * If the caller needs to wait on the completion of the reset,
8714 * the caller must sleep on the reset_wait_q.
8719 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg
*ioa_cfg
,
8720 enum ipr_shutdown_type shutdown_type
)
8724 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
8727 if (ioa_cfg
->in_reset_reload
) {
8728 if (ioa_cfg
->sdt_state
== GET_DUMP
)
8729 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
8730 else if (ioa_cfg
->sdt_state
== READ_DUMP
)
8731 ioa_cfg
->sdt_state
= ABORT_DUMP
;
8734 if (ioa_cfg
->reset_retries
++ >= IPR_NUM_RESET_RELOAD_RETRIES
) {
8735 dev_err(&ioa_cfg
->pdev
->dev
,
8736 "IOA taken offline - error recovery failed\n");
8738 ioa_cfg
->reset_retries
= 0;
8739 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8740 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
8741 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 1;
8742 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
8746 if (ioa_cfg
->in_ioa_bringdown
) {
8747 ioa_cfg
->reset_cmd
= NULL
;
8748 ioa_cfg
->in_reset_reload
= 0;
8749 ipr_fail_all_ops(ioa_cfg
);
8750 wake_up_all(&ioa_cfg
->reset_wait_q
);
8752 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].removing_ioa
) {
8753 spin_unlock_irq(ioa_cfg
->host
->host_lock
);
8754 scsi_unblock_requests(ioa_cfg
->host
);
8755 spin_lock_irq(ioa_cfg
->host
->host_lock
);
8759 ioa_cfg
->in_ioa_bringdown
= 1;
8760 shutdown_type
= IPR_SHUTDOWN_NONE
;
8764 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_shutdown_ioa
,
8769 * ipr_reset_freeze - Hold off all I/O activity
8770 * @ipr_cmd: ipr command struct
8772 * Description: If the PCI slot is frozen, hold off all I/O
8773 * activity; then, as soon as the slot is available again,
8774 * initiate an adapter reset.
8776 static int ipr_reset_freeze(struct ipr_cmnd
*ipr_cmd
)
8778 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8781 /* Disallow new interrupts, avoid loop */
8782 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8783 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
8784 ioa_cfg
->hrrq
[i
].allow_interrupts
= 0;
8785 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
8788 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
8789 ipr_cmd
->done
= ipr_reset_ioa_job
;
8790 return IPR_RC_JOB_RETURN
;
8794 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8795 * @pdev: PCI device struct
8797 * Description: This routine is called to tell us that the MMIO
8798 * access to the IOA has been restored
8800 static pci_ers_result_t
ipr_pci_mmio_enabled(struct pci_dev
*pdev
)
8802 unsigned long flags
= 0;
8803 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
8805 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
8806 if (!ioa_cfg
->probe_done
)
8807 pci_save_state(pdev
);
8808 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
8809 return PCI_ERS_RESULT_NEED_RESET
;
8813 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8814 * @pdev: PCI device struct
8816 * Description: This routine is called to tell us that the PCI bus
8817 * is down. Can't do anything here, except put the device driver
8818 * into a holding pattern, waiting for the PCI bus to come back.
8820 static void ipr_pci_frozen(struct pci_dev
*pdev
)
8822 unsigned long flags
= 0;
8823 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
8825 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
8826 if (ioa_cfg
->probe_done
)
8827 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_freeze
, IPR_SHUTDOWN_NONE
);
8828 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
8832 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8833 * @pdev: PCI device struct
8835 * Description: This routine is called by the pci error recovery
8836 * code after the PCI slot has been reset, just before we
8837 * should resume normal operations.
8839 static pci_ers_result_t
ipr_pci_slot_reset(struct pci_dev
*pdev
)
8841 unsigned long flags
= 0;
8842 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
8844 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
8845 if (ioa_cfg
->probe_done
) {
8846 if (ioa_cfg
->needs_warm_reset
)
8847 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
8849 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_restore_cfg_space
,
8852 wake_up_all(&ioa_cfg
->eeh_wait_q
);
8853 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
8854 return PCI_ERS_RESULT_RECOVERED
;
8858 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8859 * @pdev: PCI device struct
8861 * Description: This routine is called when the PCI bus has
8862 * permanently failed.
8864 static void ipr_pci_perm_failure(struct pci_dev
*pdev
)
8866 unsigned long flags
= 0;
8867 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
8870 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
8871 if (ioa_cfg
->probe_done
) {
8872 if (ioa_cfg
->sdt_state
== WAIT_FOR_DUMP
)
8873 ioa_cfg
->sdt_state
= ABORT_DUMP
;
8874 ioa_cfg
->reset_retries
= IPR_NUM_RESET_RELOAD_RETRIES
- 1;
8875 ioa_cfg
->in_ioa_bringdown
= 1;
8876 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8877 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
8878 ioa_cfg
->hrrq
[i
].allow_cmds
= 0;
8879 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
8882 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
8884 wake_up_all(&ioa_cfg
->eeh_wait_q
);
8885 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
8889 * ipr_pci_error_detected - Called when a PCI error is detected.
8890 * @pdev: PCI device struct
8891 * @state: PCI channel state
8893 * Description: Called when a PCI error is detected.
8896 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8898 static pci_ers_result_t
ipr_pci_error_detected(struct pci_dev
*pdev
,
8899 pci_channel_state_t state
)
8902 case pci_channel_io_frozen
:
8903 ipr_pci_frozen(pdev
);
8904 return PCI_ERS_RESULT_CAN_RECOVER
;
8905 case pci_channel_io_perm_failure
:
8906 ipr_pci_perm_failure(pdev
);
8907 return PCI_ERS_RESULT_DISCONNECT
;
8912 return PCI_ERS_RESULT_NEED_RESET
;
8916 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8917 * @ioa_cfg: ioa cfg struct
8919 * Description: This is the second phase of adapter intialization
8920 * This function takes care of initilizing the adapter to the point
8921 * where it can accept new commands.
8924 * 0 on success / -EIO on failure
8926 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg
*ioa_cfg
)
8929 unsigned long host_lock_flags
= 0;
8932 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, host_lock_flags
);
8933 dev_dbg(&ioa_cfg
->pdev
->dev
, "ioa_cfg adx: 0x%p\n", ioa_cfg
);
8934 ioa_cfg
->probe_done
= 1;
8935 if (ioa_cfg
->needs_hard_reset
) {
8936 ioa_cfg
->needs_hard_reset
= 0;
8937 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
8939 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_enable_ioa
,
8941 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, host_lock_flags
);
8942 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
8943 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, host_lock_flags
);
8945 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
8947 } else if (ipr_invalid_adapter(ioa_cfg
)) {
8951 dev_err(&ioa_cfg
->pdev
->dev
,
8952 "Adapter not supported in this hardware configuration.\n");
8955 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, host_lock_flags
);
8962 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8963 * @ioa_cfg: ioa config struct
8968 static void ipr_free_cmd_blks(struct ipr_ioa_cfg
*ioa_cfg
)
8972 for (i
= 0; i
< IPR_NUM_CMD_BLKS
; i
++) {
8973 if (ioa_cfg
->ipr_cmnd_list
[i
])
8974 pci_pool_free(ioa_cfg
->ipr_cmd_pool
,
8975 ioa_cfg
->ipr_cmnd_list
[i
],
8976 ioa_cfg
->ipr_cmnd_list_dma
[i
]);
8978 ioa_cfg
->ipr_cmnd_list
[i
] = NULL
;
8981 if (ioa_cfg
->ipr_cmd_pool
)
8982 pci_pool_destroy(ioa_cfg
->ipr_cmd_pool
);
8984 kfree(ioa_cfg
->ipr_cmnd_list
);
8985 kfree(ioa_cfg
->ipr_cmnd_list_dma
);
8986 ioa_cfg
->ipr_cmnd_list
= NULL
;
8987 ioa_cfg
->ipr_cmnd_list_dma
= NULL
;
8988 ioa_cfg
->ipr_cmd_pool
= NULL
;
8992 * ipr_free_mem - Frees memory allocated for an adapter
8993 * @ioa_cfg: ioa cfg struct
8998 static void ipr_free_mem(struct ipr_ioa_cfg
*ioa_cfg
)
9002 kfree(ioa_cfg
->res_entries
);
9003 pci_free_consistent(ioa_cfg
->pdev
, sizeof(struct ipr_misc_cbs
),
9004 ioa_cfg
->vpd_cbs
, ioa_cfg
->vpd_cbs_dma
);
9005 ipr_free_cmd_blks(ioa_cfg
);
9007 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++)
9008 pci_free_consistent(ioa_cfg
->pdev
,
9009 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
9010 ioa_cfg
->hrrq
[i
].host_rrq
,
9011 ioa_cfg
->hrrq
[i
].host_rrq_dma
);
9013 pci_free_consistent(ioa_cfg
->pdev
, ioa_cfg
->cfg_table_size
,
9014 ioa_cfg
->u
.cfg_table
,
9015 ioa_cfg
->cfg_table_dma
);
9017 for (i
= 0; i
< IPR_NUM_HCAMS
; i
++) {
9018 pci_free_consistent(ioa_cfg
->pdev
,
9019 sizeof(struct ipr_hostrcb
),
9020 ioa_cfg
->hostrcb
[i
],
9021 ioa_cfg
->hostrcb_dma
[i
]);
9024 ipr_free_dump(ioa_cfg
);
9025 kfree(ioa_cfg
->trace
);
9029 * ipr_free_all_resources - Free all allocated resources for an adapter.
9030 * @ipr_cmd: ipr command struct
9032 * This function frees all allocated resources for the
9033 * specified adapter.
9038 static void ipr_free_all_resources(struct ipr_ioa_cfg
*ioa_cfg
)
9040 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
9043 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
||
9044 ioa_cfg
->intr_flag
== IPR_USE_MSIX
) {
9046 for (i
= 0; i
< ioa_cfg
->nvectors
; i
++)
9047 free_irq(ioa_cfg
->vectors_info
[i
].vec
,
9050 free_irq(pdev
->irq
, &ioa_cfg
->hrrq
[0]);
9052 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
) {
9053 pci_disable_msi(pdev
);
9054 ioa_cfg
->intr_flag
&= ~IPR_USE_MSI
;
9055 } else if (ioa_cfg
->intr_flag
== IPR_USE_MSIX
) {
9056 pci_disable_msix(pdev
);
9057 ioa_cfg
->intr_flag
&= ~IPR_USE_MSIX
;
9060 iounmap(ioa_cfg
->hdw_dma_regs
);
9061 pci_release_regions(pdev
);
9062 ipr_free_mem(ioa_cfg
);
9063 scsi_host_put(ioa_cfg
->host
);
9064 pci_disable_device(pdev
);
9069 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9070 * @ioa_cfg: ioa config struct
9073 * 0 on success / -ENOMEM on allocation failure
9075 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg
*ioa_cfg
)
9077 struct ipr_cmnd
*ipr_cmd
;
9078 struct ipr_ioarcb
*ioarcb
;
9079 dma_addr_t dma_addr
;
9080 int i
, entries_each_hrrq
, hrrq_id
= 0;
9082 ioa_cfg
->ipr_cmd_pool
= pci_pool_create(IPR_NAME
, ioa_cfg
->pdev
,
9083 sizeof(struct ipr_cmnd
), 512, 0);
9085 if (!ioa_cfg
->ipr_cmd_pool
)
9088 ioa_cfg
->ipr_cmnd_list
= kcalloc(IPR_NUM_CMD_BLKS
, sizeof(struct ipr_cmnd
*), GFP_KERNEL
);
9089 ioa_cfg
->ipr_cmnd_list_dma
= kcalloc(IPR_NUM_CMD_BLKS
, sizeof(dma_addr_t
), GFP_KERNEL
);
9091 if (!ioa_cfg
->ipr_cmnd_list
|| !ioa_cfg
->ipr_cmnd_list_dma
) {
9092 ipr_free_cmd_blks(ioa_cfg
);
9096 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9097 if (ioa_cfg
->hrrq_num
> 1) {
9099 entries_each_hrrq
= IPR_NUM_INTERNAL_CMD_BLKS
;
9100 ioa_cfg
->hrrq
[i
].min_cmd_id
= 0;
9101 ioa_cfg
->hrrq
[i
].max_cmd_id
=
9102 (entries_each_hrrq
- 1);
9105 IPR_NUM_BASE_CMD_BLKS
/
9106 (ioa_cfg
->hrrq_num
- 1);
9107 ioa_cfg
->hrrq
[i
].min_cmd_id
=
9108 IPR_NUM_INTERNAL_CMD_BLKS
+
9109 (i
- 1) * entries_each_hrrq
;
9110 ioa_cfg
->hrrq
[i
].max_cmd_id
=
9111 (IPR_NUM_INTERNAL_CMD_BLKS
+
9112 i
* entries_each_hrrq
- 1);
9115 entries_each_hrrq
= IPR_NUM_CMD_BLKS
;
9116 ioa_cfg
->hrrq
[i
].min_cmd_id
= 0;
9117 ioa_cfg
->hrrq
[i
].max_cmd_id
= (entries_each_hrrq
- 1);
9119 ioa_cfg
->hrrq
[i
].size
= entries_each_hrrq
;
9122 BUG_ON(ioa_cfg
->hrrq_num
== 0);
9124 i
= IPR_NUM_CMD_BLKS
-
9125 ioa_cfg
->hrrq
[ioa_cfg
->hrrq_num
- 1].max_cmd_id
- 1;
9127 ioa_cfg
->hrrq
[ioa_cfg
->hrrq_num
- 1].size
+= i
;
9128 ioa_cfg
->hrrq
[ioa_cfg
->hrrq_num
- 1].max_cmd_id
+= i
;
9131 for (i
= 0; i
< IPR_NUM_CMD_BLKS
; i
++) {
9132 ipr_cmd
= pci_pool_alloc(ioa_cfg
->ipr_cmd_pool
, GFP_KERNEL
, &dma_addr
);
9135 ipr_free_cmd_blks(ioa_cfg
);
9139 memset(ipr_cmd
, 0, sizeof(*ipr_cmd
));
9140 ioa_cfg
->ipr_cmnd_list
[i
] = ipr_cmd
;
9141 ioa_cfg
->ipr_cmnd_list_dma
[i
] = dma_addr
;
9143 ioarcb
= &ipr_cmd
->ioarcb
;
9144 ipr_cmd
->dma_addr
= dma_addr
;
9146 ioarcb
->a
.ioarcb_host_pci_addr64
= cpu_to_be64(dma_addr
);
9148 ioarcb
->a
.ioarcb_host_pci_addr
= cpu_to_be32(dma_addr
);
9150 ioarcb
->host_response_handle
= cpu_to_be32(i
<< 2);
9151 if (ioa_cfg
->sis64
) {
9152 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
9153 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl64
));
9154 ioarcb
->u
.sis64_addr_data
.ioasa_host_pci_addr
=
9155 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, s
.ioasa64
));
9157 ioarcb
->write_ioadl_addr
=
9158 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl
));
9159 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
9160 ioarcb
->ioasa_host_pci_addr
=
9161 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, s
.ioasa
));
9163 ioarcb
->ioasa_len
= cpu_to_be16(sizeof(struct ipr_ioasa
));
9164 ipr_cmd
->cmd_index
= i
;
9165 ipr_cmd
->ioa_cfg
= ioa_cfg
;
9166 ipr_cmd
->sense_buffer_dma
= dma_addr
+
9167 offsetof(struct ipr_cmnd
, sense_buffer
);
9169 ipr_cmd
->ioarcb
.cmd_pkt
.hrrq_id
= hrrq_id
;
9170 ipr_cmd
->hrrq
= &ioa_cfg
->hrrq
[hrrq_id
];
9171 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
9172 if (i
>= ioa_cfg
->hrrq
[hrrq_id
].max_cmd_id
)
9180 * ipr_alloc_mem - Allocate memory for an adapter
9181 * @ioa_cfg: ioa config struct
9184 * 0 on success / non-zero for error
9186 static int ipr_alloc_mem(struct ipr_ioa_cfg
*ioa_cfg
)
9188 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
9189 int i
, rc
= -ENOMEM
;
9192 ioa_cfg
->res_entries
= kzalloc(sizeof(struct ipr_resource_entry
) *
9193 ioa_cfg
->max_devs_supported
, GFP_KERNEL
);
9195 if (!ioa_cfg
->res_entries
)
9198 for (i
= 0; i
< ioa_cfg
->max_devs_supported
; i
++) {
9199 list_add_tail(&ioa_cfg
->res_entries
[i
].queue
, &ioa_cfg
->free_res_q
);
9200 ioa_cfg
->res_entries
[i
].ioa_cfg
= ioa_cfg
;
9203 ioa_cfg
->vpd_cbs
= pci_alloc_consistent(ioa_cfg
->pdev
,
9204 sizeof(struct ipr_misc_cbs
),
9205 &ioa_cfg
->vpd_cbs_dma
);
9207 if (!ioa_cfg
->vpd_cbs
)
9208 goto out_free_res_entries
;
9210 if (ipr_alloc_cmd_blks(ioa_cfg
))
9211 goto out_free_vpd_cbs
;
9213 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9214 ioa_cfg
->hrrq
[i
].host_rrq
= pci_alloc_consistent(ioa_cfg
->pdev
,
9215 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
9216 &ioa_cfg
->hrrq
[i
].host_rrq_dma
);
9218 if (!ioa_cfg
->hrrq
[i
].host_rrq
) {
9220 pci_free_consistent(pdev
,
9221 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
9222 ioa_cfg
->hrrq
[i
].host_rrq
,
9223 ioa_cfg
->hrrq
[i
].host_rrq_dma
);
9224 goto out_ipr_free_cmd_blocks
;
9226 ioa_cfg
->hrrq
[i
].ioa_cfg
= ioa_cfg
;
9229 ioa_cfg
->u
.cfg_table
= pci_alloc_consistent(ioa_cfg
->pdev
,
9230 ioa_cfg
->cfg_table_size
,
9231 &ioa_cfg
->cfg_table_dma
);
9233 if (!ioa_cfg
->u
.cfg_table
)
9234 goto out_free_host_rrq
;
9236 for (i
= 0; i
< IPR_NUM_HCAMS
; i
++) {
9237 ioa_cfg
->hostrcb
[i
] = pci_alloc_consistent(ioa_cfg
->pdev
,
9238 sizeof(struct ipr_hostrcb
),
9239 &ioa_cfg
->hostrcb_dma
[i
]);
9241 if (!ioa_cfg
->hostrcb
[i
])
9242 goto out_free_hostrcb_dma
;
9244 ioa_cfg
->hostrcb
[i
]->hostrcb_dma
=
9245 ioa_cfg
->hostrcb_dma
[i
] + offsetof(struct ipr_hostrcb
, hcam
);
9246 ioa_cfg
->hostrcb
[i
]->ioa_cfg
= ioa_cfg
;
9247 list_add_tail(&ioa_cfg
->hostrcb
[i
]->queue
, &ioa_cfg
->hostrcb_free_q
);
9250 ioa_cfg
->trace
= kzalloc(sizeof(struct ipr_trace_entry
) *
9251 IPR_NUM_TRACE_ENTRIES
, GFP_KERNEL
);
9253 if (!ioa_cfg
->trace
)
9254 goto out_free_hostrcb_dma
;
9261 out_free_hostrcb_dma
:
9263 pci_free_consistent(pdev
, sizeof(struct ipr_hostrcb
),
9264 ioa_cfg
->hostrcb
[i
],
9265 ioa_cfg
->hostrcb_dma
[i
]);
9267 pci_free_consistent(pdev
, ioa_cfg
->cfg_table_size
,
9268 ioa_cfg
->u
.cfg_table
,
9269 ioa_cfg
->cfg_table_dma
);
9271 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9272 pci_free_consistent(pdev
,
9273 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
9274 ioa_cfg
->hrrq
[i
].host_rrq
,
9275 ioa_cfg
->hrrq
[i
].host_rrq_dma
);
9277 out_ipr_free_cmd_blocks
:
9278 ipr_free_cmd_blks(ioa_cfg
);
9280 pci_free_consistent(pdev
, sizeof(struct ipr_misc_cbs
),
9281 ioa_cfg
->vpd_cbs
, ioa_cfg
->vpd_cbs_dma
);
9282 out_free_res_entries
:
9283 kfree(ioa_cfg
->res_entries
);
9288 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9289 * @ioa_cfg: ioa config struct
9294 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg
*ioa_cfg
)
9298 for (i
= 0; i
< IPR_MAX_NUM_BUSES
; i
++) {
9299 ioa_cfg
->bus_attr
[i
].bus
= i
;
9300 ioa_cfg
->bus_attr
[i
].qas_enabled
= 0;
9301 ioa_cfg
->bus_attr
[i
].bus_width
= IPR_DEFAULT_BUS_WIDTH
;
9302 if (ipr_max_speed
< ARRAY_SIZE(ipr_max_bus_speeds
))
9303 ioa_cfg
->bus_attr
[i
].max_xfer_rate
= ipr_max_bus_speeds
[ipr_max_speed
];
9305 ioa_cfg
->bus_attr
[i
].max_xfer_rate
= IPR_U160_SCSI_RATE
;
9310 * ipr_init_regs - Initialize IOA registers
9311 * @ioa_cfg: ioa config struct
9316 static void ipr_init_regs(struct ipr_ioa_cfg
*ioa_cfg
)
9318 const struct ipr_interrupt_offsets
*p
;
9319 struct ipr_interrupts
*t
;
9322 p
= &ioa_cfg
->chip_cfg
->regs
;
9324 base
= ioa_cfg
->hdw_dma_regs
;
9326 t
->set_interrupt_mask_reg
= base
+ p
->set_interrupt_mask_reg
;
9327 t
->clr_interrupt_mask_reg
= base
+ p
->clr_interrupt_mask_reg
;
9328 t
->clr_interrupt_mask_reg32
= base
+ p
->clr_interrupt_mask_reg32
;
9329 t
->sense_interrupt_mask_reg
= base
+ p
->sense_interrupt_mask_reg
;
9330 t
->sense_interrupt_mask_reg32
= base
+ p
->sense_interrupt_mask_reg32
;
9331 t
->clr_interrupt_reg
= base
+ p
->clr_interrupt_reg
;
9332 t
->clr_interrupt_reg32
= base
+ p
->clr_interrupt_reg32
;
9333 t
->sense_interrupt_reg
= base
+ p
->sense_interrupt_reg
;
9334 t
->sense_interrupt_reg32
= base
+ p
->sense_interrupt_reg32
;
9335 t
->ioarrin_reg
= base
+ p
->ioarrin_reg
;
9336 t
->sense_uproc_interrupt_reg
= base
+ p
->sense_uproc_interrupt_reg
;
9337 t
->sense_uproc_interrupt_reg32
= base
+ p
->sense_uproc_interrupt_reg32
;
9338 t
->set_uproc_interrupt_reg
= base
+ p
->set_uproc_interrupt_reg
;
9339 t
->set_uproc_interrupt_reg32
= base
+ p
->set_uproc_interrupt_reg32
;
9340 t
->clr_uproc_interrupt_reg
= base
+ p
->clr_uproc_interrupt_reg
;
9341 t
->clr_uproc_interrupt_reg32
= base
+ p
->clr_uproc_interrupt_reg32
;
9343 if (ioa_cfg
->sis64
) {
9344 t
->init_feedback_reg
= base
+ p
->init_feedback_reg
;
9345 t
->dump_addr_reg
= base
+ p
->dump_addr_reg
;
9346 t
->dump_data_reg
= base
+ p
->dump_data_reg
;
9347 t
->endian_swap_reg
= base
+ p
->endian_swap_reg
;
9352 * ipr_init_ioa_cfg - Initialize IOA config struct
9353 * @ioa_cfg: ioa config struct
9354 * @host: scsi host struct
9355 * @pdev: PCI dev struct
9360 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg
*ioa_cfg
,
9361 struct Scsi_Host
*host
, struct pci_dev
*pdev
)
9365 ioa_cfg
->host
= host
;
9366 ioa_cfg
->pdev
= pdev
;
9367 ioa_cfg
->log_level
= ipr_log_level
;
9368 ioa_cfg
->doorbell
= IPR_DOORBELL
;
9369 sprintf(ioa_cfg
->eye_catcher
, IPR_EYECATCHER
);
9370 sprintf(ioa_cfg
->trace_start
, IPR_TRACE_START_LABEL
);
9371 sprintf(ioa_cfg
->cfg_table_start
, IPR_CFG_TBL_START
);
9372 sprintf(ioa_cfg
->resource_table_label
, IPR_RES_TABLE_LABEL
);
9373 sprintf(ioa_cfg
->ipr_hcam_label
, IPR_HCAM_LABEL
);
9374 sprintf(ioa_cfg
->ipr_cmd_label
, IPR_CMD_LABEL
);
9376 INIT_LIST_HEAD(&ioa_cfg
->hostrcb_free_q
);
9377 INIT_LIST_HEAD(&ioa_cfg
->hostrcb_pending_q
);
9378 INIT_LIST_HEAD(&ioa_cfg
->free_res_q
);
9379 INIT_LIST_HEAD(&ioa_cfg
->used_res_q
);
9380 INIT_WORK(&ioa_cfg
->work_q
, ipr_worker_thread
);
9381 init_waitqueue_head(&ioa_cfg
->reset_wait_q
);
9382 init_waitqueue_head(&ioa_cfg
->msi_wait_q
);
9383 init_waitqueue_head(&ioa_cfg
->eeh_wait_q
);
9384 ioa_cfg
->sdt_state
= INACTIVE
;
9386 ipr_initialize_bus_attr(ioa_cfg
);
9387 ioa_cfg
->max_devs_supported
= ipr_max_devs
;
9389 if (ioa_cfg
->sis64
) {
9390 host
->max_id
= IPR_MAX_SIS64_TARGETS_PER_BUS
;
9391 host
->max_lun
= IPR_MAX_SIS64_LUNS_PER_TARGET
;
9392 if (ipr_max_devs
> IPR_MAX_SIS64_DEVS
)
9393 ioa_cfg
->max_devs_supported
= IPR_MAX_SIS64_DEVS
;
9394 ioa_cfg
->cfg_table_size
= (sizeof(struct ipr_config_table_hdr64
)
9395 + ((sizeof(struct ipr_config_table_entry64
)
9396 * ioa_cfg
->max_devs_supported
)));
9398 host
->max_id
= IPR_MAX_NUM_TARGETS_PER_BUS
;
9399 host
->max_lun
= IPR_MAX_NUM_LUNS_PER_TARGET
;
9400 if (ipr_max_devs
> IPR_MAX_PHYSICAL_DEVS
)
9401 ioa_cfg
->max_devs_supported
= IPR_MAX_PHYSICAL_DEVS
;
9402 ioa_cfg
->cfg_table_size
= (sizeof(struct ipr_config_table_hdr
)
9403 + ((sizeof(struct ipr_config_table_entry
)
9404 * ioa_cfg
->max_devs_supported
)));
9407 host
->max_channel
= IPR_MAX_BUS_TO_SCAN
;
9408 host
->unique_id
= host
->host_no
;
9409 host
->max_cmd_len
= IPR_MAX_CDB_LEN
;
9410 host
->can_queue
= ioa_cfg
->max_cmds
;
9411 pci_set_drvdata(pdev
, ioa_cfg
);
9413 for (i
= 0; i
< ARRAY_SIZE(ioa_cfg
->hrrq
); i
++) {
9414 INIT_LIST_HEAD(&ioa_cfg
->hrrq
[i
].hrrq_free_q
);
9415 INIT_LIST_HEAD(&ioa_cfg
->hrrq
[i
].hrrq_pending_q
);
9416 spin_lock_init(&ioa_cfg
->hrrq
[i
]._lock
);
9418 ioa_cfg
->hrrq
[i
].lock
= ioa_cfg
->host
->host_lock
;
9420 ioa_cfg
->hrrq
[i
].lock
= &ioa_cfg
->hrrq
[i
]._lock
;
9425 * ipr_get_chip_info - Find adapter chip information
9426 * @dev_id: PCI device id struct
9429 * ptr to chip information on success / NULL on failure
9431 static const struct ipr_chip_t
*
9432 ipr_get_chip_info(const struct pci_device_id
*dev_id
)
9436 for (i
= 0; i
< ARRAY_SIZE(ipr_chip
); i
++)
9437 if (ipr_chip
[i
].vendor
== dev_id
->vendor
&&
9438 ipr_chip
[i
].device
== dev_id
->device
)
9439 return &ipr_chip
[i
];
9444 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9446 * @ioa_cfg: ioa config struct
9451 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg
*ioa_cfg
)
9453 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
9455 if (pci_channel_offline(pdev
)) {
9456 wait_event_timeout(ioa_cfg
->eeh_wait_q
,
9457 !pci_channel_offline(pdev
),
9458 IPR_PCI_ERROR_RECOVERY_TIMEOUT
);
9459 pci_restore_state(pdev
);
9463 static int ipr_enable_msix(struct ipr_ioa_cfg
*ioa_cfg
)
9465 struct msix_entry entries
[IPR_MAX_MSIX_VECTORS
];
9468 for (i
= 0; i
< ARRAY_SIZE(entries
); ++i
)
9469 entries
[i
].entry
= i
;
9471 vectors
= pci_enable_msix_range(ioa_cfg
->pdev
,
9472 entries
, 1, ipr_number_of_msix
);
9474 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9478 for (i
= 0; i
< vectors
; i
++)
9479 ioa_cfg
->vectors_info
[i
].vec
= entries
[i
].vector
;
9480 ioa_cfg
->nvectors
= vectors
;
9485 static int ipr_enable_msi(struct ipr_ioa_cfg
*ioa_cfg
)
9489 vectors
= pci_enable_msi_range(ioa_cfg
->pdev
, 1, ipr_number_of_msix
);
9491 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9495 for (i
= 0; i
< vectors
; i
++)
9496 ioa_cfg
->vectors_info
[i
].vec
= ioa_cfg
->pdev
->irq
+ i
;
9497 ioa_cfg
->nvectors
= vectors
;
9502 static void name_msi_vectors(struct ipr_ioa_cfg
*ioa_cfg
)
9504 int vec_idx
, n
= sizeof(ioa_cfg
->vectors_info
[0].desc
) - 1;
9506 for (vec_idx
= 0; vec_idx
< ioa_cfg
->nvectors
; vec_idx
++) {
9507 snprintf(ioa_cfg
->vectors_info
[vec_idx
].desc
, n
,
9508 "host%d-%d", ioa_cfg
->host
->host_no
, vec_idx
);
9509 ioa_cfg
->vectors_info
[vec_idx
].
9510 desc
[strlen(ioa_cfg
->vectors_info
[vec_idx
].desc
)] = 0;
9514 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg
*ioa_cfg
)
9518 for (i
= 1; i
< ioa_cfg
->nvectors
; i
++) {
9519 rc
= request_irq(ioa_cfg
->vectors_info
[i
].vec
,
9522 ioa_cfg
->vectors_info
[i
].desc
,
9526 free_irq(ioa_cfg
->vectors_info
[i
].vec
,
9535 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9536 * @pdev: PCI device struct
9538 * Description: Simply set the msi_received flag to 1 indicating that
9539 * Message Signaled Interrupts are supported.
9542 * 0 on success / non-zero on failure
9544 static irqreturn_t
ipr_test_intr(int irq
, void *devp
)
9546 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)devp
;
9547 unsigned long lock_flags
= 0;
9548 irqreturn_t rc
= IRQ_HANDLED
;
9550 dev_info(&ioa_cfg
->pdev
->dev
, "Received IRQ : %d\n", irq
);
9551 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
9553 ioa_cfg
->msi_received
= 1;
9554 wake_up(&ioa_cfg
->msi_wait_q
);
9556 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
9561 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9562 * @pdev: PCI device struct
9564 * Description: The return value from pci_enable_msi_range() can not always be
9565 * trusted. This routine sets up and initiates a test interrupt to determine
9566 * if the interrupt is received via the ipr_test_intr() service routine.
9567 * If the tests fails, the driver will fall back to LSI.
9570 * 0 on success / non-zero on failure
9572 static int ipr_test_msi(struct ipr_ioa_cfg
*ioa_cfg
, struct pci_dev
*pdev
)
9575 volatile u32 int_reg
;
9576 unsigned long lock_flags
= 0;
9580 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
9581 init_waitqueue_head(&ioa_cfg
->msi_wait_q
);
9582 ioa_cfg
->msi_received
= 0;
9583 ipr_mask_and_clear_interrupts(ioa_cfg
, ~IPR_PCII_IOA_TRANS_TO_OPER
);
9584 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
, ioa_cfg
->regs
.clr_interrupt_mask_reg32
);
9585 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
9586 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
9588 if (ioa_cfg
->intr_flag
== IPR_USE_MSIX
)
9589 rc
= request_irq(ioa_cfg
->vectors_info
[0].vec
, ipr_test_intr
, 0, IPR_NAME
, ioa_cfg
);
9591 rc
= request_irq(pdev
->irq
, ipr_test_intr
, 0, IPR_NAME
, ioa_cfg
);
9593 dev_err(&pdev
->dev
, "Can not assign irq %d\n", pdev
->irq
);
9595 } else if (ipr_debug
)
9596 dev_info(&pdev
->dev
, "IRQ assigned: %d\n", pdev
->irq
);
9598 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
, ioa_cfg
->regs
.sense_interrupt_reg32
);
9599 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
9600 wait_event_timeout(ioa_cfg
->msi_wait_q
, ioa_cfg
->msi_received
, HZ
);
9601 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
9602 ipr_mask_and_clear_interrupts(ioa_cfg
, ~IPR_PCII_IOA_TRANS_TO_OPER
);
9604 if (!ioa_cfg
->msi_received
) {
9605 /* MSI test failed */
9606 dev_info(&pdev
->dev
, "MSI test failed. Falling back to LSI.\n");
9608 } else if (ipr_debug
)
9609 dev_info(&pdev
->dev
, "MSI test succeeded.\n");
9611 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
9613 if (ioa_cfg
->intr_flag
== IPR_USE_MSIX
)
9614 free_irq(ioa_cfg
->vectors_info
[0].vec
, ioa_cfg
);
9616 free_irq(pdev
->irq
, ioa_cfg
);
9623 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
9624 * @pdev: PCI device struct
9625 * @dev_id: PCI device id struct
9628 * 0 on success / non-zero on failure
9630 static int ipr_probe_ioa(struct pci_dev
*pdev
,
9631 const struct pci_device_id
*dev_id
)
9633 struct ipr_ioa_cfg
*ioa_cfg
;
9634 struct Scsi_Host
*host
;
9635 unsigned long ipr_regs_pci
;
9636 void __iomem
*ipr_regs
;
9637 int rc
= PCIBIOS_SUCCESSFUL
;
9638 volatile u32 mask
, uproc
, interrupts
;
9639 unsigned long lock_flags
, driver_lock_flags
;
9643 dev_info(&pdev
->dev
, "Found IOA with IRQ: %d\n", pdev
->irq
);
9644 host
= scsi_host_alloc(&driver_template
, sizeof(*ioa_cfg
));
9647 dev_err(&pdev
->dev
, "call to scsi_host_alloc failed!\n");
9652 ioa_cfg
= (struct ipr_ioa_cfg
*)host
->hostdata
;
9653 memset(ioa_cfg
, 0, sizeof(struct ipr_ioa_cfg
));
9654 ata_host_init(&ioa_cfg
->ata_host
, &pdev
->dev
, &ipr_sata_ops
);
9656 ioa_cfg
->ipr_chip
= ipr_get_chip_info(dev_id
);
9658 if (!ioa_cfg
->ipr_chip
) {
9659 dev_err(&pdev
->dev
, "Unknown adapter chipset 0x%04X 0x%04X\n",
9660 dev_id
->vendor
, dev_id
->device
);
9661 goto out_scsi_host_put
;
9664 /* set SIS 32 or SIS 64 */
9665 ioa_cfg
->sis64
= ioa_cfg
->ipr_chip
->sis_type
== IPR_SIS64
? 1 : 0;
9666 ioa_cfg
->chip_cfg
= ioa_cfg
->ipr_chip
->cfg
;
9667 ioa_cfg
->clear_isr
= ioa_cfg
->chip_cfg
->clear_isr
;
9668 ioa_cfg
->max_cmds
= ioa_cfg
->chip_cfg
->max_cmds
;
9670 if (ipr_transop_timeout
)
9671 ioa_cfg
->transop_timeout
= ipr_transop_timeout
;
9672 else if (dev_id
->driver_data
& IPR_USE_LONG_TRANSOP_TIMEOUT
)
9673 ioa_cfg
->transop_timeout
= IPR_LONG_OPERATIONAL_TIMEOUT
;
9675 ioa_cfg
->transop_timeout
= IPR_OPERATIONAL_TIMEOUT
;
9677 ioa_cfg
->revid
= pdev
->revision
;
9679 ipr_init_ioa_cfg(ioa_cfg
, host
, pdev
);
9681 ipr_regs_pci
= pci_resource_start(pdev
, 0);
9683 rc
= pci_request_regions(pdev
, IPR_NAME
);
9686 "Couldn't register memory range of registers\n");
9687 goto out_scsi_host_put
;
9690 rc
= pci_enable_device(pdev
);
9692 if (rc
|| pci_channel_offline(pdev
)) {
9693 if (pci_channel_offline(pdev
)) {
9694 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9695 rc
= pci_enable_device(pdev
);
9699 dev_err(&pdev
->dev
, "Cannot enable adapter\n");
9700 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9701 goto out_release_regions
;
9705 ipr_regs
= pci_ioremap_bar(pdev
, 0);
9709 "Couldn't map memory range of registers\n");
9714 ioa_cfg
->hdw_dma_regs
= ipr_regs
;
9715 ioa_cfg
->hdw_dma_regs_pci
= ipr_regs_pci
;
9716 ioa_cfg
->ioa_mailbox
= ioa_cfg
->chip_cfg
->mailbox
+ ipr_regs
;
9718 ipr_init_regs(ioa_cfg
);
9720 if (ioa_cfg
->sis64
) {
9721 rc
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
9723 dev_dbg(&pdev
->dev
, "Failed to set 64 bit PCI DMA mask\n");
9724 rc
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
9727 rc
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
9730 dev_err(&pdev
->dev
, "Failed to set PCI DMA mask\n");
9734 rc
= pci_write_config_byte(pdev
, PCI_CACHE_LINE_SIZE
,
9735 ioa_cfg
->chip_cfg
->cache_line_size
);
9737 if (rc
!= PCIBIOS_SUCCESSFUL
) {
9738 dev_err(&pdev
->dev
, "Write of cache line size failed\n");
9739 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9744 /* Issue MMIO read to ensure card is not in EEH */
9745 interrupts
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
9746 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9748 if (ipr_number_of_msix
> IPR_MAX_MSIX_VECTORS
) {
9749 dev_err(&pdev
->dev
, "The max number of MSIX is %d\n",
9750 IPR_MAX_MSIX_VECTORS
);
9751 ipr_number_of_msix
= IPR_MAX_MSIX_VECTORS
;
9754 if (ioa_cfg
->ipr_chip
->intr_type
== IPR_USE_MSI
&&
9755 ipr_enable_msix(ioa_cfg
) == 0)
9756 ioa_cfg
->intr_flag
= IPR_USE_MSIX
;
9757 else if (ioa_cfg
->ipr_chip
->intr_type
== IPR_USE_MSI
&&
9758 ipr_enable_msi(ioa_cfg
) == 0)
9759 ioa_cfg
->intr_flag
= IPR_USE_MSI
;
9761 ioa_cfg
->intr_flag
= IPR_USE_LSI
;
9762 ioa_cfg
->clear_isr
= 1;
9763 ioa_cfg
->nvectors
= 1;
9764 dev_info(&pdev
->dev
, "Cannot enable MSI.\n");
9767 pci_set_master(pdev
);
9769 if (pci_channel_offline(pdev
)) {
9770 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9771 pci_set_master(pdev
);
9772 if (pci_channel_offline(pdev
)) {
9774 goto out_msi_disable
;
9778 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
||
9779 ioa_cfg
->intr_flag
== IPR_USE_MSIX
) {
9780 rc
= ipr_test_msi(ioa_cfg
, pdev
);
9781 if (rc
== -EOPNOTSUPP
) {
9782 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9783 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
) {
9784 ioa_cfg
->intr_flag
&= ~IPR_USE_MSI
;
9785 pci_disable_msi(pdev
);
9786 } else if (ioa_cfg
->intr_flag
== IPR_USE_MSIX
) {
9787 ioa_cfg
->intr_flag
&= ~IPR_USE_MSIX
;
9788 pci_disable_msix(pdev
);
9791 ioa_cfg
->intr_flag
= IPR_USE_LSI
;
9792 ioa_cfg
->nvectors
= 1;
9795 goto out_msi_disable
;
9797 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
)
9798 dev_info(&pdev
->dev
,
9799 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9800 ioa_cfg
->nvectors
, pdev
->irq
);
9801 else if (ioa_cfg
->intr_flag
== IPR_USE_MSIX
)
9802 dev_info(&pdev
->dev
,
9803 "Request for %d MSIXs succeeded.",
9808 ioa_cfg
->hrrq_num
= min3(ioa_cfg
->nvectors
,
9809 (unsigned int)num_online_cpus(),
9810 (unsigned int)IPR_MAX_HRRQ_NUM
);
9812 if ((rc
= ipr_save_pcix_cmd_reg(ioa_cfg
)))
9813 goto out_msi_disable
;
9815 if ((rc
= ipr_set_pcix_cmd_reg(ioa_cfg
)))
9816 goto out_msi_disable
;
9818 rc
= ipr_alloc_mem(ioa_cfg
);
9821 "Couldn't allocate enough memory for device driver!\n");
9822 goto out_msi_disable
;
9825 /* Save away PCI config space for use following IOA reset */
9826 rc
= pci_save_state(pdev
);
9828 if (rc
!= PCIBIOS_SUCCESSFUL
) {
9829 dev_err(&pdev
->dev
, "Failed to save PCI config space\n");
9835 * If HRRQ updated interrupt is not masked, or reset alert is set,
9836 * the card is in an unknown state and needs a hard reset
9838 mask
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg32
);
9839 interrupts
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
9840 uproc
= readl(ioa_cfg
->regs
.sense_uproc_interrupt_reg32
);
9841 if ((mask
& IPR_PCII_HRRQ_UPDATED
) == 0 || (uproc
& IPR_UPROCI_RESET_ALERT
))
9842 ioa_cfg
->needs_hard_reset
= 1;
9843 if ((interrupts
& IPR_PCII_ERROR_INTERRUPTS
) || reset_devices
)
9844 ioa_cfg
->needs_hard_reset
= 1;
9845 if (interrupts
& IPR_PCII_IOA_UNIT_CHECKED
)
9846 ioa_cfg
->ioa_unit_checked
= 1;
9848 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
9849 ipr_mask_and_clear_interrupts(ioa_cfg
, ~IPR_PCII_IOA_TRANS_TO_OPER
);
9850 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
9852 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
9853 || ioa_cfg
->intr_flag
== IPR_USE_MSIX
) {
9854 name_msi_vectors(ioa_cfg
);
9855 rc
= request_irq(ioa_cfg
->vectors_info
[0].vec
, ipr_isr
,
9857 ioa_cfg
->vectors_info
[0].desc
,
9860 rc
= ipr_request_other_msi_irqs(ioa_cfg
);
9862 rc
= request_irq(pdev
->irq
, ipr_isr
,
9864 IPR_NAME
, &ioa_cfg
->hrrq
[0]);
9867 dev_err(&pdev
->dev
, "Couldn't register IRQ %d! rc=%d\n",
9872 if ((dev_id
->driver_data
& IPR_USE_PCI_WARM_RESET
) ||
9873 (dev_id
->device
== PCI_DEVICE_ID_IBM_OBSIDIAN_E
&& !ioa_cfg
->revid
)) {
9874 ioa_cfg
->needs_warm_reset
= 1;
9875 ioa_cfg
->reset
= ipr_reset_slot_reset
;
9877 ioa_cfg
->reset
= ipr_reset_start_bist
;
9879 spin_lock_irqsave(&ipr_driver_lock
, driver_lock_flags
);
9880 list_add_tail(&ioa_cfg
->queue
, &ipr_ioa_head
);
9881 spin_unlock_irqrestore(&ipr_driver_lock
, driver_lock_flags
);
9888 ipr_free_mem(ioa_cfg
);
9890 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9891 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
)
9892 pci_disable_msi(pdev
);
9893 else if (ioa_cfg
->intr_flag
== IPR_USE_MSIX
)
9894 pci_disable_msix(pdev
);
9898 pci_disable_device(pdev
);
9899 out_release_regions
:
9900 pci_release_regions(pdev
);
9902 scsi_host_put(host
);
9907 * ipr_scan_vsets - Scans for VSET devices
9908 * @ioa_cfg: ioa config struct
9910 * Description: Since the VSET resources do not follow SAM in that we can have
9911 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
9916 static void ipr_scan_vsets(struct ipr_ioa_cfg
*ioa_cfg
)
9920 for (target
= 0; target
< IPR_MAX_NUM_TARGETS_PER_BUS
; target
++)
9921 for (lun
= 0; lun
< IPR_MAX_NUM_VSET_LUNS_PER_TARGET
; lun
++)
9922 scsi_add_device(ioa_cfg
->host
, IPR_VSET_BUS
, target
, lun
);
9926 * ipr_initiate_ioa_bringdown - Bring down an adapter
9927 * @ioa_cfg: ioa config struct
9928 * @shutdown_type: shutdown type
9930 * Description: This function will initiate bringing down the adapter.
9931 * This consists of issuing an IOA shutdown to the adapter
9932 * to flush the cache, and running BIST.
9933 * If the caller needs to wait on the completion of the reset,
9934 * the caller must sleep on the reset_wait_q.
9939 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg
*ioa_cfg
,
9940 enum ipr_shutdown_type shutdown_type
)
9943 if (ioa_cfg
->sdt_state
== WAIT_FOR_DUMP
)
9944 ioa_cfg
->sdt_state
= ABORT_DUMP
;
9945 ioa_cfg
->reset_retries
= 0;
9946 ioa_cfg
->in_ioa_bringdown
= 1;
9947 ipr_initiate_ioa_reset(ioa_cfg
, shutdown_type
);
9952 * __ipr_remove - Remove a single adapter
9953 * @pdev: pci device struct
9955 * Adapter hot plug remove entry point.
9960 static void __ipr_remove(struct pci_dev
*pdev
)
9962 unsigned long host_lock_flags
= 0;
9963 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
9965 unsigned long driver_lock_flags
;
9968 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9969 while (ioa_cfg
->in_reset_reload
) {
9970 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9971 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
9972 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9975 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9976 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
9977 ioa_cfg
->hrrq
[i
].removing_ioa
= 1;
9978 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
9981 ipr_initiate_ioa_bringdown(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
9983 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9984 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
9985 flush_work(&ioa_cfg
->work_q
);
9986 INIT_LIST_HEAD(&ioa_cfg
->used_res_q
);
9987 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9989 spin_lock_irqsave(&ipr_driver_lock
, driver_lock_flags
);
9990 list_del(&ioa_cfg
->queue
);
9991 spin_unlock_irqrestore(&ipr_driver_lock
, driver_lock_flags
);
9993 if (ioa_cfg
->sdt_state
== ABORT_DUMP
)
9994 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
9995 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9997 ipr_free_all_resources(ioa_cfg
);
10003 * ipr_remove - IOA hot plug remove entry point
10004 * @pdev: pci device struct
10006 * Adapter hot plug remove entry point.
10011 static void ipr_remove(struct pci_dev
*pdev
)
10013 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
10017 ipr_remove_trace_file(&ioa_cfg
->host
->shost_dev
.kobj
,
10019 ipr_remove_dump_file(&ioa_cfg
->host
->shost_dev
.kobj
,
10021 scsi_remove_host(ioa_cfg
->host
);
10023 __ipr_remove(pdev
);
10029 * ipr_probe - Adapter hot plug add entry point
10032 * 0 on success / non-zero on failure
10034 static int ipr_probe(struct pci_dev
*pdev
, const struct pci_device_id
*dev_id
)
10036 struct ipr_ioa_cfg
*ioa_cfg
;
10039 rc
= ipr_probe_ioa(pdev
, dev_id
);
10044 ioa_cfg
= pci_get_drvdata(pdev
);
10045 rc
= ipr_probe_ioa_part2(ioa_cfg
);
10048 __ipr_remove(pdev
);
10052 rc
= scsi_add_host(ioa_cfg
->host
, &pdev
->dev
);
10055 __ipr_remove(pdev
);
10059 rc
= ipr_create_trace_file(&ioa_cfg
->host
->shost_dev
.kobj
,
10063 scsi_remove_host(ioa_cfg
->host
);
10064 __ipr_remove(pdev
);
10068 rc
= ipr_create_dump_file(&ioa_cfg
->host
->shost_dev
.kobj
,
10072 ipr_remove_trace_file(&ioa_cfg
->host
->shost_dev
.kobj
,
10074 scsi_remove_host(ioa_cfg
->host
);
10075 __ipr_remove(pdev
);
10079 scsi_scan_host(ioa_cfg
->host
);
10080 ipr_scan_vsets(ioa_cfg
);
10081 scsi_add_device(ioa_cfg
->host
, IPR_IOA_BUS
, IPR_IOA_TARGET
, IPR_IOA_LUN
);
10082 ioa_cfg
->allow_ml_add_del
= 1;
10083 ioa_cfg
->host
->max_channel
= IPR_VSET_BUS
;
10084 ioa_cfg
->iopoll_weight
= ioa_cfg
->chip_cfg
->iopoll_weight
;
10086 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
10087 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++) {
10088 blk_iopoll_init(&ioa_cfg
->hrrq
[i
].iopoll
,
10089 ioa_cfg
->iopoll_weight
, ipr_iopoll
);
10090 blk_iopoll_enable(&ioa_cfg
->hrrq
[i
].iopoll
);
10094 schedule_work(&ioa_cfg
->work_q
);
10099 * ipr_shutdown - Shutdown handler.
10100 * @pdev: pci device struct
10102 * This function is invoked upon system shutdown/reboot. It will issue
10103 * an adapter shutdown to the adapter to flush the write cache.
10108 static void ipr_shutdown(struct pci_dev
*pdev
)
10110 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
10111 unsigned long lock_flags
= 0;
10114 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
10115 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
10116 ioa_cfg
->iopoll_weight
= 0;
10117 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++)
10118 blk_iopoll_disable(&ioa_cfg
->hrrq
[i
].iopoll
);
10121 while (ioa_cfg
->in_reset_reload
) {
10122 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
10123 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
10124 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
10127 ipr_initiate_ioa_bringdown(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
10128 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
10129 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
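/*
 * The positional fields in each ipr_pci_table entry follow struct
 * pci_device_id: vendor, device, subvendor, subdevice, class,
 * class_mask, driver_data. The trailing driver_data word carries the
 * IPR_USE_* quirk flags (e.g. IPR_USE_LONG_TRANSOP_TIMEOUT) that the
 * probe path picks up for the matched adapter.
 */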
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
/**
 * ipr_halt_done - Shutdown prepare completion
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		/* Skip adapters that cannot currently accept commands */
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}
static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};
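/*
 * ipr_halt() runs as a reboot notifier: ipr_init() registers
 * ipr_notifier with register_reboot_notifier(), so each adapter is sent
 * a shutdown-prepare command on the SYS_RESTART, SYS_HALT and
 * SYS_POWER_OFF events checked at the top of ipr_halt().
 */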
/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}
/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);