/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- SCSI device hot plug
 *
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.cache_line_size = 0x20,
		.set_interrupt_mask_reg = 0x0022C,
		.clr_interrupt_mask_reg = 0x00230,
		.clr_interrupt_mask_reg32 = 0x00230,
		.sense_interrupt_mask_reg = 0x0022C,
		.sense_interrupt_mask_reg32 = 0x0022C,
		.clr_interrupt_reg = 0x00228,
		.clr_interrupt_reg32 = 0x00228,
		.sense_interrupt_reg = 0x00224,
		.sense_interrupt_reg32 = 0x00224,
		.ioarrin_reg = 0x00404,
		.sense_uproc_interrupt_reg = 0x00214,
		.sense_uproc_interrupt_reg32 = 0x00214,
		.set_uproc_interrupt_reg = 0x00214,
		.set_uproc_interrupt_reg32 = 0x00214,
		.clr_uproc_interrupt_reg = 0x00218,
		.clr_uproc_interrupt_reg32 = 0x00218
	},
	{ /* Snipe and Scamp */
		.cache_line_size = 0x20,
		.set_interrupt_mask_reg = 0x00288,
		.clr_interrupt_mask_reg = 0x0028C,
		.clr_interrupt_mask_reg32 = 0x0028C,
		.sense_interrupt_mask_reg = 0x00288,
		.sense_interrupt_mask_reg32 = 0x00288,
		.clr_interrupt_reg = 0x00284,
		.clr_interrupt_reg32 = 0x00284,
		.sense_interrupt_reg = 0x00280,
		.sense_interrupt_reg32 = 0x00280,
		.ioarrin_reg = 0x00504,
		.sense_uproc_interrupt_reg = 0x00290,
		.sense_uproc_interrupt_reg32 = 0x00290,
		.set_uproc_interrupt_reg = 0x00290,
		.set_uproc_interrupt_reg32 = 0x00290,
		.clr_uproc_interrupt_reg = 0x00294,
		.clr_uproc_interrupt_reg32 = 0x00294
	},
	{
		.cache_line_size = 0x20,
		.set_interrupt_mask_reg = 0x00010,
		.clr_interrupt_mask_reg = 0x00018,
		.clr_interrupt_mask_reg32 = 0x0001C,
		.sense_interrupt_mask_reg = 0x00010,
		.sense_interrupt_mask_reg32 = 0x00014,
		.clr_interrupt_reg = 0x00008,
		.clr_interrupt_reg32 = 0x0000C,
		.sense_interrupt_reg = 0x00000,
		.sense_interrupt_reg32 = 0x00004,
		.ioarrin_reg = 0x00070,
		.sense_uproc_interrupt_reg = 0x00020,
		.sense_uproc_interrupt_reg32 = 0x00024,
		.set_uproc_interrupt_reg = 0x00020,
		.set_uproc_interrupt_reg32 = 0x00024,
		.clr_uproc_interrupt_reg = 0x00028,
		.clr_uproc_interrupt_reg32 = 0x0002C,
		.init_feedback_reg = 0x0005C,
		.dump_addr_reg = 0x00064,
		.dump_data_reg = 0x00068,
		.endian_swap_reg = 0x00084
	}
};
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
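
/*
 * Illustrative sketch, not part of the original driver flow shown in this
 * file: the ipr_chip[] table above is keyed by PCI vendor and device ID, so
 * probe-time code can walk it to find the register layout for the adapter
 * being initialized. The helper name ipr_lookup_chip() is an assumption for
 * illustration only, as are the .vendor/.device field names.
 */
static const struct ipr_chip_t *ipr_lookup_chip(const struct pci_dev *pdev)
{
	int i;

	/* Linear scan; the table is small, so the cost is negligible. */
	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == pdev->vendor &&
		    ipr_chip[i].device == pdev->device)
			return &ipr_chip[i];
	return NULL;
}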
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
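
/*
 * Example module load (illustrative only; the option values below simply
 * demonstrate the parameters declared above and are not recommended
 * defaults):
 *
 *	modprobe ipr max_speed=2 log_level=2 number_of_msix=4
 *
 * Parameters declared with S_IRUGO | S_IWUSR, such as "fastfail" and
 * "debug", can also be changed at runtime through
 * /sys/module/ipr/parameters/.
 */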
/*  A constant array of IOASCs/URCs/Error Messages */
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL, "8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL, "FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL, "4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL, "FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL, "FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL, "4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL, "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL, "4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL, "FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL, "FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL, "FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL, "FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL, "7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL, "FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL, "FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL, "FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL, "FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL, "FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL, "FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL, "FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL, "FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL, "8009: Impending cache battery pack failure"},
	"Logical Unit in process of becoming ready"},
	"Initializing command required"},
	"34FF: Disk device format in progress"},
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL, "9070: IOA requested reset"},
	"Synchronization required"},
	"IOA microcode download required"},
	"Device bus connection is prohibited by host"},
	"No ready, IOA shutdown"},
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL, "3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL, "FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL, "3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL, "3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL, "3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL, "4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL, "310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL, "310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL, "4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL, "8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL, "4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL, "310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL, "310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL, "9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL, "9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL, "9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL, "Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL, "102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL, "3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL, "FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL, "8150: PCI bus error"},
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL, "FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL, "8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL, "3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL, "8151: IOA microcode error"},
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL, "8157: IOA error requiring IOA reset to recover"},
	"ATA device status error"},
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL, "8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL, "9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL, "9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL, "9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL, "3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL, "3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL, "9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL, "9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL, "4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL, "4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL, "4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL, "4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL, "4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL, "FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, commands not allowed to this device"},
	"Illegal request, command not allowed to a secondary adapter"},
	"Illegal request, command not allowed to a non-optimized resource"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"Illegal request, dual adapter support not enabled"},
	"Illegal request, another cable connector was physically disabled"},
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL, "9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL, "9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL, "4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL, "4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL, "3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL, "FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL, "FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL, "3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL, "4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL, "9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL, "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL, "9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL, "3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL, "3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL, "9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL, "4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL, "4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL, "9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL, "9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL, "4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL, "4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL, "4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL, "4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL, "4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL, "4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL, "9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL, "9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL, "9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL, "9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL, "9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL, "9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1, "70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL, "4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL, "4060: Multipath redundancy level got worse"},
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL, "9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL, "9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL, "9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL, "9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL, "9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL, "9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL, "9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL, "9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL, "9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL, "9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL, "9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL, "9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL, "9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL, "9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL, "9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL, "9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL, "9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL, "9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL, "9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL, "9063: Maximum number of functional arrays has been exceeded"},
	"Data protect, other volume set problem"},
	"Aborted command, invalid descriptor"},
	"Target operating conditions have changed, dual adapter takeover"},
	"Aborted command, medium removal prevented"},
	"Command terminated by host"},
	"Aborted command, command terminated by host"}
};
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @add_data:	additional data
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[atomic_add_return
			(1, &ioa_cfg->trace_index) % IPR_NUM_TRACE_ENTRIES];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}
/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}
/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
}
/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}
/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * buffer.
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
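
/*
 * Pick a host RRQ for a new command: queue 0 is used when only a single HRRQ
 * exists (and is reserved for internal and initialization commands via
 * IPR_INIT_HRRQ elsewhere in this file), while adapters with multiple HRRQs
 * spread I/O round-robin across queues 1..hrrq_num-1 using the atomic
 * hrrq_index counter.
 */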
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	if (ioa_cfg->hrrq_num == 1)
		return 0;
	else
		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
}
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}
/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @len:	length of buffer provided
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}
/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @len:	length of buffer provided
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (buffer - p));

	return buffer;
}
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;
	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:		string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 *	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err(" Serial Number: %s\n", buffer);
}
/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_sis64_config_error - Log a device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err("Device %d : %s", i + 1,
			__ipr_format_res_path(dev_entry->res_path,
					      buffer, sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");
	}
}
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:		IOA error data
 * @len:		data length
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
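
/*
 * Illustrative example (an addition, not from the original source): with the
 * default log level, a hypothetical 32-byte error buffer would be printed as
 * two lines of the form
 *
 *   00000000: DEADBEEF 00000001 00000002 00000003
 *   00000010: 00000004 00000005 00000006 00000007
 *
 * i.e. a byte offset followed by four big-endian words per line.
 */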
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:		fabric descriptor
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
/**
 * ipr_log64_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:		fabric descriptor
 **/
static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
				  struct ipr_hostrcb64_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
				     path_active_desc[i].desc, path_state_desc[j].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
						fabric->res_path,
						buffer, sizeof(buffer)));
			return;
		}
	}

	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
				    buffer, sizeof(buffer)));
}
static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:		fabric path element struct
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
 * ipr_log64_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:		fabric path element struct
 **/
static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb64_config_element *cfg)
{
	int i, j;
	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
				     path_status_desc[j].desc, path_type_desc[i].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
					cfg->res_path, buffer, sizeof(buffer)),
					link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					be32_to_cpu(cfg->wwid[0]),
					be32_to_cpu(cfg->wwid[1]));
			return;
		}
	}
	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status,
		     ipr_format_res_path(hostrcb->ioa_cfg,
			cfg->res_path, buffer, sizeof(buffer)),
			link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
			be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
 * ipr_log_sis64_array_error - Log a sis64 array error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
				      struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_24_error *error;
	struct ipr_hostrcb64_array_data_entry *array_entry;
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error64.u.type_24_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %s\n",
		error->protection_level,
		ipr_format_res_path(ioa_cfg, error->last_res_path,
			buffer, sizeof(buffer)));

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, error->num_entries,
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {

		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (error->exposed_mode_adn == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_err("Array Member %d:\n", i);
		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_err("Current Location: %s\n",
			ipr_format_res_path(ioa_cfg, array_entry->res_path,
				buffer, sizeof(buffer)));
		ipr_err("Expected Location: %s\n",
			ipr_format_res_path(ioa_cfg,
				array_entry->expected_res_path,
				buffer, sizeof(buffer)));

		ipr_err_separator;
	}
}
/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}
/**
 * ipr_log_sis64_device_error - Log a sis64 device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
					struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_21_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_21_error;

	ipr_err("-----Failing Device Information-----\n");
	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
		be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
	ipr_err("Device Resource Path: %s\n",
		__ipr_format_res_path(error->res_path,
				      buffer, sizeof(buffer)));
	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
	ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
	ipr_err("SCSI Sense Data:\n");
	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
	ipr_err("SCSI Command Descriptor Block:\n");
	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));

	ipr_err("Additional IOA Data:\n");
	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
}
/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}
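
/*
 * Illustrative note (an addition, not from the original source): the IOASC is
 * masked with IPR_IOASC_IOASC_MASK before the comparison, so a value that
 * still carries device-specific low-order bits maps to the same table entry
 * as its masked form. Index 0 doubles as the "unknown error" entry, which is
 * why this lookup never fails.
 */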
/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;
	struct ipr_hostrcb_type_21_error *error;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
	    hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
		error = &hostrcb->hcam.u.error64.u.type_21_error;

		if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
			ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
				return;
	}

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_21:
		ipr_log_sis64_device_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}
/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 fd_ioasc;

	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	struct ipr_std_inq_vpids *vpids;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				vpids = &res->std_inq_data.vpids;
				if (vpids->product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}
/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:		SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
			continue;

		if (bus != res->bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}
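
/*
 * Illustrative note (an addition, not from the original source): the wait
 * above doubles "delay" on every pass (1, 2, 4, ... microseconds), so the
 * total busy-wait stays bounded by roughly twice max_delay even though the
 * loop test is on the individual interval. udelay() is only safe for short
 * waits, which is why intervals longer than MAX_UDELAY_MS milliseconds fall
 * back to mdelay().
 */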
/**
 * ipr_get_sis64_dump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:			destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 **/
static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
					   u32 start_addr,
					   __be32 *dest, u32 length_in_words)
{
	int i;

	for (i = 0; i < length_in_words; i++) {
		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
		dest++;
	}

	return 0;
}
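
/*
 * Illustrative note (an addition, not from the original source): each 32-bit
 * word is fetched with a register pair - the adapter-side address is written
 * to dump_addr_reg and the corresponding word is then read back from
 * dump_data_reg - so dumping N words costs roughly 2 * N MMIO accesses.
 */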
2770 * ipr_get_ldump_data_section - Dump IOA memory
2771 * @ioa_cfg: ioa config struct
2772 * @start_addr: adapter address to dump
2773 * @dest: destination kernel buffer
2774 * @length_in_words: length to dump in 4 byte words
2777 * 0 on success / -EIO on failure
2779 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg
*ioa_cfg
,
2781 __be32
*dest
, u32 length_in_words
)
2783 volatile u32 temp_pcii_reg
;
2787 return ipr_get_sis64_dump_data_section(ioa_cfg
, start_addr
,
2788 dest
, length_in_words
);
2790 /* Write IOA interrupt reg starting LDUMP state */
2791 writel((IPR_UPROCI_RESET_ALERT
| IPR_UPROCI_IO_DEBUG_ALERT
),
2792 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2794 /* Wait for IO debug acknowledge */
2795 if (ipr_wait_iodbg_ack(ioa_cfg
,
2796 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC
)) {
2797 dev_err(&ioa_cfg
->pdev
->dev
,
2798 "IOA dump long data transfer timeout\n");
2802 /* Signal LDUMP interlocked - clear IO debug ack */
2803 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2804 ioa_cfg
->regs
.clr_interrupt_reg
);
2806 /* Write Mailbox with starting address */
2807 writel(start_addr
, ioa_cfg
->ioa_mailbox
);
2809 /* Signal address valid - clear IOA Reset alert */
2810 writel(IPR_UPROCI_RESET_ALERT
,
2811 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2813 for (i
= 0; i
< length_in_words
; i
++) {
2814 /* Wait for IO debug acknowledge */
2815 if (ipr_wait_iodbg_ack(ioa_cfg
,
2816 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
)) {
2817 dev_err(&ioa_cfg
->pdev
->dev
,
2818 "IOA dump short data transfer timeout\n");
2822 /* Read data from mailbox and increment destination pointer */
2823 *dest
= cpu_to_be32(readl(ioa_cfg
->ioa_mailbox
));
2826 /* For all but the last word of data, signal data received */
2827 if (i
< (length_in_words
- 1)) {
2828 /* Signal dump data received - Clear IO debug Ack */
2829 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2830 ioa_cfg
->regs
.clr_interrupt_reg
);
2834 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2835 writel(IPR_UPROCI_RESET_ALERT
,
2836 ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
2838 writel(IPR_UPROCI_IO_DEBUG_ALERT
,
2839 ioa_cfg
->regs
.clr_uproc_interrupt_reg32
);
2841 /* Signal dump data received - Clear IO debug Ack */
2842 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
,
2843 ioa_cfg
->regs
.clr_interrupt_reg
);
2845 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2846 while (delay
< IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC
) {
2848 readl(ioa_cfg
->regs
.sense_uproc_interrupt_reg32
);
2850 if (!(temp_pcii_reg
& IPR_UPROCI_RESET_ALERT
))
2860 #ifdef CONFIG_SCSI_IPR_DUMP
2862 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2863 * @ioa_cfg: ioa config struct
2864 * @pci_address: adapter address
2865 * @length: length of data to copy
2867 * Copy data from PCI adapter to kernel buffer.
2868 * Note: length MUST be a 4 byte multiple
2870 * 0 on success / other on failure
2872 static int ipr_sdt_copy(struct ipr_ioa_cfg
*ioa_cfg
,
2873 unsigned long pci_address
, u32 length
)
2875 int bytes_copied
= 0;
2876 int cur_len
, rc
, rem_len
, rem_page_len
, max_dump_size
;
2878 unsigned long lock_flags
= 0;
2879 struct ipr_ioa_dump
*ioa_dump
= &ioa_cfg
->dump
->ioa_dump
;
2882 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
2884 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
2886 while (bytes_copied
< length
&&
2887 (ioa_dump
->hdr
.len
+ bytes_copied
) < max_dump_size
) {
2888 if (ioa_dump
->page_offset
>= PAGE_SIZE
||
2889 ioa_dump
->page_offset
== 0) {
2890 page
= (__be32
*)__get_free_page(GFP_ATOMIC
);
2894 return bytes_copied
;
2897 ioa_dump
->page_offset
= 0;
2898 ioa_dump
->ioa_data
[ioa_dump
->next_page_index
] = page
;
2899 ioa_dump
->next_page_index
++;
2901 page
= ioa_dump
->ioa_data
[ioa_dump
->next_page_index
- 1];
2903 rem_len
= length
- bytes_copied
;
2904 rem_page_len
= PAGE_SIZE
- ioa_dump
->page_offset
;
2905 cur_len
= min(rem_len
, rem_page_len
);
2907 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
2908 if (ioa_cfg
->sdt_state
== ABORT_DUMP
) {
2911 rc
= ipr_get_ldump_data_section(ioa_cfg
,
2912 pci_address
+ bytes_copied
,
2913 &page
[ioa_dump
->page_offset
/ 4],
2914 (cur_len
/ sizeof(u32
)));
2916 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
2919 ioa_dump
->page_offset
+= cur_len
;
2920 bytes_copied
+= cur_len
;
2928 return bytes_copied
;
2932 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2933 * @hdr: dump entry header struct
2938 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header
*hdr
)
2940 hdr
->eye_catcher
= IPR_DUMP_EYE_CATCHER
;
2942 hdr
->offset
= sizeof(*hdr
);
2943 hdr
->status
= IPR_DUMP_STATUS_SUCCESS
;
2947 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2948 * @ioa_cfg: ioa config struct
2949 * @driver_dump: driver dump struct
2954 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg
*ioa_cfg
,
2955 struct ipr_driver_dump
*driver_dump
)
2957 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
2959 ipr_init_dump_entry_hdr(&driver_dump
->ioa_type_entry
.hdr
);
2960 driver_dump
->ioa_type_entry
.hdr
.len
=
2961 sizeof(struct ipr_dump_ioa_type_entry
) -
2962 sizeof(struct ipr_dump_entry_header
);
2963 driver_dump
->ioa_type_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
2964 driver_dump
->ioa_type_entry
.hdr
.id
= IPR_DUMP_DRIVER_TYPE_ID
;
2965 driver_dump
->ioa_type_entry
.type
= ioa_cfg
->type
;
2966 driver_dump
->ioa_type_entry
.fw_version
= (ucode_vpd
->major_release
<< 24) |
2967 (ucode_vpd
->card_type
<< 16) | (ucode_vpd
->minor_release
[0] << 8) |
2968 ucode_vpd
->minor_release
[1];
2969 driver_dump
->hdr
.num_entries
++;
2973 * ipr_dump_version_data - Fill in the driver version in the dump.
2974 * @ioa_cfg: ioa config struct
2975 * @driver_dump: driver dump struct
2980 static void ipr_dump_version_data(struct ipr_ioa_cfg
*ioa_cfg
,
2981 struct ipr_driver_dump
*driver_dump
)
2983 ipr_init_dump_entry_hdr(&driver_dump
->version_entry
.hdr
);
2984 driver_dump
->version_entry
.hdr
.len
=
2985 sizeof(struct ipr_dump_version_entry
) -
2986 sizeof(struct ipr_dump_entry_header
);
2987 driver_dump
->version_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_ASCII
;
2988 driver_dump
->version_entry
.hdr
.id
= IPR_DUMP_DRIVER_VERSION_ID
;
2989 strcpy(driver_dump
->version_entry
.version
, IPR_DRIVER_VERSION
);
2990 driver_dump
->hdr
.num_entries
++;
2994 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2995 * @ioa_cfg: ioa config struct
2996 * @driver_dump: driver dump struct
3001 static void ipr_dump_trace_data(struct ipr_ioa_cfg
*ioa_cfg
,
3002 struct ipr_driver_dump
*driver_dump
)
3004 ipr_init_dump_entry_hdr(&driver_dump
->trace_entry
.hdr
);
3005 driver_dump
->trace_entry
.hdr
.len
=
3006 sizeof(struct ipr_dump_trace_entry
) -
3007 sizeof(struct ipr_dump_entry_header
);
3008 driver_dump
->trace_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3009 driver_dump
->trace_entry
.hdr
.id
= IPR_DUMP_TRACE_ID
;
3010 memcpy(driver_dump
->trace_entry
.trace
, ioa_cfg
->trace
, IPR_TRACE_SIZE
);
3011 driver_dump
->hdr
.num_entries
++;
3015 * ipr_dump_location_data - Fill in the IOA location in the dump.
3016 * @ioa_cfg: ioa config struct
3017 * @driver_dump: driver dump struct
3022 static void ipr_dump_location_data(struct ipr_ioa_cfg
*ioa_cfg
,
3023 struct ipr_driver_dump
*driver_dump
)
3025 ipr_init_dump_entry_hdr(&driver_dump
->location_entry
.hdr
);
3026 driver_dump
->location_entry
.hdr
.len
=
3027 sizeof(struct ipr_dump_location_entry
) -
3028 sizeof(struct ipr_dump_entry_header
);
3029 driver_dump
->location_entry
.hdr
.data_type
= IPR_DUMP_DATA_TYPE_ASCII
;
3030 driver_dump
->location_entry
.hdr
.id
= IPR_DUMP_LOCATION_ID
;
3031 strcpy(driver_dump
->location_entry
.location
, dev_name(&ioa_cfg
->pdev
->dev
));
3032 driver_dump
->hdr
.num_entries
++;
3036 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3037 * @ioa_cfg: ioa config struct
3038 * @dump: dump struct
3043 static void ipr_get_ioa_dump(struct ipr_ioa_cfg
*ioa_cfg
, struct ipr_dump
*dump
)
3045 unsigned long start_addr
, sdt_word
;
3046 unsigned long lock_flags
= 0;
3047 struct ipr_driver_dump
*driver_dump
= &dump
->driver_dump
;
3048 struct ipr_ioa_dump
*ioa_dump
= &dump
->ioa_dump
;
3049 u32 num_entries
, max_num_entries
, start_off
, end_off
;
3050 u32 max_dump_size
, bytes_to_copy
, bytes_copied
, rc
;
3051 struct ipr_sdt
*sdt
;
3057 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3059 if (ioa_cfg
->sdt_state
!= READ_DUMP
) {
3060 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3064 if (ioa_cfg
->sis64
) {
3065 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3066 ssleep(IPR_DUMP_DELAY_SECONDS
);
3067 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3070 start_addr
= readl(ioa_cfg
->ioa_mailbox
);
3072 if (!ioa_cfg
->sis64
&& !ipr_sdt_is_fmt2(start_addr
)) {
3073 dev_err(&ioa_cfg
->pdev
->dev
,
3074 "Invalid dump table format: %lx\n", start_addr
);
3075 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3079 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA initiated\n");
3081 driver_dump
->hdr
.eye_catcher
= IPR_DUMP_EYE_CATCHER
;
3083 /* Initialize the overall dump header */
3084 driver_dump
->hdr
.len
= sizeof(struct ipr_driver_dump
);
3085 driver_dump
->hdr
.num_entries
= 1;
3086 driver_dump
->hdr
.first_entry_offset
= sizeof(struct ipr_dump_header
);
3087 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_SUCCESS
;
3088 driver_dump
->hdr
.os
= IPR_DUMP_OS_LINUX
;
3089 driver_dump
->hdr
.driver_name
= IPR_DUMP_DRIVER_NAME
;
3091 ipr_dump_version_data(ioa_cfg
, driver_dump
);
3092 ipr_dump_location_data(ioa_cfg
, driver_dump
);
3093 ipr_dump_ioa_type_data(ioa_cfg
, driver_dump
);
3094 ipr_dump_trace_data(ioa_cfg
, driver_dump
);
3096 /* Update dump_header */
3097 driver_dump
->hdr
.len
+= sizeof(struct ipr_dump_entry_header
);
3099 /* IOA Dump entry */
3100 ipr_init_dump_entry_hdr(&ioa_dump
->hdr
);
3101 ioa_dump
->hdr
.len
= 0;
3102 ioa_dump
->hdr
.data_type
= IPR_DUMP_DATA_TYPE_BINARY
;
3103 ioa_dump
->hdr
.id
= IPR_DUMP_IOA_DUMP_ID
;
3105 /* First entries in sdt are actually a list of dump addresses and
3106 lengths to gather the real dump data. sdt represents the pointer
3107 to the ioa generated dump table. Dump data will be extracted based
3108 on entries in this table */
3109 sdt
= &ioa_dump
->sdt
;
3111 if (ioa_cfg
->sis64
) {
3112 max_num_entries
= IPR_FMT3_NUM_SDT_ENTRIES
;
3113 max_dump_size
= IPR_FMT3_MAX_IOA_DUMP_SIZE
;
3115 max_num_entries
= IPR_FMT2_NUM_SDT_ENTRIES
;
3116 max_dump_size
= IPR_FMT2_MAX_IOA_DUMP_SIZE
;
3119 bytes_to_copy
= offsetof(struct ipr_sdt
, entry
) +
3120 (max_num_entries
* sizeof(struct ipr_sdt_entry
));
3121 rc
= ipr_get_ldump_data_section(ioa_cfg
, start_addr
, (__be32
*)sdt
,
3122 bytes_to_copy
/ sizeof(__be32
));
3124 /* Smart Dump table is ready to use and the first entry is valid */
3125 if (rc
|| ((be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT3_SDT_READY_TO_USE
) &&
3126 (be32_to_cpu(sdt
->hdr
.state
) != IPR_FMT2_SDT_READY_TO_USE
))) {
3127 dev_err(&ioa_cfg
->pdev
->dev
,
3128 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3129 rc
, be32_to_cpu(sdt
->hdr
.state
));
3130 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_FAILED
;
3131 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
3132 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3136 num_entries
= be32_to_cpu(sdt
->hdr
.num_entries_used
);
3138 if (num_entries
> max_num_entries
)
3139 num_entries
= max_num_entries
;
3141 /* Update dump length to the actual data to be copied */
3142 dump
->driver_dump
.hdr
.len
+= sizeof(struct ipr_sdt_header
);
3144 dump
->driver_dump
.hdr
.len
+= num_entries
* sizeof(struct ipr_sdt_entry
);
3146 dump
->driver_dump
.hdr
.len
+= max_num_entries
* sizeof(struct ipr_sdt_entry
);
3148 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3150 for (i
= 0; i
< num_entries
; i
++) {
3151 if (ioa_dump
->hdr
.len
> max_dump_size
) {
3152 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3156 if (sdt
->entry
[i
].flags
& IPR_SDT_VALID_ENTRY
) {
3157 sdt_word
= be32_to_cpu(sdt
->entry
[i
].start_token
);
3159 bytes_to_copy
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3161 start_off
= sdt_word
& IPR_FMT2_MBX_ADDR_MASK
;
3162 end_off
= be32_to_cpu(sdt
->entry
[i
].end_token
);
3164 if (ipr_sdt_is_fmt2(sdt_word
) && sdt_word
)
3165 bytes_to_copy
= end_off
- start_off
;
3170 if (bytes_to_copy
> max_dump_size
) {
3171 sdt
->entry
[i
].flags
&= ~IPR_SDT_VALID_ENTRY
;
3175 /* Copy data from adapter to driver buffers */
3176 bytes_copied
= ipr_sdt_copy(ioa_cfg
, sdt_word
,
3179 ioa_dump
->hdr
.len
+= bytes_copied
;
3181 if (bytes_copied
!= bytes_to_copy
) {
3182 driver_dump
->hdr
.status
= IPR_DUMP_STATUS_QUAL_SUCCESS
;
3189 dev_err(&ioa_cfg
->pdev
->dev
, "Dump of IOA completed.\n");
3191 /* Update dump_header */
3192 driver_dump
->hdr
.len
+= ioa_dump
->hdr
.len
;
3194 ioa_cfg
->sdt_state
= DUMP_OBTAINED
;
3199 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3203 * ipr_release_dump - Free adapter dump memory
3204 * @kref: kref struct
3209 static void ipr_release_dump(struct kref
*kref
)
3211 struct ipr_dump
*dump
= container_of(kref
, struct ipr_dump
, kref
);
3212 struct ipr_ioa_cfg
*ioa_cfg
= dump
->ioa_cfg
;
3213 unsigned long lock_flags
= 0;
3217 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3218 ioa_cfg
->dump
= NULL
;
3219 ioa_cfg
->sdt_state
= INACTIVE
;
3220 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3222 for (i
= 0; i
< dump
->ioa_dump
.next_page_index
; i
++)
3223 free_page((unsigned long) dump
->ioa_dump
.ioa_data
[i
]);
3225 vfree(dump
->ioa_dump
.ioa_data
);
3231 * ipr_worker_thread - Worker thread
3232 * @work: ioa config struct
3234 * Called at task level from a work thread. This function takes care
3235 * of adding and removing device from the mid-layer as configuration
3236 * changes are detected by the adapter.
3241 static void ipr_worker_thread(struct work_struct
*work
)
3243 unsigned long lock_flags
;
3244 struct ipr_resource_entry
*res
;
3245 struct scsi_device
*sdev
;
3246 struct ipr_dump
*dump
;
3247 struct ipr_ioa_cfg
*ioa_cfg
=
3248 container_of(work
, struct ipr_ioa_cfg
, work_q
);
3249 u8 bus
, target
, lun
;
3253 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3255 if (ioa_cfg
->sdt_state
== READ_DUMP
) {
3256 dump
= ioa_cfg
->dump
;
3258 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3261 kref_get(&dump
->kref
);
3262 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3263 ipr_get_ioa_dump(ioa_cfg
, dump
);
3264 kref_put(&dump
->kref
, ipr_release_dump
);
3266 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3267 if (ioa_cfg
->sdt_state
== DUMP_OBTAINED
&& !ioa_cfg
->dump_timeout
)
3268 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3269 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3276 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
||
3277 !ioa_cfg
->allow_ml_add_del
) {
3278 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3282 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3283 if (res
->del_from_ml
&& res
->sdev
) {
3286 if (!scsi_device_get(sdev
)) {
3287 if (!res
->add_to_ml
)
3288 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
3290 res
->del_from_ml
= 0;
3291 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3292 scsi_remove_device(sdev
);
3293 scsi_device_put(sdev
);
3294 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3301 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
3302 if (res
->add_to_ml
) {
3304 target
= res
->target
;
3307 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3308 scsi_add_device(ioa_cfg
->host
, bus
, target
, lun
);
3309 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3314 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3315 kobject_uevent(&ioa_cfg
->host
->shost_dev
.kobj
, KOBJ_CHANGE
);
3319 #ifdef CONFIG_SCSI_IPR_TRACE
3321 * ipr_read_trace - Dump the adapter trace
3322 * @filp: open sysfs file
3323 * @kobj: kobject struct
3324 * @bin_attr: bin_attribute struct
3327 * @count: buffer size
3330 * number of bytes printed to buffer
3332 static ssize_t
ipr_read_trace(struct file
*filp
, struct kobject
*kobj
,
3333 struct bin_attribute
*bin_attr
,
3334 char *buf
, loff_t off
, size_t count
)
3336 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
3337 struct Scsi_Host
*shost
= class_to_shost(dev
);
3338 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3339 unsigned long lock_flags
= 0;
3342 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3343 ret
= memory_read_from_buffer(buf
, count
, &off
, ioa_cfg
->trace
,
3345 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3350 static struct bin_attribute ipr_trace_attr
= {
3356 .read
= ipr_read_trace
,
3361 * ipr_show_fw_version - Show the firmware version
3362 * @dev: class device struct
3366 * number of bytes printed to buffer
3368 static ssize_t
ipr_show_fw_version(struct device
*dev
,
3369 struct device_attribute
*attr
, char *buf
)
3371 struct Scsi_Host
*shost
= class_to_shost(dev
);
3372 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3373 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
3374 unsigned long lock_flags
= 0;
3377 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3378 len
= snprintf(buf
, PAGE_SIZE
, "%02X%02X%02X%02X\n",
3379 ucode_vpd
->major_release
, ucode_vpd
->card_type
,
3380 ucode_vpd
->minor_release
[0],
3381 ucode_vpd
->minor_release
[1]);
3382 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3386 static struct device_attribute ipr_fw_version_attr
= {
3388 .name
= "fw_version",
3391 .show
= ipr_show_fw_version
,
3395 * ipr_show_log_level - Show the adapter's error logging level
3396 * @dev: class device struct
3400 * number of bytes printed to buffer
3402 static ssize_t
ipr_show_log_level(struct device
*dev
,
3403 struct device_attribute
*attr
, char *buf
)
3405 struct Scsi_Host
*shost
= class_to_shost(dev
);
3406 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3407 unsigned long lock_flags
= 0;
3410 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3411 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->log_level
);
3412 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3417 * ipr_store_log_level - Change the adapter's error logging level
3418 * @dev: class device struct
3422 * number of bytes printed to buffer
3424 static ssize_t
ipr_store_log_level(struct device
*dev
,
3425 struct device_attribute
*attr
,
3426 const char *buf
, size_t count
)
3428 struct Scsi_Host
*shost
= class_to_shost(dev
);
3429 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3430 unsigned long lock_flags
= 0;
3432 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3433 ioa_cfg
->log_level
= simple_strtoul(buf
, NULL
, 10);
3434 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3438 static struct device_attribute ipr_log_level_attr
= {
3440 .name
= "log_level",
3441 .mode
= S_IRUGO
| S_IWUSR
,
3443 .show
= ipr_show_log_level
,
3444 .store
= ipr_store_log_level
3448 * ipr_store_diagnostics - IOA Diagnostics interface
3449 * @dev: device struct
3451 * @count: buffer size
3453 * This function will reset the adapter and wait a reasonable
3454 * amount of time for any errors that the adapter might log.
3457 * count on success / other on failure
3459 static ssize_t
ipr_store_diagnostics(struct device
*dev
,
3460 struct device_attribute
*attr
,
3461 const char *buf
, size_t count
)
3463 struct Scsi_Host
*shost
= class_to_shost(dev
);
3464 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3465 unsigned long lock_flags
= 0;
3468 if (!capable(CAP_SYS_ADMIN
))
3471 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3472 while (ioa_cfg
->in_reset_reload
) {
3473 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3474 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3475 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3478 ioa_cfg
->errors_logged
= 0;
3479 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3481 if (ioa_cfg
->in_reset_reload
) {
3482 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3483 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3485 /* Wait for a second for any errors to be logged */
3488 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3492 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3493 if (ioa_cfg
->in_reset_reload
|| ioa_cfg
->errors_logged
)
3495 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3500 static struct device_attribute ipr_diagnostics_attr
= {
3502 .name
= "run_diagnostics",
3505 .store
= ipr_store_diagnostics
3509 * ipr_show_adapter_state - Show the adapter's state
3510 * @class_dev: device struct
3514 * number of bytes printed to buffer
3516 static ssize_t
ipr_show_adapter_state(struct device
*dev
,
3517 struct device_attribute
*attr
, char *buf
)
3519 struct Scsi_Host
*shost
= class_to_shost(dev
);
3520 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3521 unsigned long lock_flags
= 0;
3524 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3525 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
)
3526 len
= snprintf(buf
, PAGE_SIZE
, "offline\n");
3528 len
= snprintf(buf
, PAGE_SIZE
, "online\n");
3529 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3534 * ipr_store_adapter_state - Change adapter state
3535 * @dev: device struct
3537 * @count: buffer size
3539 * This function will change the adapter's state.
3542 * count on success / other on failure
3544 static ssize_t
ipr_store_adapter_state(struct device
*dev
,
3545 struct device_attribute
*attr
,
3546 const char *buf
, size_t count
)
3548 struct Scsi_Host
*shost
= class_to_shost(dev
);
3549 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3550 unsigned long lock_flags
;
3551 int result
= count
, i
;
3553 if (!capable(CAP_SYS_ADMIN
))
3556 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3557 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
&&
3558 !strncmp(buf
, "online", 6)) {
3559 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
3560 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
3561 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 0;
3562 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
3565 ioa_cfg
->reset_retries
= 0;
3566 ioa_cfg
->in_ioa_bringdown
= 0;
3567 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
3569 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3570 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3575 static struct device_attribute ipr_ioa_state_attr
= {
3577 .name
= "online_state",
3578 .mode
= S_IRUGO
| S_IWUSR
,
3580 .show
= ipr_show_adapter_state
,
3581 .store
= ipr_store_adapter_state
3585 * ipr_store_reset_adapter - Reset the adapter
3586 * @dev: device struct
3588 * @count: buffer size
3590 * This function will reset the adapter.
3593 * count on success / other on failure
3595 static ssize_t
ipr_store_reset_adapter(struct device
*dev
,
3596 struct device_attribute
*attr
,
3597 const char *buf
, size_t count
)
3599 struct Scsi_Host
*shost
= class_to_shost(dev
);
3600 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3601 unsigned long lock_flags
;
3604 if (!capable(CAP_SYS_ADMIN
))
3607 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
3608 if (!ioa_cfg
->in_reset_reload
)
3609 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
3610 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
3611 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
3616 static struct device_attribute ipr_ioa_reset_attr
= {
3618 .name
= "reset_host",
3621 .store
= ipr_store_reset_adapter
3624 static int ipr_iopoll(struct blk_iopoll
*iop
, int budget
);
3626 * ipr_show_iopoll_weight - Show ipr polling mode
3627 * @dev: class device struct
3631 * number of bytes printed to buffer
3633 static ssize_t
ipr_show_iopoll_weight(struct device
*dev
,
3634 struct device_attribute
*attr
, char *buf
)
3636 struct Scsi_Host
*shost
= class_to_shost(dev
);
3637 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3638 unsigned long lock_flags
= 0;
3641 spin_lock_irqsave(shost
->host_lock
, lock_flags
);
3642 len
= snprintf(buf
, PAGE_SIZE
, "%d\n", ioa_cfg
->iopoll_weight
);
3643 spin_unlock_irqrestore(shost
->host_lock
, lock_flags
);
3649 * ipr_store_iopoll_weight - Change the adapter's polling mode
3650 * @dev: class device struct
3654 * number of bytes printed to buffer
3656 static ssize_t
ipr_store_iopoll_weight(struct device
*dev
,
3657 struct device_attribute
*attr
,
3658 const char *buf
, size_t count
)
3660 struct Scsi_Host
*shost
= class_to_shost(dev
);
3661 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)shost
->hostdata
;
3662 unsigned long user_iopoll_weight
;
3663 unsigned long lock_flags
= 0;
3666 if (!ioa_cfg
->sis64
) {
3667 dev_info(&ioa_cfg
->pdev
->dev
, "blk-iopoll not supported on this adapter\n");
3670 if (kstrtoul(buf
, 10, &user_iopoll_weight
))
3673 if (user_iopoll_weight
> 256) {
3674 dev_info(&ioa_cfg
->pdev
->dev
, "Invalid blk-iopoll weight. It must be less than 256\n");
3678 if (user_iopoll_weight
== ioa_cfg
->iopoll_weight
) {
3679 dev_info(&ioa_cfg
->pdev
->dev
, "Current blk-iopoll weight has the same weight\n");
3683 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
3684 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++)
3685 blk_iopoll_disable(&ioa_cfg
->hrrq
[i
].iopoll
);
3688 spin_lock_irqsave(shost
->host_lock
, lock_flags
);
3689 ioa_cfg
->iopoll_weight
= user_iopoll_weight
;
3690 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
3691 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++) {
3692 blk_iopoll_init(&ioa_cfg
->hrrq
[i
].iopoll
,
3693 ioa_cfg
->iopoll_weight
, ipr_iopoll
);
3694 blk_iopoll_enable(&ioa_cfg
->hrrq
[i
].iopoll
);
3697 spin_unlock_irqrestore(shost
->host_lock
, lock_flags
);
3702 static struct device_attribute ipr_iopoll_weight_attr
= {
3704 .name
= "iopoll_weight",
3705 .mode
= S_IRUGO
| S_IWUSR
,
3707 .show
= ipr_show_iopoll_weight
,
3708 .store
= ipr_store_iopoll_weight
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:		buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 * 	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	scatterlist = sglist->scatterlist;
	sg_init_table(scatterlist, num_elem);

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			ipr_trace;

			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(sg_page(&scatterlist[j]), order);
			kfree(sglist);
			return NULL;
		}

		sg_set_page(&scatterlist[i], page, 0, 0);
	}

	return sglist;
}
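
/*
 * Illustrative sizing sketch (an addition, not from the original source,
 * assuming IPR_MAX_SGLIST is 64 and 4KB pages): a hypothetical 600KB
 * microcode image gives an sg_size of about 9.7KB, get_order() rounds that
 * up to a 4-page (16KB) element, and 38 elements are allocated. The list is
 * later filled with ipr_copy_ucode_buffer() and released with
 * ipr_free_ucode_buffer().
 */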
/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @p_dnld:		scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	int i;

	for (i = 0; i < sglist->num_sg; i++)
		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);

	kfree(sglist);
}
/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:		scatter/gather list pointer
 * @buffer:		buffer pointer
 * @len:		buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;

		if (result != 0) {
			ipr_trace;
			return result;
		}
	}

	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}
/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
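/*
 * Note on the two IOADL builders above: every descriptor maps one DMA
 * scatter/gather element of the microcode image, and the final descriptor
 * is tagged with IPR_IOADL_FLAGS_LAST so the IOA knows where the list
 * ends.  The 64-bit (SIS64) format keeps flags, length and address in
 * separate fields, while the 32-bit format packs flags and length into a
 * single word.
 */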
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
					sglist->scatterlist, sglist->num_sg,
					DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
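/*
 * Flow of ipr_update_ioa_ucode() above: the microcode sglist is DMA
 * mapped, published in ioa_cfg->ucode_sglist, and a normal-shutdown
 * adapter reset is kicked off; the reset job performs the actual
 * download.  The caller then sleeps on reset_wait_q until the
 * reset/reload completes and clears the pointer again.
 */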
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @class_dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	char *src;
	int len, result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	len = snprintf(fname, 99, "%s", buf);
	fname[len-1] = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
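/*
 * Usage sketch for the update_fw attribute above (the host number and
 * firmware file name below are examples only, not fixed values):
 *
 *   # cp adapter-ucode.bin /lib/firmware/
 *   # echo adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * request_firmware() resolves the written name against the firmware
 * search path, and the adapter is reset to activate the new image.
 */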
/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:	class device struct
 * @attr:	device attribute
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name =		"fw_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_type
};

static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	&ipr_iopoll_weight_attr,
	NULL,
};
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	char *src;
	int len, sdt_end;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
	else
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sdt_end;

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}
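/*
 * Layout served by ipr_read_dump() above: the binary dump presented
 * through sysfs is three regions concatenated back to back - the driver
 * dump structures, then the SDT (dump table), then the captured IOA data
 * pages - and 'off' is rebased as each region boundary is crossed so the
 * page array can be indexed directly.
 */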
/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	__be32 **ioa_data;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	if (ioa_cfg->sis64)
		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
	else
		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));

	if (!ioa_data) {
		ipr_err("Dump memory allocation failed\n");
		kfree(dump);
		return -ENOMEM;
	}

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	return 0;
}
/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
#endif
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 * @reason:	calling context
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}
/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:	scsi device struct
 * @tag_type:	type of tags to use
 *
 * Return value:
 *	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gscsi(res))
		tag_type = scsi_change_queue_type(sdev, tag_type);
	else
		tag_type = 0;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}
/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name =		"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};
/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       __ipr_format_res_path(res->res_path, buffer,
						     sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name =		"resource_path",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_path
};
/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name =		"device_id",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_device_id
};
/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name =		"resource_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_type
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	NULL,
};
/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
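/*
 * Worked example for ipr_biosparam() above: with 128 heads and 32 sectors
 * per track, one cylinder is 128 * 32 = 4096 blocks (2 MiB with 512-byte
 * blocks), so partitioning tools that align to cylinder boundaries end up
 * 4 KiB aligned, which is what the IOA prefers.
 */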
/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;
/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (!ipr_find_starget(starget)) {
			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->array_ids);
			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->vset_ids);
			else if (starget->channel == 0)
				clear_bit(starget->id, ioa_cfg->target_ids);
		}
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}
/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}
/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		}

		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_res_path(ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port) {
		rc = ata_sas_port_init(sata_port->ap);
		if (rc == 0)
			rc = ata_sas_sync_probe(sata_port->ap);
	}

	if (rc)
		ipr_slave_destroy(sdev);

	return rc;
}
/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		res->add_to_ml = 0;
		res->in_erp = 0;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;
	int rc = SUCCESS;

	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter being reset as a result of error recovery.\n");

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If we got hit with a host reset while we were already resetting
	 the adapter for some reason, and the reset failed. */
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		rc = FAILED;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
		if (ipr_cmd->ioa_cfg->sis64)
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
			       sizeof(struct ipr_ioasa_gata));
		else
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
			       sizeof(struct ipr_ioasa_gata));
	}

	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}
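/*
 * Note on ipr_device_reset() above: the reset is issued to the IOA as an
 * IPR_RESET_DEVICE adapter command rather than as a SCSI CDB to the
 * device itself; for SATA (GATA) resources cdb[2] selects a PHY reset and
 * the returned GATA status block is cached on the sata_port so libata
 * error handling can inspect it afterwards.
 */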
/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;
	struct ipr_hrr_queue *hrrq;

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				if (ipr_cmd->scsi_cmd)
					ipr_cmd->done = ipr_scsi_eh_done;
				if (ipr_cmd->qc)
					ipr_cmd->done = ipr_sata_eh_done;
				if (ipr_cmd->qc &&
				    !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
					ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
					ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
				}
			}
		}
		spin_unlock(&hrrq->_lock);
	}
	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		for_each_hrrq(hrrq, ioa_cfg) {
			spin_lock(&hrrq->_lock);
			list_for_each_entry(ipr_cmd,
					    &hrrq->hrrq_pending_q, queue) {
				if (ipr_cmd->ioarcb.res_handle ==
				    res->res_handle) {
					rc = -EIO;
					break;
				}
			}
			spin_unlock(&hrrq->_lock);
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;
	res->reset_occurred = 1;

	return rc ? FAILED : SUCCESS;
}

static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc, int_reg;
	int op_found = 0;
	struct ipr_hrr_queue *hrrq;

	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload ||
	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;
	if (!res)
		return FAILED;

	/*
	 * If we are aborting a timed out op, chances are that the timeout was caused
	 * by a still not detected EEH error. In such cases, reading a register will
	 * trigger the EEH recovery infrastructure.
	 */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	if (!ipr_is_gscsi(res))
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
			if (ipr_cmd->scsi_cmd == scsi_cmd) {
				ipr_cmd->done = ipr_scsi_eh_done;
				op_found = 1;
				break;
			}
		}
		spin_unlock(&hrrq->_lock);
	}

	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
		ioasc = 0;
	}

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}
/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	return rc;
}
/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;
	u32 int_mask_reg;

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg &= ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				return IRQ_HANDLED;
			}
		}

		return IRQ_NONE;
	}

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
		if (ioa_cfg->clear_isr) {
			if (ipr_debug && printk_ratelimit())
				dev_err(&ioa_cfg->pdev->dev,
					"Spurious interrupt detected. 0x%08X\n", int_reg);
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
		}
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
			dev_err(&ioa_cfg->pdev->dev,
				"No Host RRQ. 0x%08X\n", int_reg);
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}
/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 *
 * Return value:
 *	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}
static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
			    struct list_head *doneq)
{
	u32 ioasc;
	u16 cmd_index;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
	int num_hrrq = 0;

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrr_queue->allow_interrupts)
		return 0;

	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       hrr_queue->toggle_bit) {

		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
			     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
			     IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
			     cmd_index < hrr_queue->min_cmd_id)) {
			ipr_isr_eh(ioa_cfg,
				"Invalid response handle from IOA: ",
				cmd_index);
			break;
		}

		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

		list_move_tail(&ipr_cmd->queue, doneq);

		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
			hrr_queue->hrrq_curr++;
		} else {
			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
			hrr_queue->toggle_bit ^= 1u;
		}
		num_hrrq++;
		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	return num_hrrq;
}
static int ipr_iopoll(struct blk_iopoll *iop, int budget)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_hrr_queue *hrrq;
	struct ipr_cmnd *ipr_cmd, *temp;
	unsigned long hrrq_flags;
	int completed_ops;
	LIST_HEAD(doneq);

	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
	ioa_cfg = hrrq->ioa_cfg;

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);

	if (completed_ops < budget)
		blk_iopoll_complete(iop);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}

	return completed_ops;
}
/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	u32 int_reg = 0;
	int num_hrrq = 0;
	int irq_none = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	while (1) {
		if (ipr_process_hrrq(hrrq, -1, &doneq)) {
			rc = IRQ_HANDLED;

			if (!ioa_cfg->clear_isr)
				break;

			/* Clear the PCI interrupt */
			num_hrrq = 0;
			do {
				writel(IPR_PCII_HRRQ_UPDATED,
				       ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

		} else if (rc == IRQ_NONE && irq_none == 0) {
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			irq_none++;
		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
			   int_reg & IPR_PCII_HRRQ_UPDATED) {
			ipr_isr_eh(ioa_cfg,
				"Error clearing HRRQ: ", num_hrrq);
			rc = IRQ_HANDLED;
			break;
		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}
/**
 * ipr_isr_mhrrq - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       hrrq->toggle_bit) {
			if (!blk_iopoll_sched_prep(&hrrq->iopoll))
				blk_iopoll_sched(&hrrq->iopoll);
			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
			return IRQ_HANDLED;
		}
	} else {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
			hrrq->toggle_bit)

			if (ipr_process_hrrq(hrrq, -1, &doneq))
				rc = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}
/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		if (printk_ratelimit())
			dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	ioarcb->data_transfer_length = cpu_to_be32(length);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
				    offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(sg));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}
}
/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}
/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_get_tag_type(scsi_cmd->device)) {
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 *	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->hdr.ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
		data_len = sizeof(struct ipr_ioasa64);
	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
		data_len = sizeof(struct ipr_ioasa);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}
/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 * @sense_buf:	sense data buffer
 *
 * Return value:
 *	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
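/*
 * Why two formats in ipr_gen_sense() above: fixed-format sense (0x70) can
 * only report a 32-bit information field, so when a vset failing LBA has
 * a non-zero high word the descriptor-format (0x72) branch is taken and
 * the full 64-bit LBA is placed in an information descriptor; otherwise
 * the traditional fixed format is generated.
 */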
/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;

	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	else
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	return 1;
}
/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	unsigned long hrrq_flags;

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(scsi_cmd);

		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		scsi_cmd->scsi_done(scsi_cmd);
		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
	} else {
		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
		ipr_erp_start(ioa_cfg, ipr_cmd);
		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
	}
}
/**
 * ipr_queuecommand - Queue a mid-layer request
 * @shost:	scsi host struct
 * @scsi_cmd:	scsi command struct
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct Scsi_Host *shost,
			    struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	unsigned long hrrq_flags, lock_flags;
	int rc;
	struct ipr_hrr_queue *hrrq;
	int hrrq_id;

	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	scsi_cmd->result = (DID_OK << 16);
	res = scsi_cmd->device->hostdata;

	if (ipr_is_gata(res) && res->sata_port) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return rc;
	}

	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
	hrrq = &ioa_cfg->hrrq[hrrq_id];

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead &&
			!hrrq->removing_ioa)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		goto err_nodev;
	}

	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
	if (ipr_cmd == NULL) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
	ioarcb = &ipr_cmd->ioarcb;

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ipr_cmd->done = ipr_scsi_eh_done;

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		if (ipr_is_gscsi(res) && res->reset_occurred) {
			res->reset_occurred = 0;
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		}
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		if (scsi_cmd->flags & SCMD_TAGGED)
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
		else
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
	}

	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	}

	if (ioa_cfg->sis64)
		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
	else
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		if (!rc)
			scsi_dma_unmap(scsi_cmd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	if (unlikely(hrrq->ioa_is_dead)) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		scsi_dma_unmap(scsi_cmd);
		goto err_nodev;
	}

	ioarcb->res_handle = res->res_handle;
	if (res->needs_sync_complete) {
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
		res->needs_sync_complete = 0;
	}
	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
	ipr_send_command(ipr_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;

err_nodev:
	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_cmd->result = (DID_NO_CONNECT << 16);
	scsi_cmd->scsi_done(scsi_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;
}
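/*
 * Note on the code above: queuecommand picks a host RRQ via
 * ipr_get_hrrq_index() and performs all command allocation and queuing under
 * that queue's own lock rather than the Scsi_Host lock; only the SATA
 * passthrough path (ata_sas_queuecmd) still takes the host lock.
 */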
/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res)) {
		if (cmd == HDIO_GET_IDENTITY)
			return -ENOTTY;
		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
	}

	return -EINVAL;
}
/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 * 	pointer to buffer with description string
 **/
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME,
};
/**
 * ipr_ata_phy_reset - libata phy_reset handler
 * @ap:		ata port to reset
 *
 **/
static void ipr_ata_phy_reset(struct ata_port *ap)
{
	unsigned long flags;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	int rc;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
		goto out_unlock;

	rc = ipr_device_reset(ioa_cfg, res);

	if (rc) {
		ap->link.device[0].class = ATA_DEV_NONE;
		goto out_unlock;
	}

	ap->link.device[0].class = res->ata_class;
	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
		ap->link.device[0].class = ATA_DEV_NONE;

out_unlock:
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	LEAVE;
}
/**
 * ipr_ata_post_internal - Cleanup after an internal command
 * @qc:	ATA queued command
 *
 * Return value:
 * 	none
 **/
static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_hrr_queue *hrrq;
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	}

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
			if (ipr_cmd->qc == qc) {
				ipr_device_reset(ioa_cfg, sata_port->res);
				break;
			}
		}
		spin_unlock(&hrrq->_lock);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
 * @regs:	destination
 * @tf:	source ATA taskfile
 *
 * Return value:
 * 	none
 **/
static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
			     struct ata_taskfile *tf)
{
	regs->feature = tf->feature;
	regs->nsect = tf->nsect;
	regs->lbal = tf->lbal;
	regs->lbam = tf->lbam;
	regs->lbah = tf->lbah;
	regs->device = tf->device;
	regs->command = tf->command;
	regs->hob_feature = tf->hob_feature;
	regs->hob_nsect = tf->hob_nsect;
	regs->hob_lbal = tf->hob_lbal;
	regs->hob_lbam = tf->hob_lbam;
	regs->hob_lbah = tf->hob_lbah;
	regs->ctl = tf->ctl;
}
/**
 * ipr_sata_done - done function for SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer to SATA devices
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	spin_lock(&ipr_cmd->hrrq->_lock);
	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	else
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);

	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
	else
		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	spin_unlock(&ipr_cmd->hrrq->_lock);
	ata_qc_complete(qc);
}
/**
 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:		ATA queued command
 *
 **/
static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
				  struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (qc->dma_dir == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	ioarcb->data_transfer_length = cpu_to_be32(len);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	ioarcb->u.sis64_addr_data.data_ioadl_addr =
		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl64->flags = cpu_to_be32(ioadl_flags);
		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64->address = cpu_to_be64(sg_dma_address(sg));

		last_ioadl64 = ioadl64;
		ioadl64++;
	}

	if (likely(last_ioadl64))
		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}

/**
 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
 * @ipr_cmd:	ipr command struct
 * @qc:		ATA queued command
 *
 **/
static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
				struct ata_queued_cmd *qc)
{
	u32 ioadl_flags = 0;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl_desc *last_ioadl = NULL;
	int len = qc->nbytes;
	struct scatterlist *sg;
	unsigned int si;

	if (len == 0)
		return;

	if (qc->dma_dir == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(len);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(len);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl->address = cpu_to_be32(sg_dma_address(sg));

		last_ioadl = ioadl;
		ioadl++;
	}

	if (likely(last_ioadl))
		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
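/*
 * Note on the two builders above: each IOADL descriptor carries the transfer
 * direction flags plus the DMA address and length of one scatter/gather
 * element, and the final descriptor is tagged with IPR_IOADL_FLAGS_LAST so
 * the adapter knows where the list ends. The 64-bit (SIS-64) form also stores
 * the list's own DMA address in the IOARCB.
 */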
/**
 * ipr_qc_defer - Get a free ipr_cmd
 * @qc:	queued command
 *
 * Return value:
 *	0 if success
 **/
static int ipr_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_hrr_queue *hrrq;
	int hrrq_id;

	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
	hrrq = &ioa_cfg->hrrq[hrrq_id];

	qc->lldd_task = NULL;
	spin_lock(&hrrq->_lock);
	if (unlikely(hrrq->ioa_is_dead)) {
		spin_unlock(&hrrq->_lock);
		return 0;
	}

	if (unlikely(!hrrq->allow_cmds)) {
		spin_unlock(&hrrq->_lock);
		return ATA_DEFER_LINK;
	}

	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
	if (ipr_cmd == NULL) {
		spin_unlock(&hrrq->_lock);
		return ATA_DEFER_LINK;
	}

	qc->lldd_task = ipr_cmd;
	spin_unlock(&hrrq->_lock);
	return 0;
}
/**
 * ipr_qc_issue - Issue a SATA qc to a device
 * @qc:	queued command
 *
 * Return value:
 * 	0 if success
 **/
static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_ioarcb_ata_regs *regs;

	if (qc->lldd_task == NULL)
		ipr_qc_defer(qc);

	ipr_cmd = qc->lldd_task;
	if (ipr_cmd == NULL)
		return AC_ERR_SYSTEM;

	qc->lldd_task = NULL;
	spin_lock(&ipr_cmd->hrrq->_lock);
	if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
			ipr_cmd->hrrq->ioa_is_dead)) {
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		return AC_ERR_SYSTEM;
	}

	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	ioarcb = &ipr_cmd->ioarcb;

	if (ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	memset(regs, 0, sizeof(*regs));
	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->qc = qc;
	ipr_cmd->done = ipr_sata_done;
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	ipr_cmd->dma_use_sg = qc->n_elem;

	if (ioa_cfg->sis64)
		ipr_build_ata_ioadl64(ipr_cmd, qc);
	else
		ipr_build_ata_ioadl(ipr_cmd, qc);

	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	ipr_copy_sata_tf(regs, &qc->tf);
	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
	case ATA_PROT_PIO:
		break;

	case ATA_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		break;

	case ATAPI_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	default:
		spin_unlock(&ipr_cmd->hrrq->_lock);
		return AC_ERR_INVALID;
	}

	ipr_send_command(ipr_cmd);
	spin_unlock(&ipr_cmd->hrrq->_lock);

	return 0;
}
/**
 * ipr_qc_fill_rtf - Read result TF
 * @qc: ATA queued command
 *
 * Return value:
 * 	true
 **/
static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioasa_gata *g = &sata_port->ioasa;
	struct ata_taskfile *tf = &qc->result_tf;

	tf->feature = g->error;
	tf->nsect = g->nsect;
	tf->lbal = g->lbal;
	tf->lbam = g->lbam;
	tf->lbah = g->lbah;
	tf->device = g->device;
	tf->command = g->status;
	tf->hob_nsect = g->hob_nsect;
	tf->hob_lbal = g->hob_lbal;
	tf->hob_lbam = g->hob_lbam;
	tf->hob_lbah = g->hob_lbah;

	return true;
}
static struct ata_port_operations ipr_sata_ops = {
	.phy_reset = ipr_ata_phy_reset,
	.hardreset = ipr_sata_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.qc_prep = ata_noop_qc_prep,
	.qc_defer = ipr_qc_defer,
	.qc_issue = ipr_qc_issue,
	.qc_fill_rtf = ipr_qc_fill_rtf,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};

static struct ata_port_info sata_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask	= ATA_PIO4_ONLY,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ipr_sata_ops
};
#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
	PVR_NORTHSTAR,
	PVR_PULSAR,
	PVR_POWER4,
	PVR_ICESTAR,
	PVR_SSTAR,
	PVR_POWER4p,
	PVR_630,
	PVR_630p
};

/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 * 	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
			if (pvr_version_is(ipr_blocked_processors[i]))
				return 1;
		}
	}
	return 0;
}
#else
#define ipr_invalid_adapter(ioa_cfg) 0
#endif
/**
 * ipr_ioa_bringdown_done - IOA bring down completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter bring down.
 * It wakes any reset sleepers.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

	ENTER;
	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
		spin_unlock_irq(ioa_cfg->host->host_lock);
		scsi_unblock_requests(ioa_cfg->host);
		spin_lock_irq(ioa_cfg->host->host_lock);
	}

	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->reset_retries = 0;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].ioa_is_dead = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);
	LEAVE;

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioa_reset_done - IOA reset completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter reset.
 * It schedules any necessary mid-layer add/removes and
 * wakes any reset sleepers.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_hostrcb *hostrcb, *temp;
	int i = 0, j;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	for (j = 0; j < ioa_cfg->hrrq_num; j++) {
		spin_lock(&ioa_cfg->hrrq[j]._lock);
		ioa_cfg->hrrq[j].allow_cmds = 1;
		spin_unlock(&ioa_cfg->hrrq[j]._lock);
	}
	wmb();
	ioa_cfg->reset_cmd = NULL;
	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml))
			break;
	}
	schedule_work(&ioa_cfg->work_q);

	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
		list_del(&hostrcb->queue);
		if (i++ < IPR_NUM_LOG_HCAMS)
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
		else
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	}

	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");

	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	spin_unlock(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock(ioa_cfg->host->host_lock);

	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
		scsi_block_requests(ioa_cfg->host);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
 * @supported_dev:	supported device struct
 * @vpids:		vendor product id struct
 *
 * Return value:
 * 	none
 **/
static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
				 struct ipr_std_inq_vpids *vpids)
{
	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
	supported_dev->num_records = 1;
	supported_dev->data_length =
		cpu_to_be16(sizeof(struct ipr_supported_device));
	supported_dev->reserved = 0;
}
/**
 * ipr_set_supported_devs - Send Set Supported Devices for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Set Supported Devices to the adapter
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_resource_entry *res = ipr_cmd->u.res;

	ipr_cmd->job_step = ipr_ioa_reset_done;

	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
		if (!ipr_is_scsi_disk(res))
			continue;

		ipr_cmd->u.res = res;
		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;

		ipr_init_ioadl(ipr_cmd,
			       ioa_cfg->vpd_cbs_dma +
				 offsetof(struct ipr_misc_cbs, supp_dev),
			       sizeof(struct ipr_supported_device),
			       IPR_IOADL_FLAGS_WRITE_LAST);

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_SET_SUP_DEVICE_TIMEOUT);

		if (!ioa_cfg->sis64)
			ipr_cmd->job_step = ipr_set_supported_devs;
		LEAVE;
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_get_mode_page - Locate specified mode page
 * @mode_pages:	mode page buffer
 * @page_code:	page code to find
 * @len:	minimum required length for mode page
 *
 * Return value:
 * 	pointer to mode page / NULL on failure
 **/
static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
			       u32 page_code, u32 len)
{
	struct ipr_mode_page_hdr *mode_hdr;
	u32 page_length;
	u32 length;

	if (!mode_pages || (mode_pages->hdr.length == 0))
		return NULL;

	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
	mode_hdr = (struct ipr_mode_page_hdr *)
		(mode_pages->data + mode_pages->hdr.block_desc_len);

	while (length) {
		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
				return mode_hdr;
			break;
		} else {
			page_length = (sizeof(struct ipr_mode_page_hdr) +
				       mode_hdr->page_length);
			length -= page_length;
			mode_hdr = (struct ipr_mode_page_hdr *)
				((unsigned long)mode_hdr + page_length);
		}
	}
	return NULL;
}
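/*
 * Note on ipr_get_mode_page() above: mode sense parameter data is laid out as
 * a header, optional block descriptors, then one page after another. Each
 * page header supplies the page code and its length, which is how the loop
 * steps from one page to the next until the requested code is found.
 */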
/**
 * ipr_check_term_power - Check for term power errors
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	IOAFP mode pages buffer
 *
 * Check the IOAFP's mode page 28 for term power errors
 *
 * Return value:
 * 	nothing
 **/
static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_mode_pages *mode_pages)
{
	int i;
	int entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	bus = mode_page->bus;

	for (i = 0; i < mode_page->num_entries; i++) {
		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
			dev_err(&ioa_cfg->pdev->dev,
				"Term power is absent on scsi bus %d\n",
				bus->res_addr.bus);
		}

		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
	}
}
/**
 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
 * @ioa_cfg:	ioa config struct
 *
 * Looks through the config table checking for SES devices. If
 * the SES device is in the SES table indicating a maximum SCSI
 * bus speed, the speed is limited for the bus.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
{
	u32 max_xfer_rate;
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
						       ioa_cfg->bus_attr[i].bus_width);

		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
	}
}
/**
 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	mode page 28 buffer
 *
 * Updates mode page 28 based on driver configuration
 *
 * Return value:
 * 	none
 **/
static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_mode_pages *mode_pages)
{
	int i, entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_bus_attributes *bus_attr;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	/* Loop for each device bus entry */
	for (i = 0, bus = mode_page->bus;
	     i < mode_page->num_entries;
	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
			dev_err(&ioa_cfg->pdev->dev,
				"Invalid resource address reported: 0x%08X\n",
				IPR_GET_PHYS_LOC(bus->res_addr));
			continue;
		}

		bus_attr = &ioa_cfg->bus_attr[i];
		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
		bus->bus_width = bus_attr->bus_width;
		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
		if (bus_attr->qas_enabled)
			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
		else
			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
	}
}
/**
 * ipr_build_mode_select - Build a mode select command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:	Byte 2 of Mode Sense command
 * @dma_addr:	DMA buffer address
 * @xfer_len:	data transfer length
 *
 * Return value:
 * 	none
 **/
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
				  __be32 res_handle, u8 parm,
				  dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
	ioarcb->cmd_pkt.cdb[1] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
}
/**
 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the SCSI bus attributes and sends
 * a Mode Select for Page 28 to activate them.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	int length;

	ENTER;
	ipr_scsi_bus_speed_limit(ioa_cfg);
	ipr_check_term_power(ioa_cfg, mode_pages);
	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_set_supported_devs;
	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
				    struct ipr_resource_entry, queue);
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_build_mode_sense - Builds a mode sense command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource entry struct
 * @parm:	Byte 2 of mode sense command
 * @dma_addr:	DMA address of mode sense buffer
 * @xfer_len:	Size of DMA buffer
 *
 * Return value:
 * 	none
 **/
static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
				 __be32 res_handle,
				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
	ioarcb->cmd_pkt.cdb[2] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
}
/**
 * ipr_reset_cmd_failed - Handle failure of IOA reset command
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of an IOA bringup command.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	dev_err(&ioa_cfg->pdev->dev,
		"0x%02X failed with IOASC: 0x%08X\n",
		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_set_supported_devs;
		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
					    struct ipr_resource_entry, queue);
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}
/**
 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function send a Page 28 mode sense to the IOA to
 * retrieve SCSI bus attributes.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x28, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function enables dual IOA RAID support if possible.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	struct ipr_mode_page24 *mode_page;
	int length;

	ENTER;
	mode_page = ipr_get_mode_page(mode_pages, 0x24,
				      sizeof(struct ipr_mode_page24));

	if (mode_page)
		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;

	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
{
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}
/**
 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function send a mode sense to the IOA to retrieve
 * the IOA Advanced Function Control mode page.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x24, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_init_res_table - Initialize the resource table
 * @ipr_cmd:	ipr command struct
 *
 * This function looks through the existing resource table, comparing
 * it with the config table. This function will take care of old/new
 * devices and schedule adding/removing them from the mid-layer
 * as appropriate.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res, *temp;
	struct ipr_config_table_entry_wrapper cfgtew;
	int entries, found, flag, i;
	LIST_HEAD(old_res);

	ENTER;
	if (ioa_cfg->sis64)
		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
	else
		flag = ioa_cfg->u.cfg_table->hdr.flags;

	if (flag & IPR_UCODE_DOWNLOAD_REQ)
		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");

	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
		list_move_tail(&res->queue, &old_res);

	if (ioa_cfg->sis64)
		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
	else
		entries = ioa_cfg->u.cfg_table->hdr.num_entries;

	for (i = 0; i < entries; i++) {
		if (ioa_cfg->sis64)
			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
		else
			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
		found = 0;

		list_for_each_entry_safe(res, temp, &old_res, queue) {
			if (ipr_is_same_device(res, &cfgtew)) {
				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
				found = 1;
				break;
			}
		}

		if (!found) {
			if (list_empty(&ioa_cfg->free_res_q)) {
				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
				break;
			}

			found = 1;
			res = list_entry(ioa_cfg->free_res_q.next,
					 struct ipr_resource_entry, queue);
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
			ipr_init_res_entry(res, &cfgtew);
			res->add_to_ml = 1;
		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
			res->sdev->allow_restart = 1;

		if (found)
			ipr_update_res_entry(res, &cfgtew);
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		}
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		ipr_clear_res_target(res);
		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	}

	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
	else
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Query IOA Configuration command
 * to the adapter to retrieve the IOA configuration table.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
		ioa_cfg->dual_raid = 1;
	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
		 ucode_vpd->major_release, ucode_vpd->card_type,
		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;

	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
		       IPR_IOADL_FLAGS_READ_LAST);

	ipr_cmd->job_step = ipr_init_res_table;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 * @flags:	inquiry flags
 * @page:	inquiry page code
 * @dma_addr:	DMA address of inquiry buffer
 * @xfer_len:	transfer length
 *
 * This utility function sends an inquiry to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
	LEAVE;
}

/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:	inquiry page 0 buffer
 * @page:	page code.
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
			return 1;

	return 0;
}
/**
 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0xD0 inquiry to the adapter
 * to retrieve adapter capabilities.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
	memset(cap, 0, sizeof(*cap));

	if (ipr_inquiry_page_supported(page0, 0xD0)) {
		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
				  sizeof(struct ipr_inquiry_cap));
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5];

	ENTER;

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function send an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
		hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];

		ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		if (ioa_cfg->sis64)
			ioarcb->cmd_pkt.cdb[1] = 0x1;

		if (ioa_cfg->nvectors == 1)
			ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
		else
			ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;

		ioarcb->cmd_pkt.cdb[2] =
			((u64) hrrq->host_rrq_dma >> 24) & 0xff;
		ioarcb->cmd_pkt.cdb[3] =
			((u64) hrrq->host_rrq_dma >> 16) & 0xff;
		ioarcb->cmd_pkt.cdb[4] =
			((u64) hrrq->host_rrq_dma >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[5] =
			((u64) hrrq->host_rrq_dma) & 0xff;
		ioarcb->cmd_pkt.cdb[7] =
			((sizeof(u32) * hrrq->size) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] =
			(sizeof(u32) * hrrq->size) & 0xff;

		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
			ioarcb->cmd_pkt.cdb[9] =
					ioa_cfg->identify_hrrq_index;

		if (ioa_cfg->sis64) {
			ioarcb->cmd_pkt.cdb[10] =
				((u64) hrrq->host_rrq_dma >> 56) & 0xff;
			ioarcb->cmd_pkt.cdb[11] =
				((u64) hrrq->host_rrq_dma >> 48) & 0xff;
			ioarcb->cmd_pkt.cdb[12] =
				((u64) hrrq->host_rrq_dma >> 40) & 0xff;
			ioarcb->cmd_pkt.cdb[13] =
				((u64) hrrq->host_rrq_dma >> 32) & 0xff;
		}

		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
			ioarcb->cmd_pkt.cdb[14] =
					ioa_cfg->identify_hrrq_index;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_INTERNAL_TIMEOUT);

		if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;

		LEAVE;
		return IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
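/*
 * Note on the Identify Host RRQ CDB built above: bytes 2-5 carry bits 31:0 of
 * the queue's DMA address, bytes 7-8 carry the queue length in bytes, and on
 * SIS-64 adapters bytes 10-13 carry bits 63:32 of the address. When multiple
 * HRRQs are enabled, byte 9 (byte 14 on SIS-64) selects which queue is being
 * identified, and the function re-runs itself once per queue.
 */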
/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	/* Add this command to the pending queue while the timer runs */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}
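/*
 * Note on the reset job state machine used throughout this file: each
 * job_step either returns IPR_RC_JOB_CONTINUE, meaning the next step runs
 * immediately, or IPR_RC_JOB_RETURN after queuing an asynchronous command or
 * timer whose completion path (ipr_reset_ioa_job) drives the following step.
 */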
/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);

		/* Initialize Host RRQ pointers */
		hrrq->hrrq_start = hrrq->host_rrq;
		hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
		hrrq->hrrq_curr = hrrq->hrrq_start;
		hrrq->toggle_bit = 1;
		spin_unlock(&hrrq->_lock);
	}
	wmb();

	ioa_cfg->identify_hrrq_index = 0;
	if (ioa_cfg->hrrq_num == 1)
		atomic_set(&ioa_cfg->hrrq_index, 0);
	else
		atomic_set(&ioa_cfg->hrrq_index, 1);

	/* Zero out config table */
	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
}
/**
 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
{
	unsigned long stage, stage_time;
	u32 feedback;
	volatile u32 int_reg;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u64 maskval = 0;

	feedback = readl(ioa_cfg->regs.init_feedback_reg);
	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;

	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);

	/* sanity check the stage_time value */
	if (stage_time == 0)
		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;

	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		stage_time = ioa_cfg->transop_timeout;
		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
			maskval = IPR_PCII_IPL_STAGE_CHANGE;
			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			return IPR_RC_JOB_CONTINUE;
		}
	}

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;
	volatile u64 maskval;
	int i;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg32);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);

	if (ioa_cfg->sis64) {
		maskval = IPR_PCII_IPL_STAGE_CHANGE;
		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
	} else
		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);

	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	if (ioa_cfg->sis64) {
		ipr_cmd->job_step = ipr_reset_next_stage;
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	else if (ioa_cfg->sdt_state == READ_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ioa_cfg->dump_timeout = 1;
	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:		ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}
/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:		ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;
	u32 ioasc;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
		length = be32_to_cpu(sdt.entry[0].end_token);
	else
		length = (be32_to_cpu(sdt.entry[0].end_token) -
			  be32_to_cpu(sdt.entry[0].start_token)) &
			  IPR_FMT2_MBX_ADDR_MASK;

	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].start_token),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
		    ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	} else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}
/**
 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function will call to get the unit check buffer.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ioa_cfg->ioa_unit_checked = 0;
	ipr_get_unit_check_buffer(ioa_cfg);
	ipr_cmd->job_step = ipr_reset_alert;
	ipr_reset_start_timer(ipr_cmd, 0);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 int_reg;

	ENTER;
	ioa_cfg->pdev->state_saved = true;
	pci_restore_state(ioa_cfg->pdev);

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	if (ioa_cfg->ioa_unit_checked) {
		if (ioa_cfg->sis64) {
			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
			return IPR_RC_JOB_RETURN;
		} else {
			ioa_cfg->ioa_unit_checked = 0;
			ipr_get_unit_check_buffer(ioa_cfg);
			ipr_cmd->job_step = ipr_reset_alert;
			ipr_reset_start_timer(ipr_cmd, 0);
			return IPR_RC_JOB_RETURN;
		}
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;

		if (GET_DUMP == ioa_cfg->sdt_state) {
			ioa_cfg->sdt_state = READ_DUMP;
			ioa_cfg->dump_timeout = 0;
			if (ioa_cfg->sis64)
				ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
			else
				ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
			ipr_cmd->job_step = ipr_reset_wait_for_dump;
			schedule_work(&ioa_cfg->work_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_bist_done - BIST has completed on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: Unblock config space and resume the reset process.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->cfg_locked)
		pci_cfg_access_unlock(ioa_cfg->pdev);
	ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_restore_cfg_space;

	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = PCIBIOS_SUCCESSFUL;

	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
		writel(IPR_UPROCI_SIS64_START_BIST,
		       ioa_cfg->regs.set_uproc_interrupt_reg32);
	else
		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc == PCIBIOS_SUCCESSFUL) {
		ipr_cmd->job_step = ipr_reset_bist_done;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	} else {
		if (ioa_cfg->cfg_locked)
			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
		ioa_cfg->cfg_locked = 0;
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}
/**
 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This clears PCI reset to the adapter and delays two seconds.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
	ipr_cmd->job_step = ipr_reset_bist_done;
	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This asserts PCI reset to the adapter.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct pci_dev *pdev = ioa_cfg->pdev;

	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
	ipr_cmd->job_step = ipr_reset_slot_reset_done;
	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_block_config_access_wait - Wait for permission to block config access
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;

	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
		ioa_cfg->cfg_locked = 1;
		ipr_cmd->job_step = ioa_cfg->reset;
	} else {
		if (ipr_cmd->u.time_left) {
			rc = IPR_RC_JOB_RETURN;
			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
			ipr_reset_start_timer(ipr_cmd,
					      IPR_CHECK_FOR_RESET_TIMEOUT);
		} else {
			ipr_cmd->job_step = ioa_cfg->reset;
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting to lock config access. Resetting anyway.\n");
		}
	}

	return rc;
}
/**
 * ipr_reset_block_config_access - Block config access to the IOA
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	return IPR_RC_JOB_CONTINUE;
}
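
/*
 * Note (illustrative, not part of the original source): the two functions
 * above form a small polling loop built on the reset job-step machinery.
 * ipr_reset_block_config_access() seeds ipr_cmd->u.time_left with
 * IPR_WAIT_FOR_RESET_TIMEOUT, then ipr_reset_block_config_access_wait()
 * retries pci_cfg_access_trylock() every IPR_CHECK_FOR_RESET_TIMEOUT until
 * either the lock is obtained (cfg_locked = 1) or the time budget runs out,
 * in which case the reset proceeds anyway with a warning.
 */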
/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}
/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}
/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	if (ioa_cfg->sis64)
		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	else
		ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
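
/*
 * Note (illustrative, not part of the original source): cdb[6..8] above carry
 * the microcode image length as a 24-bit big-endian value, as the SCSI
 * WRITE BUFFER command expects. For example, for buffer_len = 0x012345:
 *	cdb[6] = 0x01, cdb[7] = 0x23, cdb[8] = 0x45
 */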
/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	if (shutdown_type != IPR_SHUTDOWN_NONE &&
	    !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		else
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	return rc;
}
/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 *	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue,
				      &ipr_cmd->hrrq->hrrq_free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}
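
/*
 * Note (illustrative, not part of the original source): ipr_reset_ioa_job()
 * drives the reset state machine. Each job step returns either
 * IPR_RC_JOB_CONTINUE, meaning "run ipr_cmd->job_step again immediately", or
 * IPR_RC_JOB_RETURN, meaning "the step armed a timer or issued a command and
 * the state machine resumes when that completes". A minimal sketch of the
 * pattern (example_step, next_step, and delay are hypothetical names):
 *
 *	static int example_step(struct ipr_cmnd *ipr_cmd)
 *	{
 *		ipr_cmd->job_step = next_step;		// what to run next
 *		ipr_reset_start_timer(ipr_cmd, delay);	// resume later...
 *		return IPR_RC_JOB_RETURN;		// ...so return now
 *	}
 */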
/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;
	int i;

	ioa_cfg->in_reset_reload = 1;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_cmds = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
		scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}
/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	int i;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload) {
		if (ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
		else if (ioa_cfg->sdt_state == READ_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
	}

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 1;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
				spin_unlock_irq(ioa_cfg->host->host_lock);
				scsi_unblock_requests(ioa_cfg->host);
				spin_lock_irq(ioa_cfg->host->host_lock);
			}
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 **/
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

	/* Disallow new interrupts, avoid loop */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the MMIO
 * access to the IOA has been restored
 **/
static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (!ioa_cfg->probe_done)
		pci_save_state(pdev);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 **/
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done)
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 **/
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->needs_warm_reset)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		else
			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
						IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 **/
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
		ioa_cfg->in_ioa_bringdown = 1;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].allow_cmds = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 *	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 **/
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
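
/*
 * Note (illustrative, not part of the original source): taken together, the
 * PCI error handlers above map the EEH recovery sequence onto driver actions:
 * error_detected(frozen) holds off I/O via ipr_reset_freeze and returns
 * PCI_ERS_RESULT_CAN_RECOVER, mmio_enabled() requests a reset, slot_reset()
 * restores config space (or issues a warm reset when needed) and returns
 * PCI_ERS_RESULT_RECOVERED, and perm_failure() takes the adapter offline.
 */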
/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
8784 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg
*ioa_cfg
)
8787 unsigned long host_lock_flags
= 0;
8790 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, host_lock_flags
);
8791 dev_dbg(&ioa_cfg
->pdev
->dev
, "ioa_cfg adx: 0x%p\n", ioa_cfg
);
8792 ioa_cfg
->probe_done
= 1;
8793 if (ioa_cfg
->needs_hard_reset
) {
8794 ioa_cfg
->needs_hard_reset
= 0;
8795 ipr_initiate_ioa_reset(ioa_cfg
, IPR_SHUTDOWN_NONE
);
8797 _ipr_initiate_ioa_reset(ioa_cfg
, ipr_reset_enable_ioa
,
8799 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, host_lock_flags
);
8800 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
8801 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, host_lock_flags
);
8803 if (ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].ioa_is_dead
) {
8805 } else if (ipr_invalid_adapter(ioa_cfg
)) {
8809 dev_err(&ioa_cfg
->pdev
->dev
,
8810 "Adapter not supported in this hardware configuration.\n");
8813 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, host_lock_flags
);
8820 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8821 * @ioa_cfg: ioa config struct
8826 static void ipr_free_cmd_blks(struct ipr_ioa_cfg
*ioa_cfg
)
8830 for (i
= 0; i
< IPR_NUM_CMD_BLKS
; i
++) {
8831 if (ioa_cfg
->ipr_cmnd_list
[i
])
8832 dma_pool_free(ioa_cfg
->ipr_cmd_pool
,
8833 ioa_cfg
->ipr_cmnd_list
[i
],
8834 ioa_cfg
->ipr_cmnd_list_dma
[i
]);
8836 ioa_cfg
->ipr_cmnd_list
[i
] = NULL
;
8839 if (ioa_cfg
->ipr_cmd_pool
)
8840 dma_pool_destroy(ioa_cfg
->ipr_cmd_pool
);
8842 kfree(ioa_cfg
->ipr_cmnd_list
);
8843 kfree(ioa_cfg
->ipr_cmnd_list_dma
);
8844 ioa_cfg
->ipr_cmnd_list
= NULL
;
8845 ioa_cfg
->ipr_cmnd_list_dma
= NULL
;
8846 ioa_cfg
->ipr_cmd_pool
= NULL
;
8850 * ipr_free_mem - Frees memory allocated for an adapter
8851 * @ioa_cfg: ioa cfg struct
8856 static void ipr_free_mem(struct ipr_ioa_cfg
*ioa_cfg
)
8860 kfree(ioa_cfg
->res_entries
);
8861 dma_free_coherent(&ioa_cfg
->pdev
->dev
, sizeof(struct ipr_misc_cbs
),
8862 ioa_cfg
->vpd_cbs
, ioa_cfg
->vpd_cbs_dma
);
8863 ipr_free_cmd_blks(ioa_cfg
);
8865 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++)
8866 dma_free_coherent(&ioa_cfg
->pdev
->dev
,
8867 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
8868 ioa_cfg
->hrrq
[i
].host_rrq
,
8869 ioa_cfg
->hrrq
[i
].host_rrq_dma
);
8871 dma_free_coherent(&ioa_cfg
->pdev
->dev
, ioa_cfg
->cfg_table_size
,
8872 ioa_cfg
->u
.cfg_table
, ioa_cfg
->cfg_table_dma
);
8874 for (i
= 0; i
< IPR_NUM_HCAMS
; i
++) {
8875 dma_free_coherent(&ioa_cfg
->pdev
->dev
,
8876 sizeof(struct ipr_hostrcb
),
8877 ioa_cfg
->hostrcb
[i
],
8878 ioa_cfg
->hostrcb_dma
[i
]);
8881 ipr_free_dump(ioa_cfg
);
8882 kfree(ioa_cfg
->trace
);
8886 * ipr_free_all_resources - Free all allocated resources for an adapter.
8887 * @ipr_cmd: ipr command struct
8889 * This function frees all allocated resources for the
8890 * specified adapter.
8895 static void ipr_free_all_resources(struct ipr_ioa_cfg
*ioa_cfg
)
8897 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
8900 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
||
8901 ioa_cfg
->intr_flag
== IPR_USE_MSIX
) {
8903 for (i
= 0; i
< ioa_cfg
->nvectors
; i
++)
8904 free_irq(ioa_cfg
->vectors_info
[i
].vec
,
8907 free_irq(pdev
->irq
, &ioa_cfg
->hrrq
[0]);
8909 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
) {
8910 pci_disable_msi(pdev
);
8911 ioa_cfg
->intr_flag
&= ~IPR_USE_MSI
;
8912 } else if (ioa_cfg
->intr_flag
== IPR_USE_MSIX
) {
8913 pci_disable_msix(pdev
);
8914 ioa_cfg
->intr_flag
&= ~IPR_USE_MSIX
;
8917 iounmap(ioa_cfg
->hdw_dma_regs
);
8918 pci_release_regions(pdev
);
8919 ipr_free_mem(ioa_cfg
);
8920 scsi_host_put(ioa_cfg
->host
);
8921 pci_disable_device(pdev
);
8926 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8927 * @ioa_cfg: ioa config struct
8930 * 0 on success / -ENOMEM on allocation failure
8932 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg
*ioa_cfg
)
8934 struct ipr_cmnd
*ipr_cmd
;
8935 struct ipr_ioarcb
*ioarcb
;
8936 dma_addr_t dma_addr
;
8937 int i
, entries_each_hrrq
, hrrq_id
= 0;
8939 ioa_cfg
->ipr_cmd_pool
= dma_pool_create(IPR_NAME
, &ioa_cfg
->pdev
->dev
,
8940 sizeof(struct ipr_cmnd
), 512, 0);
8942 if (!ioa_cfg
->ipr_cmd_pool
)
8945 ioa_cfg
->ipr_cmnd_list
= kcalloc(IPR_NUM_CMD_BLKS
, sizeof(struct ipr_cmnd
*), GFP_KERNEL
);
8946 ioa_cfg
->ipr_cmnd_list_dma
= kcalloc(IPR_NUM_CMD_BLKS
, sizeof(dma_addr_t
), GFP_KERNEL
);
8948 if (!ioa_cfg
->ipr_cmnd_list
|| !ioa_cfg
->ipr_cmnd_list_dma
) {
8949 ipr_free_cmd_blks(ioa_cfg
);
8953 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
8954 if (ioa_cfg
->hrrq_num
> 1) {
8956 entries_each_hrrq
= IPR_NUM_INTERNAL_CMD_BLKS
;
8957 ioa_cfg
->hrrq
[i
].min_cmd_id
= 0;
8958 ioa_cfg
->hrrq
[i
].max_cmd_id
=
8959 (entries_each_hrrq
- 1);
8962 IPR_NUM_BASE_CMD_BLKS
/
8963 (ioa_cfg
->hrrq_num
- 1);
8964 ioa_cfg
->hrrq
[i
].min_cmd_id
=
8965 IPR_NUM_INTERNAL_CMD_BLKS
+
8966 (i
- 1) * entries_each_hrrq
;
8967 ioa_cfg
->hrrq
[i
].max_cmd_id
=
8968 (IPR_NUM_INTERNAL_CMD_BLKS
+
8969 i
* entries_each_hrrq
- 1);
8972 entries_each_hrrq
= IPR_NUM_CMD_BLKS
;
8973 ioa_cfg
->hrrq
[i
].min_cmd_id
= 0;
8974 ioa_cfg
->hrrq
[i
].max_cmd_id
= (entries_each_hrrq
- 1);
8976 ioa_cfg
->hrrq
[i
].size
= entries_each_hrrq
;
8979 BUG_ON(ioa_cfg
->hrrq_num
== 0);
8981 i
= IPR_NUM_CMD_BLKS
-
8982 ioa_cfg
->hrrq
[ioa_cfg
->hrrq_num
- 1].max_cmd_id
- 1;
8984 ioa_cfg
->hrrq
[ioa_cfg
->hrrq_num
- 1].size
+= i
;
8985 ioa_cfg
->hrrq
[ioa_cfg
->hrrq_num
- 1].max_cmd_id
+= i
;
8988 for (i
= 0; i
< IPR_NUM_CMD_BLKS
; i
++) {
8989 ipr_cmd
= dma_pool_alloc(ioa_cfg
->ipr_cmd_pool
, GFP_KERNEL
, &dma_addr
);
8992 ipr_free_cmd_blks(ioa_cfg
);
8996 memset(ipr_cmd
, 0, sizeof(*ipr_cmd
));
8997 ioa_cfg
->ipr_cmnd_list
[i
] = ipr_cmd
;
8998 ioa_cfg
->ipr_cmnd_list_dma
[i
] = dma_addr
;
9000 ioarcb
= &ipr_cmd
->ioarcb
;
9001 ipr_cmd
->dma_addr
= dma_addr
;
9003 ioarcb
->a
.ioarcb_host_pci_addr64
= cpu_to_be64(dma_addr
);
9005 ioarcb
->a
.ioarcb_host_pci_addr
= cpu_to_be32(dma_addr
);
9007 ioarcb
->host_response_handle
= cpu_to_be32(i
<< 2);
9008 if (ioa_cfg
->sis64
) {
9009 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
9010 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl64
));
9011 ioarcb
->u
.sis64_addr_data
.ioasa_host_pci_addr
=
9012 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, s
.ioasa64
));
9014 ioarcb
->write_ioadl_addr
=
9015 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ioadl
));
9016 ioarcb
->read_ioadl_addr
= ioarcb
->write_ioadl_addr
;
9017 ioarcb
->ioasa_host_pci_addr
=
9018 cpu_to_be32(dma_addr
+ offsetof(struct ipr_cmnd
, s
.ioasa
));
9020 ioarcb
->ioasa_len
= cpu_to_be16(sizeof(struct ipr_ioasa
));
9021 ipr_cmd
->cmd_index
= i
;
9022 ipr_cmd
->ioa_cfg
= ioa_cfg
;
9023 ipr_cmd
->sense_buffer_dma
= dma_addr
+
9024 offsetof(struct ipr_cmnd
, sense_buffer
);
9026 ipr_cmd
->ioarcb
.cmd_pkt
.hrrq_id
= hrrq_id
;
9027 ipr_cmd
->hrrq
= &ioa_cfg
->hrrq
[hrrq_id
];
9028 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
9029 if (i
>= ioa_cfg
->hrrq
[hrrq_id
].max_cmd_id
)
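
/*
 * Note (illustrative, not part of the original source): when multiple HRRQs
 * are in use, ipr_alloc_cmd_blks() reserves the first queue (command ids
 * 0..IPR_NUM_INTERNAL_CMD_BLKS-1) for internal commands and splits the
 * remaining IPR_NUM_BASE_CMD_BLKS command blocks across the other queues via
 * each hrrq's min_cmd_id/max_cmd_id window; any remainder is folded into the
 * last queue.
 */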
9037 * ipr_alloc_mem - Allocate memory for an adapter
9038 * @ioa_cfg: ioa config struct
9041 * 0 on success / non-zero for error
9043 static int ipr_alloc_mem(struct ipr_ioa_cfg
*ioa_cfg
)
9045 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
9046 int i
, rc
= -ENOMEM
;
9049 ioa_cfg
->res_entries
= kzalloc(sizeof(struct ipr_resource_entry
) *
9050 ioa_cfg
->max_devs_supported
, GFP_KERNEL
);
9052 if (!ioa_cfg
->res_entries
)
9055 for (i
= 0; i
< ioa_cfg
->max_devs_supported
; i
++) {
9056 list_add_tail(&ioa_cfg
->res_entries
[i
].queue
, &ioa_cfg
->free_res_q
);
9057 ioa_cfg
->res_entries
[i
].ioa_cfg
= ioa_cfg
;
9060 ioa_cfg
->vpd_cbs
= dma_alloc_coherent(&pdev
->dev
,
9061 sizeof(struct ipr_misc_cbs
),
9062 &ioa_cfg
->vpd_cbs_dma
,
9065 if (!ioa_cfg
->vpd_cbs
)
9066 goto out_free_res_entries
;
9068 if (ipr_alloc_cmd_blks(ioa_cfg
))
9069 goto out_free_vpd_cbs
;
9071 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9072 ioa_cfg
->hrrq
[i
].host_rrq
= dma_alloc_coherent(&pdev
->dev
,
9073 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
9074 &ioa_cfg
->hrrq
[i
].host_rrq_dma
,
9077 if (!ioa_cfg
->hrrq
[i
].host_rrq
) {
9079 dma_free_coherent(&pdev
->dev
,
9080 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
9081 ioa_cfg
->hrrq
[i
].host_rrq
,
9082 ioa_cfg
->hrrq
[i
].host_rrq_dma
);
9083 goto out_ipr_free_cmd_blocks
;
9085 ioa_cfg
->hrrq
[i
].ioa_cfg
= ioa_cfg
;
9088 ioa_cfg
->u
.cfg_table
= dma_alloc_coherent(&pdev
->dev
,
9089 ioa_cfg
->cfg_table_size
,
9090 &ioa_cfg
->cfg_table_dma
,
9093 if (!ioa_cfg
->u
.cfg_table
)
9094 goto out_free_host_rrq
;
9096 for (i
= 0; i
< IPR_NUM_HCAMS
; i
++) {
9097 ioa_cfg
->hostrcb
[i
] = dma_alloc_coherent(&pdev
->dev
,
9098 sizeof(struct ipr_hostrcb
),
9099 &ioa_cfg
->hostrcb_dma
[i
],
9102 if (!ioa_cfg
->hostrcb
[i
])
9103 goto out_free_hostrcb_dma
;
9105 ioa_cfg
->hostrcb
[i
]->hostrcb_dma
=
9106 ioa_cfg
->hostrcb_dma
[i
] + offsetof(struct ipr_hostrcb
, hcam
);
9107 ioa_cfg
->hostrcb
[i
]->ioa_cfg
= ioa_cfg
;
9108 list_add_tail(&ioa_cfg
->hostrcb
[i
]->queue
, &ioa_cfg
->hostrcb_free_q
);
9111 ioa_cfg
->trace
= kzalloc(sizeof(struct ipr_trace_entry
) *
9112 IPR_NUM_TRACE_ENTRIES
, GFP_KERNEL
);
9114 if (!ioa_cfg
->trace
)
9115 goto out_free_hostrcb_dma
;
9122 out_free_hostrcb_dma
:
9124 dma_free_coherent(&pdev
->dev
, sizeof(struct ipr_hostrcb
),
9125 ioa_cfg
->hostrcb
[i
],
9126 ioa_cfg
->hostrcb_dma
[i
]);
9128 dma_free_coherent(&pdev
->dev
, ioa_cfg
->cfg_table_size
,
9129 ioa_cfg
->u
.cfg_table
, ioa_cfg
->cfg_table_dma
);
9131 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9132 dma_free_coherent(&pdev
->dev
,
9133 sizeof(u32
) * ioa_cfg
->hrrq
[i
].size
,
9134 ioa_cfg
->hrrq
[i
].host_rrq
,
9135 ioa_cfg
->hrrq
[i
].host_rrq_dma
);
9137 out_ipr_free_cmd_blocks
:
9138 ipr_free_cmd_blks(ioa_cfg
);
9140 dma_free_coherent(&pdev
->dev
, sizeof(struct ipr_misc_cbs
),
9141 ioa_cfg
->vpd_cbs
, ioa_cfg
->vpd_cbs_dma
);
9142 out_free_res_entries
:
9143 kfree(ioa_cfg
->res_entries
);
9148 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9149 * @ioa_cfg: ioa config struct
9154 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg
*ioa_cfg
)
9158 for (i
= 0; i
< IPR_MAX_NUM_BUSES
; i
++) {
9159 ioa_cfg
->bus_attr
[i
].bus
= i
;
9160 ioa_cfg
->bus_attr
[i
].qas_enabled
= 0;
9161 ioa_cfg
->bus_attr
[i
].bus_width
= IPR_DEFAULT_BUS_WIDTH
;
9162 if (ipr_max_speed
< ARRAY_SIZE(ipr_max_bus_speeds
))
9163 ioa_cfg
->bus_attr
[i
].max_xfer_rate
= ipr_max_bus_speeds
[ipr_max_speed
];
9165 ioa_cfg
->bus_attr
[i
].max_xfer_rate
= IPR_U160_SCSI_RATE
;
9170 * ipr_init_regs - Initialize IOA registers
9171 * @ioa_cfg: ioa config struct
9176 static void ipr_init_regs(struct ipr_ioa_cfg
*ioa_cfg
)
9178 const struct ipr_interrupt_offsets
*p
;
9179 struct ipr_interrupts
*t
;
9182 p
= &ioa_cfg
->chip_cfg
->regs
;
9184 base
= ioa_cfg
->hdw_dma_regs
;
9186 t
->set_interrupt_mask_reg
= base
+ p
->set_interrupt_mask_reg
;
9187 t
->clr_interrupt_mask_reg
= base
+ p
->clr_interrupt_mask_reg
;
9188 t
->clr_interrupt_mask_reg32
= base
+ p
->clr_interrupt_mask_reg32
;
9189 t
->sense_interrupt_mask_reg
= base
+ p
->sense_interrupt_mask_reg
;
9190 t
->sense_interrupt_mask_reg32
= base
+ p
->sense_interrupt_mask_reg32
;
9191 t
->clr_interrupt_reg
= base
+ p
->clr_interrupt_reg
;
9192 t
->clr_interrupt_reg32
= base
+ p
->clr_interrupt_reg32
;
9193 t
->sense_interrupt_reg
= base
+ p
->sense_interrupt_reg
;
9194 t
->sense_interrupt_reg32
= base
+ p
->sense_interrupt_reg32
;
9195 t
->ioarrin_reg
= base
+ p
->ioarrin_reg
;
9196 t
->sense_uproc_interrupt_reg
= base
+ p
->sense_uproc_interrupt_reg
;
9197 t
->sense_uproc_interrupt_reg32
= base
+ p
->sense_uproc_interrupt_reg32
;
9198 t
->set_uproc_interrupt_reg
= base
+ p
->set_uproc_interrupt_reg
;
9199 t
->set_uproc_interrupt_reg32
= base
+ p
->set_uproc_interrupt_reg32
;
9200 t
->clr_uproc_interrupt_reg
= base
+ p
->clr_uproc_interrupt_reg
;
9201 t
->clr_uproc_interrupt_reg32
= base
+ p
->clr_uproc_interrupt_reg32
;
9203 if (ioa_cfg
->sis64
) {
9204 t
->init_feedback_reg
= base
+ p
->init_feedback_reg
;
9205 t
->dump_addr_reg
= base
+ p
->dump_addr_reg
;
9206 t
->dump_data_reg
= base
+ p
->dump_data_reg
;
9207 t
->endian_swap_reg
= base
+ p
->endian_swap_reg
;
9212 * ipr_init_ioa_cfg - Initialize IOA config struct
9213 * @ioa_cfg: ioa config struct
9214 * @host: scsi host struct
9215 * @pdev: PCI dev struct
9220 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg
*ioa_cfg
,
9221 struct Scsi_Host
*host
, struct pci_dev
*pdev
)
9225 ioa_cfg
->host
= host
;
9226 ioa_cfg
->pdev
= pdev
;
9227 ioa_cfg
->log_level
= ipr_log_level
;
9228 ioa_cfg
->doorbell
= IPR_DOORBELL
;
9229 sprintf(ioa_cfg
->eye_catcher
, IPR_EYECATCHER
);
9230 sprintf(ioa_cfg
->trace_start
, IPR_TRACE_START_LABEL
);
9231 sprintf(ioa_cfg
->cfg_table_start
, IPR_CFG_TBL_START
);
9232 sprintf(ioa_cfg
->resource_table_label
, IPR_RES_TABLE_LABEL
);
9233 sprintf(ioa_cfg
->ipr_hcam_label
, IPR_HCAM_LABEL
);
9234 sprintf(ioa_cfg
->ipr_cmd_label
, IPR_CMD_LABEL
);
9236 INIT_LIST_HEAD(&ioa_cfg
->hostrcb_free_q
);
9237 INIT_LIST_HEAD(&ioa_cfg
->hostrcb_pending_q
);
9238 INIT_LIST_HEAD(&ioa_cfg
->free_res_q
);
9239 INIT_LIST_HEAD(&ioa_cfg
->used_res_q
);
9240 INIT_WORK(&ioa_cfg
->work_q
, ipr_worker_thread
);
9241 init_waitqueue_head(&ioa_cfg
->reset_wait_q
);
9242 init_waitqueue_head(&ioa_cfg
->msi_wait_q
);
9243 init_waitqueue_head(&ioa_cfg
->eeh_wait_q
);
9244 ioa_cfg
->sdt_state
= INACTIVE
;
9246 ipr_initialize_bus_attr(ioa_cfg
);
9247 ioa_cfg
->max_devs_supported
= ipr_max_devs
;
9249 if (ioa_cfg
->sis64
) {
9250 host
->max_id
= IPR_MAX_SIS64_TARGETS_PER_BUS
;
9251 host
->max_lun
= IPR_MAX_SIS64_LUNS_PER_TARGET
;
9252 if (ipr_max_devs
> IPR_MAX_SIS64_DEVS
)
9253 ioa_cfg
->max_devs_supported
= IPR_MAX_SIS64_DEVS
;
9254 ioa_cfg
->cfg_table_size
= (sizeof(struct ipr_config_table_hdr64
)
9255 + ((sizeof(struct ipr_config_table_entry64
)
9256 * ioa_cfg
->max_devs_supported
)));
9258 host
->max_id
= IPR_MAX_NUM_TARGETS_PER_BUS
;
9259 host
->max_lun
= IPR_MAX_NUM_LUNS_PER_TARGET
;
9260 if (ipr_max_devs
> IPR_MAX_PHYSICAL_DEVS
)
9261 ioa_cfg
->max_devs_supported
= IPR_MAX_PHYSICAL_DEVS
;
9262 ioa_cfg
->cfg_table_size
= (sizeof(struct ipr_config_table_hdr
)
9263 + ((sizeof(struct ipr_config_table_entry
)
9264 * ioa_cfg
->max_devs_supported
)));
9267 host
->max_channel
= IPR_MAX_BUS_TO_SCAN
;
9268 host
->unique_id
= host
->host_no
;
9269 host
->max_cmd_len
= IPR_MAX_CDB_LEN
;
9270 host
->can_queue
= ioa_cfg
->max_cmds
;
9271 pci_set_drvdata(pdev
, ioa_cfg
);
9273 for (i
= 0; i
< ARRAY_SIZE(ioa_cfg
->hrrq
); i
++) {
9274 INIT_LIST_HEAD(&ioa_cfg
->hrrq
[i
].hrrq_free_q
);
9275 INIT_LIST_HEAD(&ioa_cfg
->hrrq
[i
].hrrq_pending_q
);
9276 spin_lock_init(&ioa_cfg
->hrrq
[i
]._lock
);
9278 ioa_cfg
->hrrq
[i
].lock
= ioa_cfg
->host
->host_lock
;
9280 ioa_cfg
->hrrq
[i
].lock
= &ioa_cfg
->hrrq
[i
]._lock
;
9285 * ipr_get_chip_info - Find adapter chip information
9286 * @dev_id: PCI device id struct
9289 * ptr to chip information on success / NULL on failure
9291 static const struct ipr_chip_t
*
9292 ipr_get_chip_info(const struct pci_device_id
*dev_id
)
9296 for (i
= 0; i
< ARRAY_SIZE(ipr_chip
); i
++)
9297 if (ipr_chip
[i
].vendor
== dev_id
->vendor
&&
9298 ipr_chip
[i
].device
== dev_id
->device
)
9299 return &ipr_chip
[i
];
9304 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9306 * @ioa_cfg: ioa config struct
9311 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg
*ioa_cfg
)
9313 struct pci_dev
*pdev
= ioa_cfg
->pdev
;
9315 if (pci_channel_offline(pdev
)) {
9316 wait_event_timeout(ioa_cfg
->eeh_wait_q
,
9317 !pci_channel_offline(pdev
),
9318 IPR_PCI_ERROR_RECOVERY_TIMEOUT
);
9319 pci_restore_state(pdev
);
9323 static int ipr_enable_msix(struct ipr_ioa_cfg
*ioa_cfg
)
9325 struct msix_entry entries
[IPR_MAX_MSIX_VECTORS
];
9328 for (i
= 0; i
< ARRAY_SIZE(entries
); ++i
)
9329 entries
[i
].entry
= i
;
9331 vectors
= pci_enable_msix_range(ioa_cfg
->pdev
,
9332 entries
, 1, ipr_number_of_msix
);
9334 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9338 for (i
= 0; i
< vectors
; i
++)
9339 ioa_cfg
->vectors_info
[i
].vec
= entries
[i
].vector
;
9340 ioa_cfg
->nvectors
= vectors
;
9345 static int ipr_enable_msi(struct ipr_ioa_cfg
*ioa_cfg
)
9349 vectors
= pci_enable_msi_range(ioa_cfg
->pdev
, 1, ipr_number_of_msix
);
9351 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9355 for (i
= 0; i
< vectors
; i
++)
9356 ioa_cfg
->vectors_info
[i
].vec
= ioa_cfg
->pdev
->irq
+ i
;
9357 ioa_cfg
->nvectors
= vectors
;
9362 static void name_msi_vectors(struct ipr_ioa_cfg
*ioa_cfg
)
9364 int vec_idx
, n
= sizeof(ioa_cfg
->vectors_info
[0].desc
) - 1;
9366 for (vec_idx
= 0; vec_idx
< ioa_cfg
->nvectors
; vec_idx
++) {
9367 snprintf(ioa_cfg
->vectors_info
[vec_idx
].desc
, n
,
9368 "host%d-%d", ioa_cfg
->host
->host_no
, vec_idx
);
9369 ioa_cfg
->vectors_info
[vec_idx
].
9370 desc
[strlen(ioa_cfg
->vectors_info
[vec_idx
].desc
)] = 0;
9374 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg
*ioa_cfg
)
9378 for (i
= 1; i
< ioa_cfg
->nvectors
; i
++) {
9379 rc
= request_irq(ioa_cfg
->vectors_info
[i
].vec
,
9382 ioa_cfg
->vectors_info
[i
].desc
,
9386 free_irq(ioa_cfg
->vectors_info
[i
].vec
,
9395 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9396 * @pdev: PCI device struct
9398 * Description: Simply set the msi_received flag to 1 indicating that
9399 * Message Signaled Interrupts are supported.
9402 * 0 on success / non-zero on failure
9404 static irqreturn_t
ipr_test_intr(int irq
, void *devp
)
9406 struct ipr_ioa_cfg
*ioa_cfg
= (struct ipr_ioa_cfg
*)devp
;
9407 unsigned long lock_flags
= 0;
9408 irqreturn_t rc
= IRQ_HANDLED
;
9410 dev_info(&ioa_cfg
->pdev
->dev
, "Received IRQ : %d\n", irq
);
9411 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
9413 ioa_cfg
->msi_received
= 1;
9414 wake_up(&ioa_cfg
->msi_wait_q
);
9416 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
9421 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9422 * @pdev: PCI device struct
9424 * Description: The return value from pci_enable_msi_range() can not always be
9425 * trusted. This routine sets up and initiates a test interrupt to determine
9426 * if the interrupt is received via the ipr_test_intr() service routine.
9427 * If the tests fails, the driver will fall back to LSI.
9430 * 0 on success / non-zero on failure
9432 static int ipr_test_msi(struct ipr_ioa_cfg
*ioa_cfg
, struct pci_dev
*pdev
)
9435 volatile u32 int_reg
;
9436 unsigned long lock_flags
= 0;
9440 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
9441 init_waitqueue_head(&ioa_cfg
->msi_wait_q
);
9442 ioa_cfg
->msi_received
= 0;
9443 ipr_mask_and_clear_interrupts(ioa_cfg
, ~IPR_PCII_IOA_TRANS_TO_OPER
);
9444 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
, ioa_cfg
->regs
.clr_interrupt_mask_reg32
);
9445 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
9446 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
9448 if (ioa_cfg
->intr_flag
== IPR_USE_MSIX
)
9449 rc
= request_irq(ioa_cfg
->vectors_info
[0].vec
, ipr_test_intr
, 0, IPR_NAME
, ioa_cfg
);
9451 rc
= request_irq(pdev
->irq
, ipr_test_intr
, 0, IPR_NAME
, ioa_cfg
);
9453 dev_err(&pdev
->dev
, "Can not assign irq %d\n", pdev
->irq
);
9455 } else if (ipr_debug
)
9456 dev_info(&pdev
->dev
, "IRQ assigned: %d\n", pdev
->irq
);
9458 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE
, ioa_cfg
->regs
.sense_interrupt_reg32
);
9459 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
9460 wait_event_timeout(ioa_cfg
->msi_wait_q
, ioa_cfg
->msi_received
, HZ
);
9461 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
9462 ipr_mask_and_clear_interrupts(ioa_cfg
, ~IPR_PCII_IOA_TRANS_TO_OPER
);
9464 if (!ioa_cfg
->msi_received
) {
9465 /* MSI test failed */
9466 dev_info(&pdev
->dev
, "MSI test failed. Falling back to LSI.\n");
9468 } else if (ipr_debug
)
9469 dev_info(&pdev
->dev
, "MSI test succeeded.\n");
9471 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
9473 if (ioa_cfg
->intr_flag
== IPR_USE_MSIX
)
9474 free_irq(ioa_cfg
->vectors_info
[0].vec
, ioa_cfg
);
9476 free_irq(pdev
->irq
, ioa_cfg
);
9483 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
9484 * @pdev: PCI device struct
9485 * @dev_id: PCI device id struct
9488 * 0 on success / non-zero on failure
9490 static int ipr_probe_ioa(struct pci_dev
*pdev
,
9491 const struct pci_device_id
*dev_id
)
9493 struct ipr_ioa_cfg
*ioa_cfg
;
9494 struct Scsi_Host
*host
;
9495 unsigned long ipr_regs_pci
;
9496 void __iomem
*ipr_regs
;
9497 int rc
= PCIBIOS_SUCCESSFUL
;
9498 volatile u32 mask
, uproc
, interrupts
;
9499 unsigned long lock_flags
, driver_lock_flags
;
9503 dev_info(&pdev
->dev
, "Found IOA with IRQ: %d\n", pdev
->irq
);
9504 host
= scsi_host_alloc(&driver_template
, sizeof(*ioa_cfg
));
9507 dev_err(&pdev
->dev
, "call to scsi_host_alloc failed!\n");
9512 ioa_cfg
= (struct ipr_ioa_cfg
*)host
->hostdata
;
9513 memset(ioa_cfg
, 0, sizeof(struct ipr_ioa_cfg
));
9514 ata_host_init(&ioa_cfg
->ata_host
, &pdev
->dev
, &ipr_sata_ops
);
9516 ioa_cfg
->ipr_chip
= ipr_get_chip_info(dev_id
);
9518 if (!ioa_cfg
->ipr_chip
) {
9519 dev_err(&pdev
->dev
, "Unknown adapter chipset 0x%04X 0x%04X\n",
9520 dev_id
->vendor
, dev_id
->device
);
9521 goto out_scsi_host_put
;
9524 /* set SIS 32 or SIS 64 */
9525 ioa_cfg
->sis64
= ioa_cfg
->ipr_chip
->sis_type
== IPR_SIS64
? 1 : 0;
9526 ioa_cfg
->chip_cfg
= ioa_cfg
->ipr_chip
->cfg
;
9527 ioa_cfg
->clear_isr
= ioa_cfg
->chip_cfg
->clear_isr
;
9528 ioa_cfg
->max_cmds
= ioa_cfg
->chip_cfg
->max_cmds
;
9530 if (ipr_transop_timeout
)
9531 ioa_cfg
->transop_timeout
= ipr_transop_timeout
;
9532 else if (dev_id
->driver_data
& IPR_USE_LONG_TRANSOP_TIMEOUT
)
9533 ioa_cfg
->transop_timeout
= IPR_LONG_OPERATIONAL_TIMEOUT
;
9535 ioa_cfg
->transop_timeout
= IPR_OPERATIONAL_TIMEOUT
;
9537 ioa_cfg
->revid
= pdev
->revision
;
9539 ipr_init_ioa_cfg(ioa_cfg
, host
, pdev
);
9541 ipr_regs_pci
= pci_resource_start(pdev
, 0);
9543 rc
= pci_request_regions(pdev
, IPR_NAME
);
9546 "Couldn't register memory range of registers\n");
9547 goto out_scsi_host_put
;
9550 rc
= pci_enable_device(pdev
);
9552 if (rc
|| pci_channel_offline(pdev
)) {
9553 if (pci_channel_offline(pdev
)) {
9554 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9555 rc
= pci_enable_device(pdev
);
9559 dev_err(&pdev
->dev
, "Cannot enable adapter\n");
9560 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9561 goto out_release_regions
;
9565 ipr_regs
= pci_ioremap_bar(pdev
, 0);
9569 "Couldn't map memory range of registers\n");
9574 ioa_cfg
->hdw_dma_regs
= ipr_regs
;
9575 ioa_cfg
->hdw_dma_regs_pci
= ipr_regs_pci
;
9576 ioa_cfg
->ioa_mailbox
= ioa_cfg
->chip_cfg
->mailbox
+ ipr_regs
;
9578 ipr_init_regs(ioa_cfg
);
9580 if (ioa_cfg
->sis64
) {
9581 rc
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64));
9583 dev_dbg(&pdev
->dev
, "Failed to set 64 bit DMA mask\n");
9584 rc
= dma_set_mask_and_coherent(&pdev
->dev
,
9588 rc
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
9591 dev_err(&pdev
->dev
, "Failed to set DMA mask\n");
9595 rc
= pci_write_config_byte(pdev
, PCI_CACHE_LINE_SIZE
,
9596 ioa_cfg
->chip_cfg
->cache_line_size
);
9598 if (rc
!= PCIBIOS_SUCCESSFUL
) {
9599 dev_err(&pdev
->dev
, "Write of cache line size failed\n");
9600 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9605 /* Issue MMIO read to ensure card is not in EEH */
9606 interrupts
= readl(ioa_cfg
->regs
.sense_interrupt_reg
);
9607 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9609 if (ipr_number_of_msix
> IPR_MAX_MSIX_VECTORS
) {
9610 dev_err(&pdev
->dev
, "The max number of MSIX is %d\n",
9611 IPR_MAX_MSIX_VECTORS
);
9612 ipr_number_of_msix
= IPR_MAX_MSIX_VECTORS
;
9615 if (ioa_cfg
->ipr_chip
->intr_type
== IPR_USE_MSI
&&
9616 ipr_enable_msix(ioa_cfg
) == 0)
9617 ioa_cfg
->intr_flag
= IPR_USE_MSIX
;
9618 else if (ioa_cfg
->ipr_chip
->intr_type
== IPR_USE_MSI
&&
9619 ipr_enable_msi(ioa_cfg
) == 0)
9620 ioa_cfg
->intr_flag
= IPR_USE_MSI
;
9622 ioa_cfg
->intr_flag
= IPR_USE_LSI
;
9623 ioa_cfg
->nvectors
= 1;
9624 dev_info(&pdev
->dev
, "Cannot enable MSI.\n");
9627 pci_set_master(pdev
);
9629 if (pci_channel_offline(pdev
)) {
9630 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9631 pci_set_master(pdev
);
9632 if (pci_channel_offline(pdev
)) {
9634 goto out_msi_disable
;
9638 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
||
9639 ioa_cfg
->intr_flag
== IPR_USE_MSIX
) {
9640 rc
= ipr_test_msi(ioa_cfg
, pdev
);
9641 if (rc
== -EOPNOTSUPP
) {
9642 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9643 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
) {
9644 ioa_cfg
->intr_flag
&= ~IPR_USE_MSI
;
9645 pci_disable_msi(pdev
);
9646 } else if (ioa_cfg
->intr_flag
== IPR_USE_MSIX
) {
9647 ioa_cfg
->intr_flag
&= ~IPR_USE_MSIX
;
9648 pci_disable_msix(pdev
);
9651 ioa_cfg
->intr_flag
= IPR_USE_LSI
;
9652 ioa_cfg
->nvectors
= 1;
9655 goto out_msi_disable
;
9657 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
)
9658 dev_info(&pdev
->dev
,
9659 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9660 ioa_cfg
->nvectors
, pdev
->irq
);
9661 else if (ioa_cfg
->intr_flag
== IPR_USE_MSIX
)
9662 dev_info(&pdev
->dev
,
9663 "Request for %d MSIXs succeeded.",
9668 ioa_cfg
->hrrq_num
= min3(ioa_cfg
->nvectors
,
9669 (unsigned int)num_online_cpus(),
9670 (unsigned int)IPR_MAX_HRRQ_NUM
);
9672 if ((rc
= ipr_save_pcix_cmd_reg(ioa_cfg
)))
9673 goto out_msi_disable
;
9675 if ((rc
= ipr_set_pcix_cmd_reg(ioa_cfg
)))
9676 goto out_msi_disable
;
9678 rc
= ipr_alloc_mem(ioa_cfg
);
9681 "Couldn't allocate enough memory for device driver!\n");
9682 goto out_msi_disable
;
9685 /* Save away PCI config space for use following IOA reset */
9686 rc
= pci_save_state(pdev
);
9688 if (rc
!= PCIBIOS_SUCCESSFUL
) {
9689 dev_err(&pdev
->dev
, "Failed to save PCI config space\n");
9695 * If HRRQ updated interrupt is not masked, or reset alert is set,
9696 * the card is in an unknown state and needs a hard reset
9698 mask
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg32
);
9699 interrupts
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
9700 uproc
= readl(ioa_cfg
->regs
.sense_uproc_interrupt_reg32
);
9701 if ((mask
& IPR_PCII_HRRQ_UPDATED
) == 0 || (uproc
& IPR_UPROCI_RESET_ALERT
))
9702 ioa_cfg
->needs_hard_reset
= 1;
9703 if ((interrupts
& IPR_PCII_ERROR_INTERRUPTS
) || reset_devices
)
9704 ioa_cfg
->needs_hard_reset
= 1;
9705 if (interrupts
& IPR_PCII_IOA_UNIT_CHECKED
)
9706 ioa_cfg
->ioa_unit_checked
= 1;
9708 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
9709 ipr_mask_and_clear_interrupts(ioa_cfg
, ~IPR_PCII_IOA_TRANS_TO_OPER
);
9710 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
9712 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
9713 || ioa_cfg
->intr_flag
== IPR_USE_MSIX
) {
9714 name_msi_vectors(ioa_cfg
);
9715 rc
= request_irq(ioa_cfg
->vectors_info
[0].vec
, ipr_isr
,
9717 ioa_cfg
->vectors_info
[0].desc
,
9720 rc
= ipr_request_other_msi_irqs(ioa_cfg
);
9722 rc
= request_irq(pdev
->irq
, ipr_isr
,
9724 IPR_NAME
, &ioa_cfg
->hrrq
[0]);
9727 dev_err(&pdev
->dev
, "Couldn't register IRQ %d! rc=%d\n",
9732 if ((dev_id
->driver_data
& IPR_USE_PCI_WARM_RESET
) ||
9733 (dev_id
->device
== PCI_DEVICE_ID_IBM_OBSIDIAN_E
&& !ioa_cfg
->revid
)) {
9734 ioa_cfg
->needs_warm_reset
= 1;
9735 ioa_cfg
->reset
= ipr_reset_slot_reset
;
9737 ioa_cfg
->reset
= ipr_reset_start_bist
;
9739 spin_lock_irqsave(&ipr_driver_lock
, driver_lock_flags
);
9740 list_add_tail(&ioa_cfg
->queue
, &ipr_ioa_head
);
9741 spin_unlock_irqrestore(&ipr_driver_lock
, driver_lock_flags
);
9748 ipr_free_mem(ioa_cfg
);
9750 ipr_wait_for_pci_err_recovery(ioa_cfg
);
9751 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
)
9752 pci_disable_msi(pdev
);
9753 else if (ioa_cfg
->intr_flag
== IPR_USE_MSIX
)
9754 pci_disable_msix(pdev
);
9758 pci_disable_device(pdev
);
9759 out_release_regions
:
9760 pci_release_regions(pdev
);
9762 scsi_host_put(host
);
9767 * ipr_scan_vsets - Scans for VSET devices
9768 * @ioa_cfg: ioa config struct
9770 * Description: Since the VSET resources do not follow SAM in that we can have
9771 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
9776 static void ipr_scan_vsets(struct ipr_ioa_cfg
*ioa_cfg
)
9780 for (target
= 0; target
< IPR_MAX_NUM_TARGETS_PER_BUS
; target
++)
9781 for (lun
= 0; lun
< IPR_MAX_NUM_VSET_LUNS_PER_TARGET
; lun
++)
9782 scsi_add_device(ioa_cfg
->host
, IPR_VSET_BUS
, target
, lun
);
9786 * ipr_initiate_ioa_bringdown - Bring down an adapter
9787 * @ioa_cfg: ioa config struct
9788 * @shutdown_type: shutdown type
9790 * Description: This function will initiate bringing down the adapter.
9791 * This consists of issuing an IOA shutdown to the adapter
9792 * to flush the cache, and running BIST.
9793 * If the caller needs to wait on the completion of the reset,
9794 * the caller must sleep on the reset_wait_q.
9799 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg
*ioa_cfg
,
9800 enum ipr_shutdown_type shutdown_type
)
9803 if (ioa_cfg
->sdt_state
== WAIT_FOR_DUMP
)
9804 ioa_cfg
->sdt_state
= ABORT_DUMP
;
9805 ioa_cfg
->reset_retries
= 0;
9806 ioa_cfg
->in_ioa_bringdown
= 1;
9807 ipr_initiate_ioa_reset(ioa_cfg
, shutdown_type
);
9812 * __ipr_remove - Remove a single adapter
9813 * @pdev: pci device struct
9815 * Adapter hot plug remove entry point.
9820 static void __ipr_remove(struct pci_dev
*pdev
)
9822 unsigned long host_lock_flags
= 0;
9823 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
9825 unsigned long driver_lock_flags
;
9828 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9829 while (ioa_cfg
->in_reset_reload
) {
9830 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9831 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
9832 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9835 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
9836 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
9837 ioa_cfg
->hrrq
[i
].removing_ioa
= 1;
9838 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
9841 ipr_initiate_ioa_bringdown(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
9843 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9844 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
9845 flush_work(&ioa_cfg
->work_q
);
9846 INIT_LIST_HEAD(&ioa_cfg
->used_res_q
);
9847 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9849 spin_lock_irqsave(&ipr_driver_lock
, driver_lock_flags
);
9850 list_del(&ioa_cfg
->queue
);
9851 spin_unlock_irqrestore(&ipr_driver_lock
, driver_lock_flags
);
9853 if (ioa_cfg
->sdt_state
== ABORT_DUMP
)
9854 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
9855 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, host_lock_flags
);
9857 ipr_free_all_resources(ioa_cfg
);
9863 * ipr_remove - IOA hot plug remove entry point
9864 * @pdev: pci device struct
9866 * Adapter hot plug remove entry point.
9871 static void ipr_remove(struct pci_dev
*pdev
)
9873 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
9877 ipr_remove_trace_file(&ioa_cfg
->host
->shost_dev
.kobj
,
9879 ipr_remove_dump_file(&ioa_cfg
->host
->shost_dev
.kobj
,
9881 scsi_remove_host(ioa_cfg
->host
);
9889 * ipr_probe - Adapter hot plug add entry point
9892 * 0 on success / non-zero on failure
9894 static int ipr_probe(struct pci_dev
*pdev
, const struct pci_device_id
*dev_id
)
9896 struct ipr_ioa_cfg
*ioa_cfg
;
9899 rc
= ipr_probe_ioa(pdev
, dev_id
);
9904 ioa_cfg
= pci_get_drvdata(pdev
);
9905 rc
= ipr_probe_ioa_part2(ioa_cfg
);
9912 rc
= scsi_add_host(ioa_cfg
->host
, &pdev
->dev
);
9919 rc
= ipr_create_trace_file(&ioa_cfg
->host
->shost_dev
.kobj
,
9923 scsi_remove_host(ioa_cfg
->host
);
9928 rc
= ipr_create_dump_file(&ioa_cfg
->host
->shost_dev
.kobj
,
9932 ipr_remove_trace_file(&ioa_cfg
->host
->shost_dev
.kobj
,
9934 scsi_remove_host(ioa_cfg
->host
);
9939 scsi_scan_host(ioa_cfg
->host
);
9940 ipr_scan_vsets(ioa_cfg
);
9941 scsi_add_device(ioa_cfg
->host
, IPR_IOA_BUS
, IPR_IOA_TARGET
, IPR_IOA_LUN
);
9942 ioa_cfg
->allow_ml_add_del
= 1;
9943 ioa_cfg
->host
->max_channel
= IPR_VSET_BUS
;
9944 ioa_cfg
->iopoll_weight
= ioa_cfg
->chip_cfg
->iopoll_weight
;
9946 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
9947 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++) {
9948 blk_iopoll_init(&ioa_cfg
->hrrq
[i
].iopoll
,
9949 ioa_cfg
->iopoll_weight
, ipr_iopoll
);
9950 blk_iopoll_enable(&ioa_cfg
->hrrq
[i
].iopoll
);
9954 schedule_work(&ioa_cfg
->work_q
);
9959 * ipr_shutdown - Shutdown handler.
9960 * @pdev: pci device struct
9962 * This function is invoked upon system shutdown/reboot. It will issue
9963 * an adapter shutdown to the adapter to flush the write cache.
9968 static void ipr_shutdown(struct pci_dev
*pdev
)
9970 struct ipr_ioa_cfg
*ioa_cfg
= pci_get_drvdata(pdev
);
9971 unsigned long lock_flags
= 0;
9974 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
9975 if (ioa_cfg
->iopoll_weight
&& ioa_cfg
->sis64
&& ioa_cfg
->nvectors
> 1) {
9976 ioa_cfg
->iopoll_weight
= 0;
9977 for (i
= 1; i
< ioa_cfg
->hrrq_num
; i
++)
9978 blk_iopoll_disable(&ioa_cfg
->hrrq
[i
].iopoll
);
9981 while (ioa_cfg
->in_reset_reload
) {
9982 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
9983 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
9984 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
9987 ipr_initiate_ioa_bringdown(ioa_cfg
, IPR_SHUTDOWN_NORMAL
);
9988 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
9989 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
9992 static struct pci_device_id ipr_pci_table
[] = {
9993 { PCI_VENDOR_ID_MYLEX
, PCI_DEVICE_ID_IBM_GEMSTONE
,
9994 PCI_VENDOR_ID_IBM
, IPR_SUBS_DEV_ID_5702
, 0, 0, 0 },
9995 { PCI_VENDOR_ID_MYLEX
, PCI_DEVICE_ID_IBM_GEMSTONE
,
9996 PCI_VENDOR_ID_IBM
, IPR_SUBS_DEV_ID_5703
, 0, 0, 0 },
9997 { PCI_VENDOR_ID_MYLEX
, PCI_DEVICE_ID_IBM_GEMSTONE
,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
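/*
 * Note on the ID table above: each entry follows the struct pci_device_id
 * layout (vendor, device, subvendor, subdevice, class, class_mask,
 * driver_data), so the last field of every initializer is driver_data.
 * Entries ending in 0 carry no special flags; entries ending in
 * IPR_USE_LONG_TRANSOP_TIMEOUT and/or IPR_USE_PCI_WARM_RESET use
 * driver_data to ask probe-time code to extend the transition-to-
 * operational timeout or to prefer a PCI warm reset for that board,
 * roughly like the sketch below (illustrative only; the exact field
 * names used in ipr_probe_ioa() may differ):
 *
 *	if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
 *		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
 *
 * The zeroed sentinel entry terminates the table, and
 * MODULE_DEVICE_TABLE(pci, ...) exports it so userspace hotplug tools
 * can map PCI IDs to this module.
 */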
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};
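/*
 * These are the standard struct pci_error_handlers hooks used by the
 * PCI AER/EEH recovery core: error_detected() is called when the
 * platform isolates the adapter, mmio_enabled() once MMIO access is
 * restored, and slot_reset() after the slot has been reset so the
 * driver can bring the IOA back to an operational state.
 */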
static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
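/*
 * ipr_init() hands this descriptor to the PCI core via
 * pci_register_driver(); the core then walks ipr_pci_table and calls
 * ipr_probe() for every matching adapter, ipr_remove() on hot unplug or
 * driver unload, and ipr_shutdown() when the system goes down.
 */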
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
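/*
 * The completion above is deliberately minimal: by the time it runs the
 * system is already rebooting or powering off, so the command block is
 * simply returned to its HRRQ free queue and nothing waits on the result.
 */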
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	notifier event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:	notifier data (unused)
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}
static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};
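/*
 * ipr_notifier uses the positional struct notifier_block layout
 * (notifier_call, next, priority), so ipr_halt() is invoked at default
 * priority on reboot, halt, and power-off. While walking ipr_ioa_head it
 * holds ipr_driver_lock and nests each adapter's host_lock inside it,
 * skipping adapters that are not currently accepting commands.
 */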
/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}
/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}
module_init(ipr_init);
module_exit(ipr_exit);
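/*
 * When built as a module this driver is typically loaded with
 * "modprobe ipr"; module_init()/module_exit() above wire ipr_init() and
 * ipr_exit() to that load/unload path, while a built-in configuration
 * calls ipr_init() automatically during boot.
 */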