/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- SCSI device hot plug
 *
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"		/* driver-private IPR_* constants and structures */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.cache_line_size = 0x20,
		.set_interrupt_mask_reg = 0x0022C,
		.clr_interrupt_mask_reg = 0x00230,
		.clr_interrupt_mask_reg32 = 0x00230,
		.sense_interrupt_mask_reg = 0x0022C,
		.sense_interrupt_mask_reg32 = 0x0022C,
		.clr_interrupt_reg = 0x00228,
		.clr_interrupt_reg32 = 0x00228,
		.sense_interrupt_reg = 0x00224,
		.sense_interrupt_reg32 = 0x00224,
		.ioarrin_reg = 0x00404,
		.sense_uproc_interrupt_reg = 0x00214,
		.sense_uproc_interrupt_reg32 = 0x00214,
		.set_uproc_interrupt_reg = 0x00214,
		.set_uproc_interrupt_reg32 = 0x00214,
		.clr_uproc_interrupt_reg = 0x00218,
		.clr_uproc_interrupt_reg32 = 0x00218
	},
	{ /* Snipe and Scamp */
		.cache_line_size = 0x20,
		.set_interrupt_mask_reg = 0x00288,
		.clr_interrupt_mask_reg = 0x0028C,
		.clr_interrupt_mask_reg32 = 0x0028C,
		.sense_interrupt_mask_reg = 0x00288,
		.sense_interrupt_mask_reg32 = 0x00288,
		.clr_interrupt_reg = 0x00284,
		.clr_interrupt_reg32 = 0x00284,
		.sense_interrupt_reg = 0x00280,
		.sense_interrupt_reg32 = 0x00280,
		.ioarrin_reg = 0x00504,
		.sense_uproc_interrupt_reg = 0x00290,
		.sense_uproc_interrupt_reg32 = 0x00290,
		.set_uproc_interrupt_reg = 0x00290,
		.set_uproc_interrupt_reg32 = 0x00290,
		.clr_uproc_interrupt_reg = 0x00294,
		.clr_uproc_interrupt_reg32 = 0x00294
	},
	{ /* SIS64 (CRoC/Crocodile) based adapters, see ipr_chip[] below */
		.cache_line_size = 0x20,
		.set_interrupt_mask_reg = 0x00010,
		.clr_interrupt_mask_reg = 0x00018,
		.clr_interrupt_mask_reg32 = 0x0001C,
		.sense_interrupt_mask_reg = 0x00010,
		.sense_interrupt_mask_reg32 = 0x00014,
		.clr_interrupt_reg = 0x00008,
		.clr_interrupt_reg32 = 0x0000C,
		.sense_interrupt_reg = 0x00000,
		.sense_interrupt_reg32 = 0x00004,
		.ioarrin_reg = 0x00070,
		.sense_uproc_interrupt_reg = 0x00020,
		.sense_uproc_interrupt_reg32 = 0x00024,
		.set_uproc_interrupt_reg = 0x00020,
		.set_uproc_interrupt_reg32 = 0x00024,
		.clr_uproc_interrupt_reg = 0x00028,
		.clr_uproc_interrupt_reg32 = 0x0002C,
		.init_feedback_reg = 0x0005C,
		.dump_addr_reg = 0x00064,
		.dump_data_reg = 0x00068,
		.endian_swap_reg = 0x00084
	}
};
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
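/*
 * Each ipr_chip[] entry above pairs a PCI vendor/device ID with the driver
 * handling flags (LSI vs. MSI interrupt usage, SIS32 vs. SIS64 interface
 * generation, config-space vs. MMIO access) and points at the ipr_chip_cfg[]
 * register-offset set used for that chip family.
 */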
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
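/*
 * Illustrative example: when the driver is built as a module, the parameters
 * declared above can be set at load time, e.g.
 *
 *	modprobe ipr max_speed=2 log_level=3 number_of_msix=4
 *
 * or on the kernel command line as "ipr.max_speed=2" when built in.
 */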
/*  A constant array of IOASCs/URCs/Error Messages */
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	"Logical Unit in process of becoming ready"},
	"Initializing command required"},
	"34FF: Disk device format in progress"},
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	"Synchronization required"},
	"IOA microcode download required"},
	"Device bus connection is prohibited by host"},
	"No ready, IOA shutdown"},
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	"ATA device status error"},
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, commands not allowed to this device"},
	"Illegal request, command not allowed to a secondary adapter"},
	"Illegal request, command not allowed to a non-optimized resource"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"Illegal request, dual adapter support not enabled"},
	"Illegal request, another cable connector was physically disabled"},
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	"Data protect, other volume set problem"},
	"Aborted command, invalid descriptor"},
	"Target operating conditions have changed, dual adapter takeover"},
	"Aborted command, medium removal prevented"},
	"Command terminated by host"},
	"Aborted command, command terminated by host"}
};
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @add_data:	additional data
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
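/*
 * The trace hook above records one fixed-size entry per command event into
 * ioa_cfg->trace[]; because the index is advanced atomically and masked with
 * IPR_TRACE_INDEX_MASK, the buffer behaves as a circular log that silently
 * overwrites the oldest entries.
 */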
/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}
/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}
/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					 temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
}
/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits of the IOARCB address before writing it to the adapter.
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}
/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}
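/*
 * With a single HRRQ everything maps to queue 0.  With multiple queues the
 * modulo above yields values in the range 1..hrrq_num-1, so queue 0 is left
 * out of the round-robin rotation for these requests.
 */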
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}
/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @len:	length of buffer provided
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}
/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @len:	length of buffer provided
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (buffer - p));

	return buffer;
}
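/*
 * For example, a path formatted by the helpers above looks like
 * "2/00-0A-03": the SCSI host number, then each resource path element
 * printed as a two-digit hex value separated by dashes.
 */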
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:		string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 *	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
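/*
 * Worked example: strip_and_pad_whitespace(7, "IBM     ") trims the run of
 * trailing blanks, leaves the buffer as "IBM " (one pad space, then NUL) and
 * returns 4, the offset at which the caller appends the next field.
 */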
/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn struct
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}
/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn/wwn struct
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:		vendor/product id/sn/wwn struct
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_sis64_config_error - Log a device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err("Device %d : %s", i + 1,
			__ipr_format_res_path(dev_entry->res_path,
					      buffer, sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");
	}
}
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
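
/*
 * Sketch (not driver code): callers of ipr_log_hex_data() typically size the
 * dump from the HCAM length minus the fixed headers, e.g. for a type 17
 * overlay:
 *
 *	len = be32_to_cpu(hostrcb->hcam.length) -
 *		(offsetof(struct ipr_hostrcb_error, u) +
 *		 offsetof(struct ipr_hostrcb_type_17_error, data));
 *
 * so only the variable-length tail after the structured fields is hexdumped.
 */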
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
/**
 * ipr_log64_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 **/
static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
				  struct ipr_hostrcb64_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
				     path_active_desc[i].desc, path_state_desc[j].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
							 fabric->res_path,
							 buffer, sizeof(buffer)));
			return;
		}
	}

	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
				    buffer, sizeof(buffer)));
}
static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
 * ipr_log64_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 **/
static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb64_config_element *cfg)
{
	int i, j;
	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
				     path_status_desc[j].desc, path_type_desc[i].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
							 cfg->res_path, buffer, sizeof(buffer)),
				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
				     be32_to_cpu(cfg->wwid[0]),
				     be32_to_cpu(cfg->wwid[1]));
			return;
		}
	}
	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status,
		     ipr_format_res_path(hostrcb->ioa_cfg,
					 cfg->res_path, buffer, sizeof(buffer)),
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
 * ipr_log_sis64_array_error - Log a sis64 array error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
				      struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_24_error *error;
	struct ipr_hostrcb64_array_data_entry *array_entry;
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error64.u.type_24_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %s\n",
		error->protection_level,
		ipr_format_res_path(ioa_cfg, error->last_res_path,
				    buffer, sizeof(buffer)));

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, error->num_entries,
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (error->exposed_mode_adn == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_err("Current Location: %s\n",
			ipr_format_res_path(ioa_cfg, array_entry->res_path,
					    buffer, sizeof(buffer)));
		ipr_err("Expected Location: %s\n",
			ipr_format_res_path(ioa_cfg,
					    array_entry->expected_res_path,
					    buffer, sizeof(buffer)));

		ipr_err_separator;
	}
}
/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}
/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}
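
/*
 * Illustrative sketch only (not used by the driver): the same masked table
 * lookup pattern as ipr_get_error(), on a tiny made-up table.  The mask
 * value and entries below are assumptions for the example; index 0 doubles
 * as the "unknown" fallback, exactly as in ipr_error_table.
 */
static inline int ipr_example_masked_lookup(u32 code)
{
	static const struct {
		u32 code;
		const char *desc;
	} table[] = {
		{ 0x00000000, "unknown" },
		{ 0x01080000, "sample recovered error" },
		{ 0x02040400, "sample device bus error" },
	};
	int i;

	/* Compare only the class/code bits, ignoring low-order qualifiers */
	for (i = 0; i < ARRAY_SIZE(table); i++)
		if (table[i].code == (code & 0xFFFFFF00))
			return i;
	return 0;
}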
/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}
/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 fd_ioasc;

	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_oper_timeout -  Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	struct ipr_std_inq_vpids *vpids;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				vpids = &res->std_inq_data.vpids;
				if (vpids->product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}
/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
			continue;

		if (bus != res->bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
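
/*
 * Worked example (sketch, assuming max_bus_speed_limit is expressed in
 * MB/sec): an Ultra320 enclosure entry (limit 320) on a 16-bit wide bus
 * gives (320 * 10) / (16 / 8) = 1600, i.e. 160 MHz in 100 KHz units,
 * matching the 320 MB/sec wide-bus case described above.
 */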
/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}
/**
 * ipr_get_sis64_dump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
					   u32 start_addr,
					   __be32 *dest, u32 length_in_words)
{
	int i;

	for (i = 0; i < length_in_words; i++) {
		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
		dest++;
	}

	return 0;
}
/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:		ioa config struct
 * @start_addr:		adapter address to dump
 * @dest:		destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	if (ioa_cfg->sis64)
		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
						       dest, length_in_words);

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	if (ioa_cfg->sis64)
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	else
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		schedule();
	}

	return bytes_copied;
}
/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}
/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:	dump struct
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, max_num_entries, start_off, end_off;
	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int valid = 1;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != READ_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	if (ioa_cfg->sis64) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ssleep(IPR_DUMP_DELAY_SECONDS);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	   lengths to gather the real dump data. sdt represents the pointer
	   to the ioa generated dump table. Dump data will be extracted based
	   on entries in this table */
	sdt = &ioa_dump->sdt;

	if (ioa_cfg->sis64) {
		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	} else {
		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
	}

	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
			(max_num_entries * sizeof(struct ipr_sdt_entry));
	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					bytes_to_copy / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > max_num_entries)
		num_entries = max_num_entries;

	/* Update dump length to the actual data to be copied */
	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
	if (ioa_cfg->sis64)
		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
	else
		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > max_dump_size) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
			if (ioa_cfg->sis64)
				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
			else {
				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
				end_off = be32_to_cpu(sdt->entry[i].end_token);

				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
					bytes_to_copy = end_off - start_off;
				else
					valid = 0;
			}
			if (valid) {
				if (bytes_to_copy > max_dump_size) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	wmb();
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}

#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
#endif
/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 **/
static void ipr_release_dump(struct kref *kref)
{
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;
	int i;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	vfree(dump->ioa_dump.ioa_data);
	kfree(dump);
	LEAVE;
}
/**
 * ipr_worker_thread - Worker thread
 * @work:	ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing device from the mid-layer as configuration
 * changes are detected by the adapter.
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == READ_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

restart:
	do {
		did_work = 0;
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    !ioa_cfg->allow_ml_add_del) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				if (!scsi_device_get(sdev)) {
					if (!res->add_to_ml)
						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					else
						res->del_from_ml = 0;
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				break;
			}
		}
	} while (did_work);

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->bus;
			target = res->target;
			lun = res->lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
	LEAVE;
}
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	ssize_t ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
				IPR_TRACE_SIZE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return ret;
}

static struct bin_attribute ipr_trace_attr = {
	.attr =	{
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
#endif
/**
 * ipr_show_fw_version - Show the firmware version
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_fw_version_attr = {
	.attr = {
		.name =		"fw_version",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_version,
};
/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}
/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_store_log_level(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return strlen(buf);
}

static struct device_attribute ipr_log_level_attr = {
	.attr = {
		.name =		"log_level",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};
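
/*
 * Usage sketch (the host number below is only an example):
 *
 *	# cat /sys/class/scsi_host/host0/log_level
 *	2
 *	# echo 4 > /sys/class/scsi_host/host0/log_level
 *
 * Raising log_level lets ipr_handle_log_data() run the detailed overlay
 * decoders above instead of logging only the one-line summary.
 */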
/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

static struct device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name =		"run_diagnostics",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_diagnostics
};
/**
 * ipr_show_adapter_state - Show the adapter's state
 * @dev:	device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
	else
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}
/**
 * ipr_store_adapter_state - Change adapter state
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count, i;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
	    !strncmp(buf, "online", 6)) {
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		wmb();
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name =		"online_state",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};
/**
 * ipr_store_reset_adapter - Reset the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name =		"reset_host",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};
static int ipr_iopoll(struct blk_iopoll *iop, int budget);

/**
 * ipr_show_iopoll_weight - Show ipr polling mode
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_iopoll_weight(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

	return len;
}
/**
 * ipr_store_iopoll_weight - Change the adapter's polling mode
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_store_iopoll_weight(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long user_iopoll_weight;
	unsigned long lock_flags = 0;
	int i;

	if (!ioa_cfg->sis64) {
		dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
		return -EINVAL;
	}
	if (kstrtoul(buf, 10, &user_iopoll_weight))
		return -EINVAL;

	if (user_iopoll_weight > 256) {
		dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
		return -EINVAL;
	}

	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
		dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
		return strlen(buf);
	}

	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
	    ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	spin_lock_irqsave(shost->host_lock, lock_flags);
	ioa_cfg->iopoll_weight = user_iopoll_weight;
	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
	    ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
		}
	}
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

	return strlen(buf);
}

static struct device_attribute ipr_iopoll_weight_attr = {
	.attr = {
		.name =		"iopoll_weight",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_iopoll_weight,
	.store = ipr_store_iopoll_weight
};
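
/*
 * Usage sketch (path and value are examples only):
 *
 *	# echo 64 > /sys/class/scsi_host/host2/iopoll_weight
 *
 * A non-zero weight moves completion handling for the secondary HRRQs into
 * blk-iopoll, with the weight used as the per-poll budget handed to
 * ipr_iopoll().
 */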
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:	buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 * 	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL) {
		ipr_trace;
		return NULL;
	}

	scatterlist = sglist->scatterlist;
	sg_init_table(scatterlist, num_elem);

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			ipr_trace;

			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(sg_page(&scatterlist[j]), order);
			kfree(sglist);
			return NULL;
		}

		sg_set_page(&scatterlist[i], page, 0, 0);
	}

	return sglist;
}
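
/*
 * Worked example (sketch; IPR_MAX_SGLIST is assumed to be 64 and PAGE_SIZE
 * 4 KB): for a 1 MB microcode image the sizing above gives
 *
 *	sg_size    = 1048576 / 63 ~= 16644 bytes
 *	order      = get_order(16644) = 3	(8 pages, 32 KB per element)
 *	bsize_elem = 4096 * (1 << 3) = 32768 bytes
 *	num_elem   = 1048576 / 32768 = 32 scatter/gather entries
 */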
/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:	scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	int i;

	for (i = 0; i < sglist->num_sg; i++)
		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);

	kfree(sglist);
}
/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:	scatter/gather list pointer
 * @buffer:	buffer pointer
 * @len:	buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;
	}

	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}
/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @attr:	device attribute
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	char *endline;
	u8 *src;
	int result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	snprintf(fname, sizeof(fname), "%s", buf);

	endline = strchr(fname, '\n');
	if (endline)
		*endline = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
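/*
 * The update_fw attribute is registered with the SCSI midlayer via
 * ipr_ioa_attrs below, so it appears in the adapter's scsi_host sysfs
 * directory. Writing a firmware image file name loads the image through
 * request_firmware() and starts the download; for example (host number
 * and file name are only illustrative):
 *
 *	echo ibm-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */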
/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:	class device struct
 * @attr:	device attribute
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name =		"fw_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_type
};

static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	&ipr_iopoll_weight_attr,
	NULL,
};
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	u8 *src;
	int len, sdt_end;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
	else
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sdt_end;

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}
/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	__be32 **ioa_data;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	if (ioa_cfg->sis64)
		ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
	else
		ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));

	if (!ioa_data) {
		ipr_err("Dump memory allocation failed\n");
		kfree(dump);
		return -ENOMEM;
	}

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	return 0;
}
/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
#endif
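/*
 * When CONFIG_SCSI_IPR_DUMP is enabled, the "dump" binary attribute
 * above lets user space drive adapter dumps: writing '1' allocates dump
 * memory and arms the dump state machine, writing '0' releases it, and
 * reading returns the driver and IOA dump data once a dump has been
 * obtained.
 */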
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 * @reason:	calling context
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
				  int reason)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:	scsi device struct
 * @tag_type:	type of tags to use
 *
 * Return value:
 *	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res) {
		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
			scsi_set_tag_type(sdev, tag_type);

			if (tag_type)
				scsi_activate_tcq(sdev, sdev->queue_depth);
			else
				scsi_deactivate_tcq(sdev, sdev->queue_depth);
		} else
			tag_type = 0;
	} else
		tag_type = 0;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}
/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name =		"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};
/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       __ipr_format_res_path(res->res_path, buffer,
						     sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name =		"resource_path",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_path
};
/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name =		"device_id",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_device_id
};
/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name =		"resource_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_type
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	NULL,
};
/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
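/*
 * With 128 heads and 32 sectors per track, each cylinder is
 * 128 * 32 = 4096 sectors (2 MiB with 512-byte sectors), so
 * cylinder-aligned partitions created by fdisk automatically fall on
 * 4k boundaries, which is what the IOA prefers.
 */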
/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;
/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (!ipr_find_starget(starget)) {
			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->array_ids);
			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->vset_ids);
			else if (starget->channel == 0)
				clear_bit(starget->id, ioa_cfg->target_ids);
		}
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}
/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}
/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		} else
			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);

		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_res_path(ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port) {
		rc = ata_sas_port_init(sata_port->ap);
		if (rc == 0)
			rc = ata_sas_sync_probe(sata_port->ap);
	}

	if (rc)
		ipr_slave_destroy(sdev);

	return rc;
}
/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
/**
 * ipr_match_lun - Match function for specified LUN
 * @ipr_cmd:	ipr command struct
 * @device:	device to match (sdev)
 *
 * Returns:
 *	1 if command matches sdev / 0 if command does not match sdev
 **/
static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
{
	if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
		return 1;
	return 0;
}
/**
 * ipr_wait_for_ops - Wait for matching commands to complete
 * @ipr_cmd:	ipr command struct
 * @device:	device to match (sdev)
 * @match:	match function to use
 *
 * Returns:
 *	SUCCESS / FAILED
 **/
static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
			    int (*match)(struct ipr_cmnd *, void *))
{
	struct ipr_cmnd *ipr_cmd;
	int wait;
	unsigned long flags;
	struct ipr_hrr_queue *hrrq;
	signed long timeout = IPR_ABORT_TASK_TIMEOUT;
	DECLARE_COMPLETION_ONSTACK(comp);

	do {
		wait = 0;

		for_each_hrrq(hrrq, ioa_cfg) {
			spin_lock_irqsave(hrrq->lock, flags);
			list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
				if (match(ipr_cmd, device)) {
					ipr_cmd->eh_comp = &comp;
					wait++;
				}
			}
			spin_unlock_irqrestore(hrrq->lock, flags);
		}

		if (wait) {
			timeout = wait_for_completion_timeout(&comp, timeout);

			if (!timeout) {
				wait = 0;

				for_each_hrrq(hrrq, ioa_cfg) {
					spin_lock_irqsave(hrrq->lock, flags);
					list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
						if (match(ipr_cmd, device)) {
							ipr_cmd->eh_comp = NULL;
							wait++;
						}
					}
					spin_unlock_irqrestore(hrrq->lock, flags);
				}

				if (wait)
					dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");

				return wait ? FAILED : SUCCESS;
			}
		}
	} while (wait);

	return SUCCESS;
}
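/*
 * ipr_wait_for_ops() works by pointing the eh_comp field of every
 * pending command that matches the supplied device at an on-stack
 * completion; the normal completion path then signals that completion,
 * and the loop repeats until no matching commands remain or the abort
 * timeout expires.
 */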
/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;
	int rc = SUCCESS;

	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter being reset as a result of error recovery.\n");

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason, and the reset failed. */
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		rc = FAILED;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
		if (ipr_cmd->ioa_cfg->sis64)
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
			       sizeof(struct ipr_ioasa_gata));
		else
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
			       sizeof(struct ipr_ioasa_gata));
	}

	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}
/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0;
	struct ipr_hrr_queue *hrrq;

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				if (ipr_cmd->scsi_cmd)
					ipr_cmd->done = ipr_scsi_eh_done;
				if (ipr_cmd->qc)
					ipr_cmd->done = ipr_sata_eh_done;
				if (ipr_cmd->qc &&
				    !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
					ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
					ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
				}
			}
		}
		spin_unlock(&hrrq->_lock);
	}
	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);

		for_each_hrrq(hrrq, ioa_cfg) {
			spin_lock(&hrrq->_lock);
			list_for_each_entry(ipr_cmd,
					    &hrrq->hrrq_pending_q, queue) {
				if (ipr_cmd->ioarcb.res_handle ==
				    res->res_handle) {
					rc = -EIO;
					break;
				}
			}
			spin_unlock(&hrrq->_lock);
		}
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	return rc ? FAILED : SUCCESS;
}

static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;
	struct ipr_ioa_cfg *ioa_cfg;

	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	if (rc == SUCCESS)
		rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);

	return rc;
}
/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc, int_reg;
	int op_found = 0;
	struct ipr_hrr_queue *hrrq;

	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload ||
	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;
	if (!res)
		return FAILED;

	/*
	 * If we are aborting a timed out op, chances are that the timeout was caused
	 * by a still not detected EEH error. In such cases, reading a register will
	 * trigger the EEH recovery infrastructure.
	 */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	if (!ipr_is_gscsi(res))
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
			if (ipr_cmd->scsi_cmd == scsi_cmd) {
				ipr_cmd->done = ipr_scsi_eh_done;
				op_found = 1;
				break;
			}
		}
		spin_unlock(&hrrq->_lock);
	}

	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED)
		ioasc = 0;

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}
/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;
	struct ipr_ioa_cfg *ioa_cfg;

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	if (rc == SUCCESS)
		rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
	return rc;
}
/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;
	u32 int_mask_reg;

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg &= ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				return IRQ_HANDLED;
			}
		}

		return IRQ_NONE;
	}

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
		if (ioa_cfg->clear_isr) {
			if (ipr_debug && printk_ratelimit())
				dev_err(&ioa_cfg->pdev->dev,
					"Spurious interrupt detected. 0x%08X\n", int_reg);
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			return IRQ_NONE;
		}
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
			dev_err(&ioa_cfg->pdev->dev,
				"No Host RRQ. 0x%08X\n", int_reg);
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}
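/*
 * Anything that reaches the final else branch above (unit check, lost
 * host RRQ, or an otherwise unrecognized condition) is treated as a
 * fatal adapter error: interrupts are masked and an IOA reset is
 * initiated, after arming the dump state machine if a dump was pending.
 */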
/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 *
 * Return value:
 *	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}
static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
			    struct list_head *doneq)
{
	u32 ioasc;
	u16 cmd_index;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
	int num_hrrq = 0;

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrr_queue->allow_interrupts)
		return 0;

	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       hrr_queue->toggle_bit) {

		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
			     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
			     IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
			     cmd_index < hrr_queue->min_cmd_id)) {
			ipr_isr_eh(ioa_cfg,
				   "Invalid response handle from IOA: ",
				   cmd_index);
			break;
		}

		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

		list_move_tail(&ipr_cmd->queue, doneq);

		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
			hrr_queue->hrrq_curr++;
		} else {
			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
			hrr_queue->toggle_bit ^= 1u;
		}
		num_hrrq++;
		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	return num_hrrq;
}

static int ipr_iopoll(struct blk_iopoll *iop, int budget)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_hrr_queue *hrrq;
	struct ipr_cmnd *ipr_cmd, *temp;
	unsigned long hrrq_flags;
	int completed_ops;
	LIST_HEAD(doneq);

	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
	ioa_cfg = hrrq->ioa_cfg;

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);

	if (completed_ops < budget)
		blk_iopoll_complete(iop);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}

	return completed_ops;
}
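/*
 * Host request/response queue (HRRQ) processing relies on a toggle bit:
 * the adapter writes response entries whose toggle bit matches the
 * value the driver currently expects, and the driver flips its expected
 * value each time the circular queue wraps. An entry whose toggle bit
 * does not match has not been written by the adapter yet, which is how
 * ipr_process_hrrq() knows when to stop.
 */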
/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	u32 int_reg = 0;
	int num_hrrq = 0;
	int irq_none = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	while (1) {
		if (ipr_process_hrrq(hrrq, -1, &doneq)) {
			rc = IRQ_HANDLED;

			if (!ioa_cfg->clear_isr)
				break;

			/* Clear the PCI interrupt */
			num_hrrq = 0;
			do {
				writel(IPR_PCII_HRRQ_UPDATED,
				       ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

		} else if (rc == IRQ_NONE && irq_none == 0) {
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			irq_none++;
		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
			   int_reg & IPR_PCII_HRRQ_UPDATED) {
			ipr_isr_eh(ioa_cfg,
				   "Error clearing HRRQ: ", num_hrrq);
			rc = IRQ_HANDLED;
			break;
		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}
/**
 * ipr_isr_mhrrq - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
	    ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		    hrrq->toggle_bit) {
			if (!blk_iopoll_sched_prep(&hrrq->iopoll))
				blk_iopoll_sched(&hrrq->iopoll);
			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
			return IRQ_HANDLED;
		}
	} else {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		    hrrq->toggle_bit)

			if (ipr_process_hrrq(hrrq, -1, &doneq))
				rc = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}
/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		if (printk_ratelimit())
			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	ioarcb->data_transfer_length = cpu_to_be32(length);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
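/*
 * The IOADL built above is the adapter's scatter/gather descriptor list
 * for the command: one descriptor per DMA-mapped segment, with the
 * transfer direction encoded in the flags and the final descriptor
 * marked with IPR_IOADL_FLAGS_LAST so the IOA knows where the list ends.
 */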
/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
				    offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
	}

	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
/**
 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	task attributes
 **/
static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
{
	u8 tag[2];
	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;

	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
		switch (tag[0]) {
		case MSG_SIMPLE_TAG:
			rc = IPR_FLAGS_LO_SIMPLE_TASK;
			break;
		case MSG_HEAD_TAG:
			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
			break;
		case MSG_ORDERED_TAG:
			rc = IPR_FLAGS_LO_ORDERED_TASK;
			break;
		};
	}

	return rc;
}
/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}
}
/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}
/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_get_tag_type(scsi_cmd->device)) {
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 *	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->hdr.ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
		data_len = sizeof(struct ipr_ioasa64);
	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
		data_len = sizeof(struct ipr_ioasa);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}
/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 * @sense_buf:	sense data buffer
 *
 * Return value:
 *	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
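/*
 * ipr_gen_sense() produces descriptor-format sense data (response code
 * 0x72) only for vset devices whose failing LBA does not fit in 32
 * bits; every other case uses the fixed format (response code 0x70),
 * optionally with the field pointer or the failing LBA filled into the
 * information bytes.
 */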
/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;

	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	else
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
			   SCSI_SENSE_BUFFERSIZE));
	return 1;
}
/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}

/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	unsigned long lock_flags;

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(scsi_cmd);

		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		scsi_cmd->scsi_done(scsi_cmd);
		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
	} else {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		spin_lock(&ipr_cmd->hrrq->_lock);
		ipr_erp_start(ioa_cfg, ipr_cmd);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	}
}
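
/*
 * Note on the two completion paths above: the successful case takes only the
 * per-HRRQ lock, so completions on different HRR queues do not contend with
 * each other.  The error case falls into ipr_erp_start(), which touches
 * error-recovery state shared with the SCSI midlayer, so it is serialized
 * under the Scsi_Host host_lock first and the HRRQ _lock second.
 */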

/**
 * ipr_queuecommand - Queue a mid-layer request
 * @shost:	scsi host struct
 * @scsi_cmd:	scsi command struct
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct Scsi_Host *shost,
			    struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	unsigned long hrrq_flags, lock_flags;
	int rc;
	struct ipr_hrr_queue *hrrq;
	int hrrq_id;

	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	scsi_cmd->result = (DID_OK << 16);
	res = scsi_cmd->device->hostdata;

	if (ipr_is_gata(res) && res->sata_port) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return rc;
	}

	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
	hrrq = &ioa_cfg->hrrq[hrrq_id];

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		goto err_nodev;
	}

	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
	if (ipr_cmd == NULL) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
	ioarcb = &ipr_cmd->ioarcb;

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ipr_cmd->done = ipr_scsi_eh_done;

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		if (ipr_is_gscsi(res))
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
	}

	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	}

	if (ioa_cfg->sis64)
		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
	else
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		if (!rc)
			scsi_dma_unmap(scsi_cmd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	if (unlikely(hrrq->ioa_is_dead)) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		scsi_dma_unmap(scsi_cmd);
		goto err_nodev;
	}

	ioarcb->res_handle = res->res_handle;
	if (res->needs_sync_complete) {
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
		res->needs_sync_complete = 0;
	}
	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
	ipr_send_command(ipr_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;

err_nodev:
	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_cmd->result = (DID_NO_CONNECT << 16);
	scsi_cmd->scsi_done(scsi_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;
}
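
/*
 * Rough flow of ipr_queuecommand() above, for reference: a host request
 * response queue (HRRQ) is selected by index, a free ipr_cmnd is taken from
 * that queue's free list under its lock, the CDB and scatter/gather list
 * (IOADL) are built with the lock dropped, and the command is then moved to
 * hrrq_pending_q and handed to the adapter via ipr_send_command() after the
 * allow_cmds/ioa_is_dead state is re-checked under the lock.  GATA (SATA)
 * devices bypass this path entirely and are routed through libata with
 * ata_sas_queuecmd().
 */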

/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res)) {
		if (cmd == HDIO_GET_IDENTITY)
			return -ENOTTY;
		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
	}

	return -EINVAL;
}

/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 *	pointer to buffer with description string
 **/
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}

static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.info = ipr_ioa_info,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME,
};
6417 * ipr_ata_phy_reset - libata phy_reset handler
6418 * @ap: ata port to reset
6421 static void ipr_ata_phy_reset(struct ata_port
*ap
)
6423 unsigned long flags
;
6424 struct ipr_sata_port
*sata_port
= ap
->private_data
;
6425 struct ipr_resource_entry
*res
= sata_port
->res
;
6426 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6430 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6431 while (ioa_cfg
->in_reset_reload
) {
6432 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6433 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
6434 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6437 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
)
6440 rc
= ipr_device_reset(ioa_cfg
, res
);
6443 ap
->link
.device
[0].class = ATA_DEV_NONE
;
6447 ap
->link
.device
[0].class = res
->ata_class
;
6448 if (ap
->link
.device
[0].class == ATA_DEV_UNKNOWN
)
6449 ap
->link
.device
[0].class = ATA_DEV_NONE
;
6452 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6457 * ipr_ata_post_internal - Cleanup after an internal command
6458 * @qc: ATA queued command
6463 static void ipr_ata_post_internal(struct ata_queued_cmd
*qc
)
6465 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
6466 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6467 struct ipr_cmnd
*ipr_cmd
;
6468 struct ipr_hrr_queue
*hrrq
;
6469 unsigned long flags
;
6471 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6472 while (ioa_cfg
->in_reset_reload
) {
6473 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6474 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
6475 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6478 for_each_hrrq(hrrq
, ioa_cfg
) {
6479 spin_lock(&hrrq
->_lock
);
6480 list_for_each_entry(ipr_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
6481 if (ipr_cmd
->qc
== qc
) {
6482 ipr_device_reset(ioa_cfg
, sata_port
->res
);
6486 spin_unlock(&hrrq
->_lock
);
6488 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);

/**
 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
 * @regs:	destination
 * @tf:	source ATA taskfile
 *
 * Return value:
 *	none
 **/
static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
			     struct ata_taskfile *tf)
{
	regs->feature = tf->feature;
	regs->nsect = tf->nsect;
	regs->lbal = tf->lbal;
	regs->lbam = tf->lbam;
	regs->lbah = tf->lbah;
	regs->device = tf->device;
	regs->command = tf->command;
	regs->hob_feature = tf->hob_feature;
	regs->hob_nsect = tf->hob_nsect;
	regs->hob_lbal = tf->hob_lbal;
	regs->hob_lbam = tf->hob_lbam;
	regs->hob_lbah = tf->hob_lbah;
	regs->ctl = tf->ctl;
}
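
/*
 * Both the classic and the HOB ("high order byte") register copies are
 * forwarded above, which is what lets 48-bit LBA commands pass through to
 * the IOA unchanged.  For reference (sketch, not driver code), libata packs
 * a 48-bit LBA across the two register sets roughly as:
 *
 *	tf->lbal     = lba & 0xff;
 *	tf->lbam     = (lba >>  8) & 0xff;
 *	tf->lbah     = (lba >> 16) & 0xff;
 *	tf->hob_lbal = (lba >> 24) & 0xff;
 *	tf->hob_lbam = (lba >> 32) & 0xff;
 *	tf->hob_lbah = (lba >> 40) & 0xff;
 */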

/**
 * ipr_sata_done - done function for SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer to SATA devices
 *
 * Return value:
 *	none
 **/
static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	spin_lock(&ipr_cmd->hrrq->_lock);
	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	else
		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
		       sizeof(struct ipr_ioasa_gata));
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);

	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
	else
		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	spin_unlock(&ipr_cmd->hrrq->_lock);
	ata_qc_complete(qc);
}
6557 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6558 * @ipr_cmd: ipr command struct
6559 * @qc: ATA queued command
6562 static void ipr_build_ata_ioadl64(struct ipr_cmnd
*ipr_cmd
,
6563 struct ata_queued_cmd
*qc
)
6565 u32 ioadl_flags
= 0;
6566 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6567 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ata_ioadl
.ioadl64
;
6568 struct ipr_ioadl64_desc
*last_ioadl64
= NULL
;
6569 int len
= qc
->nbytes
;
6570 struct scatterlist
*sg
;
6572 dma_addr_t dma_addr
= ipr_cmd
->dma_addr
;
6577 if (qc
->dma_dir
== DMA_TO_DEVICE
) {
6578 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
6579 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6580 } else if (qc
->dma_dir
== DMA_FROM_DEVICE
)
6581 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
6583 ioarcb
->data_transfer_length
= cpu_to_be32(len
);
6585 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
) * ipr_cmd
->dma_use_sg
);
6586 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
6587 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ata_ioadl
.ioadl64
));
6589 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
6590 ioadl64
->flags
= cpu_to_be32(ioadl_flags
);
6591 ioadl64
->data_len
= cpu_to_be32(sg_dma_len(sg
));
6592 ioadl64
->address
= cpu_to_be64(sg_dma_address(sg
));
6594 last_ioadl64
= ioadl64
;
6598 if (likely(last_ioadl64
))
6599 last_ioadl64
->flags
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
6603 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6604 * @ipr_cmd: ipr command struct
6605 * @qc: ATA queued command
6608 static void ipr_build_ata_ioadl(struct ipr_cmnd
*ipr_cmd
,
6609 struct ata_queued_cmd
*qc
)
6611 u32 ioadl_flags
= 0;
6612 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6613 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
6614 struct ipr_ioadl_desc
*last_ioadl
= NULL
;
6615 int len
= qc
->nbytes
;
6616 struct scatterlist
*sg
;
6622 if (qc
->dma_dir
== DMA_TO_DEVICE
) {
6623 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
6624 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6625 ioarcb
->data_transfer_length
= cpu_to_be32(len
);
6627 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6628 } else if (qc
->dma_dir
== DMA_FROM_DEVICE
) {
6629 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
6630 ioarcb
->read_data_transfer_length
= cpu_to_be32(len
);
6631 ioarcb
->read_ioadl_len
=
6632 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6635 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
6636 ioadl
->flags_and_data_len
= cpu_to_be32(ioadl_flags
| sg_dma_len(sg
));
6637 ioadl
->address
= cpu_to_be32(sg_dma_address(sg
));
6643 if (likely(last_ioadl
))
6644 last_ioadl
->flags_and_data_len
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);

/**
 * ipr_qc_defer - Get a free ipr_cmd
 * @qc:	queued command
 **/
static int ipr_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_hrr_queue *hrrq;
	int hrrq_id;

	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
	hrrq = &ioa_cfg->hrrq[hrrq_id];

	qc->lldd_task = NULL;
	spin_lock(&hrrq->_lock);
	if (unlikely(hrrq->ioa_is_dead)) {
		spin_unlock(&hrrq->_lock);
		return 0;
	}

	if (unlikely(!hrrq->allow_cmds)) {
		spin_unlock(&hrrq->_lock);
		return ATA_DEFER_LINK;
	}

	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
	if (ipr_cmd == NULL) {
		spin_unlock(&hrrq->_lock);
		return ATA_DEFER_LINK;
	}

	qc->lldd_task = ipr_cmd;
	spin_unlock(&hrrq->_lock);
	return 0;
}
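
/*
 * qc->lldd_task is used here as the hand-off slot between ->qc_defer() and
 * ->qc_issue(): the ipr_cmnd reserved above is stashed in it and picked back
 * up in ipr_qc_issue().  Returning 0 rather than ATA_DEFER_LINK when the IOA
 * is dead appears intentional, so the command is issued and then failed with
 * AC_ERR_SYSTEM instead of being deferred indefinitely.
 */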

/**
 * ipr_qc_issue - Issue a SATA qc to a device
 * @qc:	queued command
 **/
static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ipr_sata_port *sata_port = ap->private_data;
	struct ipr_resource_entry *res = sata_port->res;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_ioarcb_ata_regs *regs;

	if (qc->lldd_task == NULL)
		ipr_qc_defer(qc);

	ipr_cmd = qc->lldd_task;
	if (ipr_cmd == NULL)
		return AC_ERR_SYSTEM;

	qc->lldd_task = NULL;
	spin_lock(&ipr_cmd->hrrq->_lock);
	if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
			ipr_cmd->hrrq->ioa_is_dead)) {
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		return AC_ERR_SYSTEM;
	}

	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	ioarcb = &ipr_cmd->ioarcb;

	if (ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	memset(regs, 0, sizeof(*regs));
	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->qc = qc;
	ipr_cmd->done = ipr_sata_done;
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	ipr_cmd->dma_use_sg = qc->n_elem;

	if (ioa_cfg->sis64)
		ipr_build_ata_ioadl64(ipr_cmd, qc);
	else
		ipr_build_ata_ioadl(ipr_cmd, qc);

	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	ipr_copy_sata_tf(regs, &qc->tf);
	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
	case ATA_PROT_PIO:
		break;

	case ATA_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		break;

	case ATAPI_PROT_DMA:
		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
		break;

	default:
		spin_unlock(&ipr_cmd->hrrq->_lock);
		return AC_ERR_INVALID;
	}

	ipr_send_command(ipr_cmd);
	spin_unlock(&ipr_cmd->hrrq->_lock);

	return 0;
}

/**
 * ipr_qc_fill_rtf - Read result TF
 * @qc:	ATA queued command
 *
 * Return value:
 *	true
 **/
static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ipr_sata_port *sata_port = qc->ap->private_data;
	struct ipr_ioasa_gata *g = &sata_port->ioasa;
	struct ata_taskfile *tf = &qc->result_tf;

	tf->feature = g->error;
	tf->nsect = g->nsect;
	tf->lbal = g->lbal;
	tf->lbam = g->lbam;
	tf->lbah = g->lbah;
	tf->device = g->device;
	tf->command = g->status;
	tf->hob_nsect = g->hob_nsect;
	tf->hob_lbal = g->hob_lbal;
	tf->hob_lbam = g->hob_lbam;
	tf->hob_lbah = g->hob_lbah;

	return true;
}

static struct ata_port_operations ipr_sata_ops = {
	.phy_reset = ipr_ata_phy_reset,
	.hardreset = ipr_sata_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.qc_prep = ata_noop_qc_prep,
	.qc_defer = ipr_qc_defer,
	.qc_issue = ipr_qc_issue,
	.qc_fill_rtf = ipr_qc_fill_rtf,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};

static struct ata_port_info sata_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask	= ATA_PIO4_ONLY,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ipr_sata_ops
};
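
/*
 * The port info above advertises a SATA port capable of DMA data transfer
 * (ATA_FLAG_PIO_DMA) with transfer modes up to PIO4, MWDMA2 and UDMA6.  The
 * ops table deliberately carries no ->set_piomode/->set_dmamode hooks, so
 * any transfer-mode programming is left to the adapter rather than done by
 * this driver.
 */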
6832 #ifdef CONFIG_PPC_PSERIES
6833 static const u16 ipr_blocked_processors
[] = {
6845 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6846 * @ioa_cfg: ioa cfg struct
6848 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6849 * certain pSeries hardware. This function determines if the given
6850 * adapter is in one of these confgurations or not.
6853 * 1 if adapter is not supported / 0 if adapter is supported
6855 static int ipr_invalid_adapter(struct ipr_ioa_cfg
*ioa_cfg
)
6859 if ((ioa_cfg
->type
== 0x5702) && (ioa_cfg
->pdev
->revision
< 4)) {
6860 for (i
= 0; i
< ARRAY_SIZE(ipr_blocked_processors
); i
++) {
6861 if (pvr_version_is(ipr_blocked_processors
[i
]))
6868 #define ipr_invalid_adapter(ioa_cfg) 0
6872 * ipr_ioa_bringdown_done - IOA bring down completion.
6873 * @ipr_cmd: ipr command struct
6875 * This function processes the completion of an adapter bring down.
6876 * It wakes any reset sleepers.
6881 static int ipr_ioa_bringdown_done(struct ipr_cmnd
*ipr_cmd
)
6883 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
6887 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].removing_ioa
) {
6889 spin_unlock_irq(ioa_cfg
->host
->host_lock
);
6890 scsi_unblock_requests(ioa_cfg
->host
);
6891 spin_lock_irq(ioa_cfg
->host
->host_lock
);
6894 ioa_cfg
->in_reset_reload
= 0;
6895 ioa_cfg
->reset_retries
= 0;
6896 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
6897 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
6898 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 1;
6899 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
6903 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6904 wake_up_all(&ioa_cfg
->reset_wait_q
);
6907 return IPR_RC_JOB_RETURN
;
6911 * ipr_ioa_reset_done - IOA reset completion.
6912 * @ipr_cmd: ipr command struct
6914 * This function processes the completion of an adapter reset.
6915 * It schedules any necessary mid-layer add/removes and
6916 * wakes any reset sleepers.
6921 static int ipr_ioa_reset_done(struct ipr_cmnd
*ipr_cmd
)
6923 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
6924 struct ipr_resource_entry
*res
;
6925 struct ipr_hostrcb
*hostrcb
, *temp
;
6929 ioa_cfg
->in_reset_reload
= 0;
6930 for (j
= 0; j
< ioa_cfg
->hrrq_num
; j
++) {
6931 spin_lock(&ioa_cfg
->hrrq
[j
]._lock
);
6932 ioa_cfg
->hrrq
[j
].allow_cmds
= 1;
6933 spin_unlock(&ioa_cfg
->hrrq
[j
]._lock
);
6936 ioa_cfg
->reset_cmd
= NULL
;
6937 ioa_cfg
->doorbell
|= IPR_RUNTIME_RESET
;
6939 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
6940 if (ioa_cfg
->allow_ml_add_del
&& (res
->add_to_ml
|| res
->del_from_ml
)) {
6945 schedule_work(&ioa_cfg
->work_q
);
6947 list_for_each_entry_safe(hostrcb
, temp
, &ioa_cfg
->hostrcb_free_q
, queue
) {
6948 list_del(&hostrcb
->queue
);
6949 if (i
++ < IPR_NUM_LOG_HCAMS
)
6950 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_LOG_DATA
, hostrcb
);
6952 ipr_send_hcam(ioa_cfg
, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
, hostrcb
);
6955 scsi_report_bus_reset(ioa_cfg
->host
, IPR_VSET_BUS
);
6956 dev_info(&ioa_cfg
->pdev
->dev
, "IOA initialized.\n");
6958 ioa_cfg
->reset_retries
= 0;
6959 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6960 wake_up_all(&ioa_cfg
->reset_wait_q
);
6962 spin_unlock(ioa_cfg
->host
->host_lock
);
6963 scsi_unblock_requests(ioa_cfg
->host
);
6964 spin_lock(ioa_cfg
->host
->host_lock
);
6966 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
)
6967 scsi_block_requests(ioa_cfg
->host
);
6970 return IPR_RC_JOB_RETURN
;
6974 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6975 * @supported_dev: supported device struct
6976 * @vpids: vendor product id struct
6981 static void ipr_set_sup_dev_dflt(struct ipr_supported_device
*supported_dev
,
6982 struct ipr_std_inq_vpids
*vpids
)
6984 memset(supported_dev
, 0, sizeof(struct ipr_supported_device
));
6985 memcpy(&supported_dev
->vpids
, vpids
, sizeof(struct ipr_std_inq_vpids
));
6986 supported_dev
->num_records
= 1;
6987 supported_dev
->data_length
=
6988 cpu_to_be16(sizeof(struct ipr_supported_device
));
6989 supported_dev
->reserved
= 0;
6993 * ipr_set_supported_devs - Send Set Supported Devices for a device
6994 * @ipr_cmd: ipr command struct
6996 * This function sends a Set Supported Devices to the adapter
6999 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7001 static int ipr_set_supported_devs(struct ipr_cmnd
*ipr_cmd
)
7003 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7004 struct ipr_supported_device
*supp_dev
= &ioa_cfg
->vpd_cbs
->supp_dev
;
7005 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7006 struct ipr_resource_entry
*res
= ipr_cmd
->u
.res
;
7008 ipr_cmd
->job_step
= ipr_ioa_reset_done
;
7010 list_for_each_entry_continue(res
, &ioa_cfg
->used_res_q
, queue
) {
7011 if (!ipr_is_scsi_disk(res
))
7014 ipr_cmd
->u
.res
= res
;
7015 ipr_set_sup_dev_dflt(supp_dev
, &res
->std_inq_data
.vpids
);
7017 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7018 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
7019 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7021 ioarcb
->cmd_pkt
.cdb
[0] = IPR_SET_SUPPORTED_DEVICES
;
7022 ioarcb
->cmd_pkt
.cdb
[1] = IPR_SET_ALL_SUPPORTED_DEVICES
;
7023 ioarcb
->cmd_pkt
.cdb
[7] = (sizeof(struct ipr_supported_device
) >> 8) & 0xff;
7024 ioarcb
->cmd_pkt
.cdb
[8] = sizeof(struct ipr_supported_device
) & 0xff;
7026 ipr_init_ioadl(ipr_cmd
,
7027 ioa_cfg
->vpd_cbs_dma
+
7028 offsetof(struct ipr_misc_cbs
, supp_dev
),
7029 sizeof(struct ipr_supported_device
),
7030 IPR_IOADL_FLAGS_WRITE_LAST
);
7032 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
7033 IPR_SET_SUP_DEVICE_TIMEOUT
);
7035 if (!ioa_cfg
->sis64
)
7036 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7038 return IPR_RC_JOB_RETURN
;
7042 return IPR_RC_JOB_CONTINUE
;
7046 * ipr_get_mode_page - Locate specified mode page
7047 * @mode_pages: mode page buffer
7048 * @page_code: page code to find
7049 * @len: minimum required length for mode page
7052 * pointer to mode page / NULL on failure
7054 static void *ipr_get_mode_page(struct ipr_mode_pages
*mode_pages
,
7055 u32 page_code
, u32 len
)
7057 struct ipr_mode_page_hdr
*mode_hdr
;
7061 if (!mode_pages
|| (mode_pages
->hdr
.length
== 0))
7064 length
= (mode_pages
->hdr
.length
+ 1) - 4 - mode_pages
->hdr
.block_desc_len
;
7065 mode_hdr
= (struct ipr_mode_page_hdr
*)
7066 (mode_pages
->data
+ mode_pages
->hdr
.block_desc_len
);
7069 if (IPR_GET_MODE_PAGE_CODE(mode_hdr
) == page_code
) {
7070 if (mode_hdr
->page_length
>= (len
- sizeof(struct ipr_mode_page_hdr
)))
7074 page_length
= (sizeof(struct ipr_mode_page_hdr
) +
7075 mode_hdr
->page_length
);
7076 length
-= page_length
;
7077 mode_hdr
= (struct ipr_mode_page_hdr
*)
7078 ((unsigned long)mode_hdr
+ page_length
);
7085 * ipr_check_term_power - Check for term power errors
7086 * @ioa_cfg: ioa config struct
7087 * @mode_pages: IOAFP mode pages buffer
7089 * Check the IOAFP's mode page 28 for term power errors
7094 static void ipr_check_term_power(struct ipr_ioa_cfg
*ioa_cfg
,
7095 struct ipr_mode_pages
*mode_pages
)
7099 struct ipr_dev_bus_entry
*bus
;
7100 struct ipr_mode_page28
*mode_page
;
7102 mode_page
= ipr_get_mode_page(mode_pages
, 0x28,
7103 sizeof(struct ipr_mode_page28
));
7105 entry_length
= mode_page
->entry_length
;
7107 bus
= mode_page
->bus
;
7109 for (i
= 0; i
< mode_page
->num_entries
; i
++) {
7110 if (bus
->flags
& IPR_SCSI_ATTR_NO_TERM_PWR
) {
7111 dev_err(&ioa_cfg
->pdev
->dev
,
7112 "Term power is absent on scsi bus %d\n",
7116 bus
= (struct ipr_dev_bus_entry
*)((char *)bus
+ entry_length
);
7121 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7122 * @ioa_cfg: ioa config struct
7124 * Looks through the config table checking for SES devices. If
7125 * the SES device is in the SES table indicating a maximum SCSI
7126 * bus speed, the speed is limited for the bus.
7131 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg
*ioa_cfg
)
7136 for (i
= 0; i
< IPR_MAX_NUM_BUSES
; i
++) {
7137 max_xfer_rate
= ipr_get_max_scsi_speed(ioa_cfg
, i
,
7138 ioa_cfg
->bus_attr
[i
].bus_width
);
7140 if (max_xfer_rate
< ioa_cfg
->bus_attr
[i
].max_xfer_rate
)
7141 ioa_cfg
->bus_attr
[i
].max_xfer_rate
= max_xfer_rate
;
7146 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7147 * @ioa_cfg: ioa config struct
7148 * @mode_pages: mode page 28 buffer
7150 * Updates mode page 28 based on driver configuration
7155 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg
*ioa_cfg
,
7156 struct ipr_mode_pages
*mode_pages
)
7158 int i
, entry_length
;
7159 struct ipr_dev_bus_entry
*bus
;
7160 struct ipr_bus_attributes
*bus_attr
;
7161 struct ipr_mode_page28
*mode_page
;
7163 mode_page
= ipr_get_mode_page(mode_pages
, 0x28,
7164 sizeof(struct ipr_mode_page28
));
7166 entry_length
= mode_page
->entry_length
;
7168 /* Loop for each device bus entry */
7169 for (i
= 0, bus
= mode_page
->bus
;
7170 i
< mode_page
->num_entries
;
7171 i
++, bus
= (struct ipr_dev_bus_entry
*)((u8
*)bus
+ entry_length
)) {
7172 if (bus
->res_addr
.bus
> IPR_MAX_NUM_BUSES
) {
7173 dev_err(&ioa_cfg
->pdev
->dev
,
7174 "Invalid resource address reported: 0x%08X\n",
7175 IPR_GET_PHYS_LOC(bus
->res_addr
));
7179 bus_attr
= &ioa_cfg
->bus_attr
[i
];
7180 bus
->extended_reset_delay
= IPR_EXTENDED_RESET_DELAY
;
7181 bus
->bus_width
= bus_attr
->bus_width
;
7182 bus
->max_xfer_rate
= cpu_to_be32(bus_attr
->max_xfer_rate
);
7183 bus
->flags
&= ~IPR_SCSI_ATTR_QAS_MASK
;
7184 if (bus_attr
->qas_enabled
)
7185 bus
->flags
|= IPR_SCSI_ATTR_ENABLE_QAS
;
7187 bus
->flags
|= IPR_SCSI_ATTR_DISABLE_QAS
;
7192 * ipr_build_mode_select - Build a mode select command
7193 * @ipr_cmd: ipr command struct
7194 * @res_handle: resource handle to send command to
7195 * @parm: Byte 2 of Mode Sense command
7196 * @dma_addr: DMA buffer address
7197 * @xfer_len: data transfer length
7202 static void ipr_build_mode_select(struct ipr_cmnd
*ipr_cmd
,
7203 __be32 res_handle
, u8 parm
,
7204 dma_addr_t dma_addr
, u8 xfer_len
)
7206 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7208 ioarcb
->res_handle
= res_handle
;
7209 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
7210 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
7211 ioarcb
->cmd_pkt
.cdb
[0] = MODE_SELECT
;
7212 ioarcb
->cmd_pkt
.cdb
[1] = parm
;
7213 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
7215 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_WRITE_LAST
);
7219 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7220 * @ipr_cmd: ipr command struct
7222 * This function sets up the SCSI bus attributes and sends
7223 * a Mode Select for Page 28 to activate them.
7228 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd
*ipr_cmd
)
7230 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7231 struct ipr_mode_pages
*mode_pages
= &ioa_cfg
->vpd_cbs
->mode_pages
;
7235 ipr_scsi_bus_speed_limit(ioa_cfg
);
7236 ipr_check_term_power(ioa_cfg
, mode_pages
);
7237 ipr_modify_ioafp_mode_page_28(ioa_cfg
, mode_pages
);
7238 length
= mode_pages
->hdr
.length
+ 1;
7239 mode_pages
->hdr
.length
= 0;
7241 ipr_build_mode_select(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
), 0x11,
7242 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, mode_pages
),
7245 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7246 ipr_cmd
->u
.res
= list_entry(ioa_cfg
->used_res_q
.next
,
7247 struct ipr_resource_entry
, queue
);
7248 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7251 return IPR_RC_JOB_RETURN
;
7255 * ipr_build_mode_sense - Builds a mode sense command
7256 * @ipr_cmd: ipr command struct
7257 * @res: resource entry struct
7258 * @parm: Byte 2 of mode sense command
7259 * @dma_addr: DMA address of mode sense buffer
7260 * @xfer_len: Size of DMA buffer
7265 static void ipr_build_mode_sense(struct ipr_cmnd
*ipr_cmd
,
7267 u8 parm
, dma_addr_t dma_addr
, u8 xfer_len
)
7269 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7271 ioarcb
->res_handle
= res_handle
;
7272 ioarcb
->cmd_pkt
.cdb
[0] = MODE_SENSE
;
7273 ioarcb
->cmd_pkt
.cdb
[2] = parm
;
7274 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
7275 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
7277 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_READ_LAST
);

/**
 * ipr_reset_cmd_failed - Handle failure of IOA reset command
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of an IOA bringup command.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	dev_err(&ioa_cfg->pdev->dev,
		"0x%02X failed with IOASC: 0x%08X\n",
		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	return IPR_RC_JOB_RETURN;
}
7304 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7305 * @ipr_cmd: ipr command struct
7307 * This function handles the failure of a Mode Sense to the IOAFP.
7308 * Some adapters do not handle all mode pages.
7311 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7313 static int ipr_reset_mode_sense_failed(struct ipr_cmnd
*ipr_cmd
)
7315 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7316 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7318 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
) {
7319 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7320 ipr_cmd
->u
.res
= list_entry(ioa_cfg
->used_res_q
.next
,
7321 struct ipr_resource_entry
, queue
);
7322 return IPR_RC_JOB_CONTINUE
;
7325 return ipr_reset_cmd_failed(ipr_cmd
);
7329 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7330 * @ipr_cmd: ipr command struct
7332 * This function send a Page 28 mode sense to the IOA to
7333 * retrieve SCSI bus attributes.
7338 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd
*ipr_cmd
)
7340 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7343 ipr_build_mode_sense(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
),
7344 0x28, ioa_cfg
->vpd_cbs_dma
+
7345 offsetof(struct ipr_misc_cbs
, mode_pages
),
7346 sizeof(struct ipr_mode_pages
));
7348 ipr_cmd
->job_step
= ipr_ioafp_mode_select_page28
;
7349 ipr_cmd
->job_step_failed
= ipr_reset_mode_sense_failed
;
7351 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7354 return IPR_RC_JOB_RETURN
;
7358 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7359 * @ipr_cmd: ipr command struct
7361 * This function enables dual IOA RAID support if possible.
7366 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd
*ipr_cmd
)
7368 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7369 struct ipr_mode_pages
*mode_pages
= &ioa_cfg
->vpd_cbs
->mode_pages
;
7370 struct ipr_mode_page24
*mode_page
;
7374 mode_page
= ipr_get_mode_page(mode_pages
, 0x24,
7375 sizeof(struct ipr_mode_page24
));
7378 mode_page
->flags
|= IPR_ENABLE_DUAL_IOA_AF
;
7380 length
= mode_pages
->hdr
.length
+ 1;
7381 mode_pages
->hdr
.length
= 0;
7383 ipr_build_mode_select(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
), 0x11,
7384 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, mode_pages
),
7387 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7388 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7391 return IPR_RC_JOB_RETURN
;
7395 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7396 * @ipr_cmd: ipr command struct
7398 * This function handles the failure of a Mode Sense to the IOAFP.
7399 * Some adapters do not handle all mode pages.
7402 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7404 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd
*ipr_cmd
)
7406 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7408 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
) {
7409 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7410 return IPR_RC_JOB_CONTINUE
;
7413 return ipr_reset_cmd_failed(ipr_cmd
);
7417 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7418 * @ipr_cmd: ipr command struct
7420 * This function send a mode sense to the IOA to retrieve
7421 * the IOA Advanced Function Control mode page.
7426 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd
*ipr_cmd
)
7428 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7431 ipr_build_mode_sense(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
),
7432 0x24, ioa_cfg
->vpd_cbs_dma
+
7433 offsetof(struct ipr_misc_cbs
, mode_pages
),
7434 sizeof(struct ipr_mode_pages
));
7436 ipr_cmd
->job_step
= ipr_ioafp_mode_select_page24
;
7437 ipr_cmd
->job_step_failed
= ipr_reset_mode_sense_page24_failed
;
7439 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7442 return IPR_RC_JOB_RETURN
;
7446 * ipr_init_res_table - Initialize the resource table
7447 * @ipr_cmd: ipr command struct
7449 * This function looks through the existing resource table, comparing
7450 * it with the config table. This function will take care of old/new
7451 * devices and schedule adding/removing them from the mid-layer
7455 * IPR_RC_JOB_CONTINUE
7457 static int ipr_init_res_table(struct ipr_cmnd
*ipr_cmd
)
7459 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7460 struct ipr_resource_entry
*res
, *temp
;
7461 struct ipr_config_table_entry_wrapper cfgtew
;
7462 int entries
, found
, flag
, i
;
7467 flag
= ioa_cfg
->u
.cfg_table64
->hdr64
.flags
;
7469 flag
= ioa_cfg
->u
.cfg_table
->hdr
.flags
;
7471 if (flag
& IPR_UCODE_DOWNLOAD_REQ
)
7472 dev_err(&ioa_cfg
->pdev
->dev
, "Microcode download required\n");
7474 list_for_each_entry_safe(res
, temp
, &ioa_cfg
->used_res_q
, queue
)
7475 list_move_tail(&res
->queue
, &old_res
);
7478 entries
= be16_to_cpu(ioa_cfg
->u
.cfg_table64
->hdr64
.num_entries
);
7480 entries
= ioa_cfg
->u
.cfg_table
->hdr
.num_entries
;
7482 for (i
= 0; i
< entries
; i
++) {
7484 cfgtew
.u
.cfgte64
= &ioa_cfg
->u
.cfg_table64
->dev
[i
];
7486 cfgtew
.u
.cfgte
= &ioa_cfg
->u
.cfg_table
->dev
[i
];
7489 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7490 if (ipr_is_same_device(res
, &cfgtew
)) {
7491 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7498 if (list_empty(&ioa_cfg
->free_res_q
)) {
7499 dev_err(&ioa_cfg
->pdev
->dev
, "Too many devices attached\n");
7504 res
= list_entry(ioa_cfg
->free_res_q
.next
,
7505 struct ipr_resource_entry
, queue
);
7506 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7507 ipr_init_res_entry(res
, &cfgtew
);
7509 } else if (res
->sdev
&& (ipr_is_vset_device(res
) || ipr_is_scsi_disk(res
)))
7510 res
->sdev
->allow_restart
= 1;
7513 ipr_update_res_entry(res
, &cfgtew
);
7516 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7518 res
->del_from_ml
= 1;
7519 res
->res_handle
= IPR_INVALID_RES_HANDLE
;
7520 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7524 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7525 ipr_clear_res_target(res
);
7526 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
7529 if (ioa_cfg
->dual_raid
&& ipr_dual_ioa_raid
)
7530 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page24
;
7532 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7535 return IPR_RC_JOB_CONTINUE
;
7539 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7540 * @ipr_cmd: ipr command struct
7542 * This function sends a Query IOA Configuration command
7543 * to the adapter to retrieve the IOA configuration table.
7548 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd
*ipr_cmd
)
7550 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7551 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7552 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
7553 struct ipr_inquiry_cap
*cap
= &ioa_cfg
->vpd_cbs
->cap
;
7556 if (cap
->cap
& IPR_CAP_DUAL_IOA_RAID
)
7557 ioa_cfg
->dual_raid
= 1;
7558 dev_info(&ioa_cfg
->pdev
->dev
, "Adapter firmware version: %02X%02X%02X%02X\n",
7559 ucode_vpd
->major_release
, ucode_vpd
->card_type
,
7560 ucode_vpd
->minor_release
[0], ucode_vpd
->minor_release
[1]);
7561 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7562 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7564 ioarcb
->cmd_pkt
.cdb
[0] = IPR_QUERY_IOA_CONFIG
;
7565 ioarcb
->cmd_pkt
.cdb
[6] = (ioa_cfg
->cfg_table_size
>> 16) & 0xff;
7566 ioarcb
->cmd_pkt
.cdb
[7] = (ioa_cfg
->cfg_table_size
>> 8) & 0xff;
7567 ioarcb
->cmd_pkt
.cdb
[8] = ioa_cfg
->cfg_table_size
& 0xff;
7569 ipr_init_ioadl(ipr_cmd
, ioa_cfg
->cfg_table_dma
, ioa_cfg
->cfg_table_size
,
7570 IPR_IOADL_FLAGS_READ_LAST
);
7572 ipr_cmd
->job_step
= ipr_init_res_table
;
7574 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7577 return IPR_RC_JOB_RETURN
;
7581 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7582 * @ipr_cmd: ipr command struct
7584 * This utility function sends an inquiry to the adapter.
7589 static void ipr_ioafp_inquiry(struct ipr_cmnd
*ipr_cmd
, u8 flags
, u8 page
,
7590 dma_addr_t dma_addr
, u8 xfer_len
)
7592 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7595 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
7596 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7598 ioarcb
->cmd_pkt
.cdb
[0] = INQUIRY
;
7599 ioarcb
->cmd_pkt
.cdb
[1] = flags
;
7600 ioarcb
->cmd_pkt
.cdb
[2] = page
;
7601 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
7603 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_READ_LAST
);
7605 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7610 * ipr_inquiry_page_supported - Is the given inquiry page supported
7611 * @page0: inquiry page 0 buffer
7614 * This function determines if the specified inquiry page is supported.
7617 * 1 if page is supported / 0 if not
7619 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0
*page0
, u8 page
)
7623 for (i
= 0; i
< min_t(u8
, page0
->len
, IPR_INQUIRY_PAGE0_ENTRIES
); i
++)
7624 if (page0
->page
[i
] == page
)
7631 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7632 * @ipr_cmd: ipr command struct
7634 * This function sends a Page 0xD0 inquiry to the adapter
7635 * to retrieve adapter capabilities.
7638 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7640 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd
*ipr_cmd
)
7642 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7643 struct ipr_inquiry_page0
*page0
= &ioa_cfg
->vpd_cbs
->page0_data
;
7644 struct ipr_inquiry_cap
*cap
= &ioa_cfg
->vpd_cbs
->cap
;
7647 ipr_cmd
->job_step
= ipr_ioafp_query_ioa_cfg
;
7648 memset(cap
, 0, sizeof(*cap
));
7650 if (ipr_inquiry_page_supported(page0
, 0xD0)) {
7651 ipr_ioafp_inquiry(ipr_cmd
, 1, 0xD0,
7652 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, cap
),
7653 sizeof(struct ipr_inquiry_cap
));
7654 return IPR_RC_JOB_RETURN
;
7658 return IPR_RC_JOB_CONTINUE
;
7662 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7663 * @ipr_cmd: ipr command struct
7665 * This function sends a Page 3 inquiry to the adapter
7666 * to retrieve software VPD information.
7669 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7671 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd
*ipr_cmd
)
7673 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7677 ipr_cmd
->job_step
= ipr_ioafp_cap_inquiry
;
7679 ipr_ioafp_inquiry(ipr_cmd
, 1, 3,
7680 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, page3_data
),
7681 sizeof(struct ipr_inquiry_page3
));
7684 return IPR_RC_JOB_RETURN
;
7688 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7689 * @ipr_cmd: ipr command struct
7691 * This function sends a Page 0 inquiry to the adapter
7692 * to retrieve supported inquiry pages.
7695 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7697 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd
*ipr_cmd
)
7699 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7704 /* Grab the type out of the VPD and store it away */
7705 memcpy(type
, ioa_cfg
->vpd_cbs
->ioa_vpd
.std_inq_data
.vpids
.product_id
, 4);
7707 ioa_cfg
->type
= simple_strtoul((char *)type
, NULL
, 16);
7709 ipr_cmd
->job_step
= ipr_ioafp_page3_inquiry
;
7711 ipr_ioafp_inquiry(ipr_cmd
, 1, 0,
7712 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, page0_data
),
7713 sizeof(struct ipr_inquiry_page0
));
7716 return IPR_RC_JOB_RETURN
;
7720 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7721 * @ipr_cmd: ipr command struct
7723 * This function sends a standard inquiry to the adapter.
7728 static int ipr_ioafp_std_inquiry(struct ipr_cmnd
*ipr_cmd
)
7730 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7733 ipr_cmd
->job_step
= ipr_ioafp_page0_inquiry
;
7735 ipr_ioafp_inquiry(ipr_cmd
, 0, 0,
7736 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, ioa_vpd
),
7737 sizeof(struct ipr_ioa_vpd
));
7740 return IPR_RC_JOB_RETURN
;
7744 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7745 * @ipr_cmd: ipr command struct
7747 * This function send an Identify Host Request Response Queue
7748 * command to establish the HRRQ with the adapter.
7753 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd
*ipr_cmd
)
7755 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7756 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7757 struct ipr_hrr_queue
*hrrq
;
7760 ipr_cmd
->job_step
= ipr_ioafp_std_inquiry
;
7761 dev_info(&ioa_cfg
->pdev
->dev
, "Starting IOA initialization sequence.\n");
7763 if (ioa_cfg
->identify_hrrq_index
< ioa_cfg
->hrrq_num
) {
7764 hrrq
= &ioa_cfg
->hrrq
[ioa_cfg
->identify_hrrq_index
];
7766 ioarcb
->cmd_pkt
.cdb
[0] = IPR_ID_HOST_RR_Q
;
7767 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7769 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7771 ioarcb
->cmd_pkt
.cdb
[1] = 0x1;
7773 if (ioa_cfg
->nvectors
== 1)
7774 ioarcb
->cmd_pkt
.cdb
[1] &= ~IPR_ID_HRRQ_SELE_ENABLE
;
7776 ioarcb
->cmd_pkt
.cdb
[1] |= IPR_ID_HRRQ_SELE_ENABLE
;
7778 ioarcb
->cmd_pkt
.cdb
[2] =
7779 ((u64
) hrrq
->host_rrq_dma
>> 24) & 0xff;
7780 ioarcb
->cmd_pkt
.cdb
[3] =
7781 ((u64
) hrrq
->host_rrq_dma
>> 16) & 0xff;
7782 ioarcb
->cmd_pkt
.cdb
[4] =
7783 ((u64
) hrrq
->host_rrq_dma
>> 8) & 0xff;
7784 ioarcb
->cmd_pkt
.cdb
[5] =
7785 ((u64
) hrrq
->host_rrq_dma
) & 0xff;
7786 ioarcb
->cmd_pkt
.cdb
[7] =
7787 ((sizeof(u32
) * hrrq
->size
) >> 8) & 0xff;
7788 ioarcb
->cmd_pkt
.cdb
[8] =
7789 (sizeof(u32
) * hrrq
->size
) & 0xff;
7791 if (ioarcb
->cmd_pkt
.cdb
[1] & IPR_ID_HRRQ_SELE_ENABLE
)
7792 ioarcb
->cmd_pkt
.cdb
[9] =
7793 ioa_cfg
->identify_hrrq_index
;
7795 if (ioa_cfg
->sis64
) {
7796 ioarcb
->cmd_pkt
.cdb
[10] =
7797 ((u64
) hrrq
->host_rrq_dma
>> 56) & 0xff;
7798 ioarcb
->cmd_pkt
.cdb
[11] =
7799 ((u64
) hrrq
->host_rrq_dma
>> 48) & 0xff;
7800 ioarcb
->cmd_pkt
.cdb
[12] =
7801 ((u64
) hrrq
->host_rrq_dma
>> 40) & 0xff;
7802 ioarcb
->cmd_pkt
.cdb
[13] =
7803 ((u64
) hrrq
->host_rrq_dma
>> 32) & 0xff;
7806 if (ioarcb
->cmd_pkt
.cdb
[1] & IPR_ID_HRRQ_SELE_ENABLE
)
7807 ioarcb
->cmd_pkt
.cdb
[14] =
7808 ioa_cfg
->identify_hrrq_index
;
7810 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
7811 IPR_INTERNAL_TIMEOUT
);
7813 if (++ioa_cfg
->identify_hrrq_index
< ioa_cfg
->hrrq_num
)
7814 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
7817 return IPR_RC_JOB_RETURN
;
7821 return IPR_RC_JOB_CONTINUE
;
7825 * ipr_reset_timer_done - Adapter reset timer function
7826 * @ipr_cmd: ipr command struct
7828 * Description: This function is used in adapter reset processing
7829 * for timing events. If the reset_cmd pointer in the IOA
7830 * config struct is not this adapter's we are doing nested
7831 * resets and fail_all_ops will take care of freeing the
7837 static void ipr_reset_timer_done(struct ipr_cmnd
*ipr_cmd
)
7839 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7840 unsigned long lock_flags
= 0;
7842 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
7844 if (ioa_cfg
->reset_cmd
== ipr_cmd
) {
7845 list_del(&ipr_cmd
->queue
);
7846 ipr_cmd
->done(ipr_cmd
);
7849 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
7853 * ipr_reset_start_timer - Start a timer for adapter reset job
7854 * @ipr_cmd: ipr command struct
7855 * @timeout: timeout value
7857 * Description: This function is used in adapter reset processing
7858 * for timing events. If the reset_cmd pointer in the IOA
7859 * config struct is not this adapter's we are doing nested
7860 * resets and fail_all_ops will take care of freeing the
7866 static void ipr_reset_start_timer(struct ipr_cmnd
*ipr_cmd
,
7867 unsigned long timeout
)
7871 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
7872 ipr_cmd
->done
= ipr_reset_ioa_job
;
7874 ipr_cmd
->timer
.data
= (unsigned long) ipr_cmd
;
7875 ipr_cmd
->timer
.expires
= jiffies
+ timeout
;
7876 ipr_cmd
->timer
.function
= (void (*)(unsigned long))ipr_reset_timer_done
;
7877 add_timer(&ipr_cmd
->timer
);
7881 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7882 * @ioa_cfg: ioa cfg struct
7887 static void ipr_init_ioa_mem(struct ipr_ioa_cfg
*ioa_cfg
)
7889 struct ipr_hrr_queue
*hrrq
;
7891 for_each_hrrq(hrrq
, ioa_cfg
) {
7892 spin_lock(&hrrq
->_lock
);
7893 memset(hrrq
->host_rrq
, 0, sizeof(u32
) * hrrq
->size
);
7895 /* Initialize Host RRQ pointers */
7896 hrrq
->hrrq_start
= hrrq
->host_rrq
;
7897 hrrq
->hrrq_end
= &hrrq
->host_rrq
[hrrq
->size
- 1];
7898 hrrq
->hrrq_curr
= hrrq
->hrrq_start
;
7899 hrrq
->toggle_bit
= 1;
7900 spin_unlock(&hrrq
->_lock
);
7904 ioa_cfg
->identify_hrrq_index
= 0;
7905 if (ioa_cfg
->hrrq_num
== 1)
7906 atomic_set(&ioa_cfg
->hrrq_index
, 0);
7908 atomic_set(&ioa_cfg
->hrrq_index
, 1);
7910 /* Zero out config table */
7911 memset(ioa_cfg
->u
.cfg_table
, 0, ioa_cfg
->cfg_table_size
);
7915 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7916 * @ipr_cmd: ipr command struct
7919 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7921 static int ipr_reset_next_stage(struct ipr_cmnd
*ipr_cmd
)
7923 unsigned long stage
, stage_time
;
7925 volatile u32 int_reg
;
7926 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7929 feedback
= readl(ioa_cfg
->regs
.init_feedback_reg
);
7930 stage
= feedback
& IPR_IPL_INIT_STAGE_MASK
;
7931 stage_time
= feedback
& IPR_IPL_INIT_STAGE_TIME_MASK
;
7933 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage
, stage_time
);
7935 /* sanity check the stage_time value */
7936 if (stage_time
== 0)
7937 stage_time
= IPR_IPL_INIT_DEFAULT_STAGE_TIME
;
7938 else if (stage_time
< IPR_IPL_INIT_MIN_STAGE_TIME
)
7939 stage_time
= IPR_IPL_INIT_MIN_STAGE_TIME
;
7940 else if (stage_time
> IPR_LONG_OPERATIONAL_TIMEOUT
)
7941 stage_time
= IPR_LONG_OPERATIONAL_TIMEOUT
;
7943 if (stage
== IPR_IPL_INIT_STAGE_UNKNOWN
) {
7944 writel(IPR_PCII_IPL_STAGE_CHANGE
, ioa_cfg
->regs
.set_interrupt_mask_reg
);
7945 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
7946 stage_time
= ioa_cfg
->transop_timeout
;
7947 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
7948 } else if (stage
== IPR_IPL_INIT_STAGE_TRANSOP
) {
7949 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
7950 if (int_reg
& IPR_PCII_IOA_TRANS_TO_OPER
) {
7951 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
7952 maskval
= IPR_PCII_IPL_STAGE_CHANGE
;
7953 maskval
= (maskval
<< 32) | IPR_PCII_IOA_TRANS_TO_OPER
;
7954 writeq(maskval
, ioa_cfg
->regs
.set_interrupt_mask_reg
);
7955 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
7956 return IPR_RC_JOB_CONTINUE
;
7960 ipr_cmd
->timer
.data
= (unsigned long) ipr_cmd
;
7961 ipr_cmd
->timer
.expires
= jiffies
+ stage_time
* HZ
;
7962 ipr_cmd
->timer
.function
= (void (*)(unsigned long))ipr_oper_timeout
;
7963 ipr_cmd
->done
= ipr_reset_ioa_job
;
7964 add_timer(&ipr_cmd
->timer
);
7966 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
7968 return IPR_RC_JOB_RETURN
;
7972 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7973 * @ipr_cmd: ipr command struct
7975 * This function reinitializes some control blocks and
7976 * enables destructive diagnostics on the adapter.
7981 static int ipr_reset_enable_ioa(struct ipr_cmnd
*ipr_cmd
)
7983 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7984 volatile u32 int_reg
;
7985 volatile u64 maskval
;
7989 ipr_cmd
->job_step
= ipr_ioafp_identify_hrrq
;
7990 ipr_init_ioa_mem(ioa_cfg
);
7992 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
7993 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
7994 ioa_cfg
->hrrq
[i
].allow_interrupts
= 1;
7995 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
7998 if (ioa_cfg
->sis64
) {
7999 /* Set the adapter to the correct endian mode. */
8000 writel(IPR_ENDIAN_SWAP_KEY
, ioa_cfg
->regs
.endian_swap_reg
);
8001 int_reg
= readl(ioa_cfg
->regs
.endian_swap_reg
);
8004 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
8006 if (int_reg
& IPR_PCII_IOA_TRANS_TO_OPER
) {
8007 writel((IPR_PCII_ERROR_INTERRUPTS
| IPR_PCII_HRRQ_UPDATED
),
8008 ioa_cfg
->regs
.clr_interrupt_mask_reg32
);
8009 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
8010 return IPR_RC_JOB_CONTINUE
;
8013 /* Enable destructive diagnostics on IOA */
8014 writel(ioa_cfg
->doorbell
, ioa_cfg
->regs
.set_uproc_interrupt_reg32
);
8016 if (ioa_cfg
->sis64
) {
8017 maskval
= IPR_PCII_IPL_STAGE_CHANGE
;
8018 maskval
= (maskval
<< 32) | IPR_PCII_OPER_INTERRUPTS
;
8019 writeq(maskval
, ioa_cfg
->regs
.clr_interrupt_mask_reg
);
8021 writel(IPR_PCII_OPER_INTERRUPTS
, ioa_cfg
->regs
.clr_interrupt_mask_reg32
);
8023 int_reg
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg
);
8025 dev_info(&ioa_cfg
->pdev
->dev
, "Initializing IOA.\n");
8027 if (ioa_cfg
->sis64
) {
8028 ipr_cmd
->job_step
= ipr_reset_next_stage
;
8029 return IPR_RC_JOB_CONTINUE
;
8032 ipr_cmd
->timer
.data
= (unsigned long) ipr_cmd
;
8033 ipr_cmd
->timer
.expires
= jiffies
+ (ioa_cfg
->transop_timeout
* HZ
);
8034 ipr_cmd
->timer
.function
= (void (*)(unsigned long))ipr_oper_timeout
;
8035 ipr_cmd
->done
= ipr_reset_ioa_job
;
8036 add_timer(&ipr_cmd
->timer
);
8037 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
8040 return IPR_RC_JOB_RETURN
;
8044 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8045 * @ipr_cmd: ipr command struct
8047 * This function is invoked when an adapter dump has run out
8048 * of processing time.
8051 * IPR_RC_JOB_CONTINUE
8053 static int ipr_reset_wait_for_dump(struct ipr_cmnd
*ipr_cmd
)
8055 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8057 if (ioa_cfg
->sdt_state
== GET_DUMP
)
8058 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
8059 else if (ioa_cfg
->sdt_state
== READ_DUMP
)
8060 ioa_cfg
->sdt_state
= ABORT_DUMP
;
8062 ioa_cfg
->dump_timeout
= 1;
8063 ipr_cmd
->job_step
= ipr_reset_alert
;
8065 return IPR_RC_JOB_CONTINUE
;

/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:	ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 *	nothing
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}
8085 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8086 * @ioa_cfg: ioa config struct
8088 * Fetches the unit check buffer from the adapter by clocking the data
8089 * through the mailbox register.
8094 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg
*ioa_cfg
)
8096 unsigned long mailbox
;
8097 struct ipr_hostrcb
*hostrcb
;
8098 struct ipr_uc_sdt sdt
;
8102 mailbox
= readl(ioa_cfg
->ioa_mailbox
);
8104 if (!ioa_cfg
->sis64
&& !ipr_sdt_is_fmt2(mailbox
)) {
8105 ipr_unit_check_no_data(ioa_cfg
);
8109 memset(&sdt
, 0, sizeof(struct ipr_uc_sdt
));
8110 rc
= ipr_get_ldump_data_section(ioa_cfg
, mailbox
, (__be32
*) &sdt
,
8111 (sizeof(struct ipr_uc_sdt
)) / sizeof(__be32
));
8113 if (rc
|| !(sdt
.entry
[0].flags
& IPR_SDT_VALID_ENTRY
) ||
8114 ((be32_to_cpu(sdt
.hdr
.state
) != IPR_FMT3_SDT_READY_TO_USE
) &&
8115 (be32_to_cpu(sdt
.hdr
.state
) != IPR_FMT2_SDT_READY_TO_USE
))) {
8116 ipr_unit_check_no_data(ioa_cfg
);
8120 /* Find length of the first sdt entry (UC buffer) */
8121 if (be32_to_cpu(sdt
.hdr
.state
) == IPR_FMT3_SDT_READY_TO_USE
)
8122 length
= be32_to_cpu(sdt
.entry
[0].end_token
);
8124 length
= (be32_to_cpu(sdt
.entry
[0].end_token
) -
8125 be32_to_cpu(sdt
.entry
[0].start_token
)) &
8126 IPR_FMT2_MBX_ADDR_MASK
;
8128 hostrcb
= list_entry(ioa_cfg
->hostrcb_free_q
.next
,
8129 struct ipr_hostrcb
, queue
);
8130 list_del(&hostrcb
->queue
);
8131 memset(&hostrcb
->hcam
, 0, sizeof(hostrcb
->hcam
));
8133 rc
= ipr_get_ldump_data_section(ioa_cfg
,
8134 be32_to_cpu(sdt
.entry
[0].start_token
),
8135 (__be32
*)&hostrcb
->hcam
,
8136 min(length
, (int)sizeof(hostrcb
->hcam
)) / sizeof(__be32
));
8139 ipr_handle_log_data(ioa_cfg
, hostrcb
);
8140 ioasc
= be32_to_cpu(hostrcb
->hcam
.u
.error
.fd_ioasc
);
8141 if (ioasc
== IPR_IOASC_NR_IOA_RESET_REQUIRED
&&
8142 ioa_cfg
->sdt_state
== GET_DUMP
)
8143 ioa_cfg
->sdt_state
= WAIT_FOR_DUMP
;
8145 ipr_unit_check_no_data(ioa_cfg
);
8147 list_add_tail(&hostrcb
->queue
, &ioa_cfg
->hostrcb_free_q
);
/**
 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function will call to get the unit check buffer.
 **/
static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ioa_cfg->ioa_unit_checked = 0;
	ipr_get_unit_check_buffer(ioa_cfg);
	ipr_cmd->job_step = ipr_reset_alert;
	ipr_reset_start_timer(ipr_cmd, 0);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 int_reg;

	ENTER;
	ioa_cfg->pdev->state_saved = true;
	pci_restore_state(ioa_cfg->pdev);

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	if (ioa_cfg->ioa_unit_checked) {
		if (ioa_cfg->sis64) {
			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
			return IPR_RC_JOB_RETURN;
		} else {
			ioa_cfg->ioa_unit_checked = 0;
			ipr_get_unit_check_buffer(ioa_cfg);
			ipr_cmd->job_step = ipr_reset_alert;
			ipr_reset_start_timer(ipr_cmd, 0);
			return IPR_RC_JOB_RETURN;
		}
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;

		if (GET_DUMP == ioa_cfg->sdt_state) {
			ioa_cfg->sdt_state = READ_DUMP;
			ioa_cfg->dump_timeout = 0;
			if (ioa_cfg->sis64)
				ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
			else
				ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
			ipr_cmd->job_step = ipr_reset_wait_for_dump;
			schedule_work(&ioa_cfg->work_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_bist_done - BIST has completed on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: Unblock config space and resume the reset process.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	if (ioa_cfg->cfg_locked)
		pci_cfg_access_unlock(ioa_cfg->pdev);
	ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = PCIBIOS_SUCCESSFUL;

	ENTER;
	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
		writel(IPR_UPROCI_SIS64_START_BIST,
		       ioa_cfg->regs.set_uproc_interrupt_reg32);
	else
		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc == PCIBIOS_SUCCESSFUL) {
		ipr_cmd->job_step = ipr_reset_bist_done;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	} else {
		if (ioa_cfg->cfg_locked)
			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
		ioa_cfg->cfg_locked = 0;
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	}

	LEAVE;
	return rc;
}
/**
 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This clears PCI reset to the adapter and delays two seconds.
 **/
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
	ENTER;
	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
	ipr_cmd->job_step = ipr_reset_bist_done;
	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This asserts PCI reset to the adapter.
 **/
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
	ipr_cmd->job_step = ipr_reset_slot_reset_done;
	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_block_config_access_wait - Wait for permission to block config access
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;

	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
		ioa_cfg->cfg_locked = 1;
		ipr_cmd->job_step = ioa_cfg->reset;
	} else {
		if (ipr_cmd->u.time_left) {
			rc = IPR_RC_JOB_RETURN;
			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
			ipr_reset_start_timer(ipr_cmd,
					      IPR_CHECK_FOR_RESET_TIMEOUT);
		} else {
			ipr_cmd->job_step = ioa_cfg->reset;
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting to lock config access. Resetting anyway.\n");
		}
	}

	return rc;
}
/**
 * ipr_reset_block_config_access - Block config access to the IOA
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}
/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}
/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ENTER;
	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
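
	/*
	 * Note on the CDB setup above: WRITE BUFFER carries its parameter
	 * list length in bytes 6-8 as a 24-bit big-endian value, which is
	 * why buffer_len is split into three single-byte fields here.
	 */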
	if (ioa_cfg->sis64)
		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	else
		ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type != IPR_SHUTDOWN_NONE &&
	    !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		else
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}
/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue,
				      &ipr_cmd->hrrq->hrrq_free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}
/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;
	int i;

	ioa_cfg->in_reset_reload = 1;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_cmds = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
		scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}
/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	int i;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload) {
		if (ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
		else if (ioa_cfg->sdt_state == READ_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
	}

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 1;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		wmb();

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
				spin_unlock_irq(ioa_cfg->host->host_lock);
				scsi_unblock_requests(ioa_cfg->host);
				spin_lock_irq(ioa_cfg->host->host_lock);
			}
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 */
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

	/* Disallow new interrupts, avoid loop */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 */
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->needs_warm_reset)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 */
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
	ioa_cfg->in_ioa_bringdown = 1;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_cmds = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 *	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}
/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);

	kfree(ioa_cfg->ipr_cmnd_list);
	kfree(ioa_cfg->ipr_cmnd_list_dma);
	ioa_cfg->ipr_cmnd_list = NULL;
	ioa_cfg->ipr_cmnd_list_dma = NULL;
	ioa_cfg->ipr_cmd_pool = NULL;
}
/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++)
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(u32) * ioa_cfg->hrrq[i].size,
				    ioa_cfg->hrrq[i].host_rrq,
				    ioa_cfg->hrrq[i].host_rrq_dma);

	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}
/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		int i;
		for (i = 0; i < ioa_cfg->nvectors; i++)
			free_irq(ioa_cfg->vectors_info[i].vec,
				 &ioa_cfg->hrrq[i]);
	} else
		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);

	if (ioa_cfg->intr_flag == IPR_USE_MSI) {
		pci_disable_msi(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSI;
	} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
		pci_disable_msix(pdev);
		ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
	}

	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}
/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -ENOMEM on allocation failure
 **/
static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i, entries_each_hrrq, hrrq_id = 0;

	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
						sizeof(struct ipr_cmnd), 512, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);

	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
		ipr_free_cmd_blks(ioa_cfg);
		return -ENOMEM;
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		if (ioa_cfg->hrrq_num > 1) {
			if (i == 0) {
				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
				ioa_cfg->hrrq[i].min_cmd_id = 0;
				ioa_cfg->hrrq[i].max_cmd_id =
					(entries_each_hrrq - 1);
			} else {
				entries_each_hrrq =
					IPR_NUM_BASE_CMD_BLKS /
					(ioa_cfg->hrrq_num - 1);
				ioa_cfg->hrrq[i].min_cmd_id =
					IPR_NUM_INTERNAL_CMD_BLKS +
					(i - 1) * entries_each_hrrq;
				ioa_cfg->hrrq[i].max_cmd_id =
					(IPR_NUM_INTERNAL_CMD_BLKS +
					i * entries_each_hrrq - 1);
			}
		} else {
			entries_each_hrrq = IPR_NUM_CMD_BLKS;
			ioa_cfg->hrrq[i].min_cmd_id = 0;
			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
		}
		ioa_cfg->hrrq[i].size = entries_each_hrrq;
	}

	BUG_ON(ioa_cfg->hrrq_num == 0);

	i = IPR_NUM_CMD_BLKS -
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
	if (i > 0) {
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
	}
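
	/*
	 * At this point the command blocks have been partitioned across the
	 * HRRQs: HRRQ 0 keeps IPR_NUM_INTERNAL_CMD_BLKS for internal use,
	 * the remaining base blocks are divided evenly across the other
	 * HRRQs, and any blocks left over by the integer division were just
	 * folded into the last HRRQ above.
	 */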
	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
			hrrq_id++;
	}

	return 0;
}
/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / non-zero for error
 **/
static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       ioa_cfg->max_devs_supported, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
		if (i == 0)
			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
		else
			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
	}

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					&ioa_cfg->hrrq[i].host_rrq_dma);

		if (!ioa_cfg->hrrq[i].host_rrq) {
			while (--i > 0)
				pci_free_consistent(pdev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					ioa_cfg->hrrq[i].host_rrq,
					ioa_cfg->hrrq[i].host_rrq_dma);
			goto out_ipr_free_cmd_blocks;
		}
		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						    ioa_cfg->cfg_table_size,
						    &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0)
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
			    ioa_cfg->u.cfg_table,
			    ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		pci_free_consistent(pdev,
				    sizeof(u32) * ioa_cfg->hrrq[i].size,
				    ioa_cfg->hrrq[i].host_rrq,
				    ioa_cfg->hrrq[i].host_rrq_dma);
	}
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}
/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 **/
static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}
/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 **/
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
			     struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
	}
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}
/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}
static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
{
	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
	int i, err, vectors;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	vectors = ipr_number_of_msix;

	while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
		vectors = err;

	if (err < 0) {
		pci_disable_msix(ioa_cfg->pdev);
		return err;
	}

	if (!err) {
		for (i = 0; i < vectors; i++)
			ioa_cfg->vectors_info[i].vec = entries[i].vector;
		ioa_cfg->nvectors = vectors;
	}

	return err;
}
static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, err, vectors;

	vectors = ipr_number_of_msix;

	while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
		vectors = err;

	if (err < 0) {
		pci_disable_msi(ioa_cfg->pdev);
		return err;
	}

	if (!err) {
		for (i = 0; i < vectors; i++)
			ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
		ioa_cfg->nvectors = vectors;
	}

	return err;
}
static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
		ioa_cfg->vectors_info[vec_idx].
			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
	}
}
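
/*
 * The "host%d-%d" strings built by name_msi_vectors() above are later
 * passed to request_irq() as the interrupt names, so each vector shows
 * up individually (e.g. "host2-0", "host2-1") in /proc/interrupts.
 */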
static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	int i, rc;

	for (i = 1; i < ioa_cfg->nvectors; i++) {
		rc = request_irq(ioa_cfg->vectors_info[i].vec,
			ipr_isr_mhrrq,
			0,
			ioa_cfg->vectors_info[i].desc,
			&ioa_cfg->hrrq[i]);
		if (rc) {
			while (--i >= 0)
				free_irq(ioa_cfg->vectors_info[i].vec,
					&ioa_cfg->hrrq[i]);
			return rc;
		}
	}
	return 0;
}
/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @pdev:	PCI device struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static irqreturn_t ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: The return value from pci_enable_msi() can not always be
 * trusted. This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	else
		rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
	else
		free_irq(pdev->irq, ioa_cfg);

	LEAVE;

	return rc;
}
/* ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags, driver_lock_flags;

	ENTER;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	if (ioa_cfg->sis64) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		}
	} else
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
			ipr_enable_msix(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSIX;
	else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
			ipr_enable_msi(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSI;
	else {
		ioa_cfg->intr_flag = IPR_USE_LSI;
		ioa_cfg->clear_isr = 1;
		ioa_cfg->nvectors = 1;
		dev_info(&pdev->dev, "Cannot enable MSI.\n");
	}

	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP) {
			if (ioa_cfg->intr_flag == IPR_USE_MSI) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSI;
				pci_disable_msi(pdev);
			} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
				pci_disable_msix(pdev);
			}

			ioa_cfg->intr_flag = IPR_USE_LSI;
			ioa_cfg->nvectors = 1;
		} else if (rc)
			goto out_msi_disable;
		else {
			if (ioa_cfg->intr_flag == IPR_USE_MSI)
				dev_info(&pdev->dev,
					"Request for %d MSIs succeeded with starting IRQ: %d\n",
					ioa_cfg->nvectors, pdev->irq);
			else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
				dev_info(&pdev->dev,
					"Request for %d MSIXs succeeded.",
					ioa_cfg->nvectors);
		}
	}

	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				(unsigned int)num_online_cpus(),
				(unsigned int)IPR_MAX_HRRQ_NUM);

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto out_msi_disable;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if (ioa_cfg->sis64)
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
				+ ((sizeof(struct ipr_config_table_entry64)
				* ioa_cfg->max_devs_supported)));
	else
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
				+ ((sizeof(struct ipr_config_table_entry)
				* ioa_cfg->max_devs_supported)));
= ipr_alloc_mem(ioa_cfg
);
9712 "Couldn't allocate enough memory for device driver!\n");
9713 goto out_msi_disable
;
9717 * If HRRQ updated interrupt is not masked, or reset alert is set,
9718 * the card is in an unknown state and needs a hard reset
9720 mask
= readl(ioa_cfg
->regs
.sense_interrupt_mask_reg32
);
9721 interrupts
= readl(ioa_cfg
->regs
.sense_interrupt_reg32
);
9722 uproc
= readl(ioa_cfg
->regs
.sense_uproc_interrupt_reg32
);
9723 if ((mask
& IPR_PCII_HRRQ_UPDATED
) == 0 || (uproc
& IPR_UPROCI_RESET_ALERT
))
9724 ioa_cfg
->needs_hard_reset
= 1;
9725 if ((interrupts
& IPR_PCII_ERROR_INTERRUPTS
) || reset_devices
)
9726 ioa_cfg
->needs_hard_reset
= 1;
9727 if (interrupts
& IPR_PCII_IOA_UNIT_CHECKED
)
9728 ioa_cfg
->ioa_unit_checked
= 1;
9730 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, lock_flags
);
9731 ipr_mask_and_clear_interrupts(ioa_cfg
, ~IPR_PCII_IOA_TRANS_TO_OPER
);
9732 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, lock_flags
);
9734 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
9735 || ioa_cfg
->intr_flag
== IPR_USE_MSIX
) {
9736 name_msi_vectors(ioa_cfg
);
9737 rc
= request_irq(ioa_cfg
->vectors_info
[0].vec
, ipr_isr
,
9739 ioa_cfg
->vectors_info
[0].desc
,
9742 rc
= ipr_request_other_msi_irqs(ioa_cfg
);
9744 rc
= request_irq(pdev
->irq
, ipr_isr
,
9746 IPR_NAME
, &ioa_cfg
->hrrq
[0]);
9749 dev_err(&pdev
->dev
, "Couldn't register IRQ %d! rc=%d\n",
9754 if ((dev_id
->driver_data
& IPR_USE_PCI_WARM_RESET
) ||
9755 (dev_id
->device
== PCI_DEVICE_ID_IBM_OBSIDIAN_E
&& !ioa_cfg
->revid
)) {
9756 ioa_cfg
->needs_warm_reset
= 1;
9757 ioa_cfg
->reset
= ipr_reset_slot_reset
;
9759 ioa_cfg
->reset
= ipr_reset_start_bist
;
9761 spin_lock_irqsave(&ipr_driver_lock
, driver_lock_flags
);
9762 list_add_tail(&ioa_cfg
->queue
, &ipr_ioa_head
);
9763 spin_unlock_irqrestore(&ipr_driver_lock
, driver_lock_flags
);
9770 ipr_free_mem(ioa_cfg
);
9772 if (ioa_cfg
->intr_flag
== IPR_USE_MSI
)
9773 pci_disable_msi(pdev
);
9774 else if (ioa_cfg
->intr_flag
== IPR_USE_MSIX
)
9775 pci_disable_msix(pdev
);
9778 out_release_regions
:
9779 pci_release_regions(pdev
);
9781 scsi_host_put(host
);
9783 pci_disable_device(pdev
);
/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}
/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}
/**
 * ipr_probe - Adapter hot plug add entry point
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
		}
	}

	schedule_work(&ioa_cfg->work_q);
	return 0;
}
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}
static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
/**
 * ipr_halt_done - Shutdown prepare completion
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 *
 * Return value:
 *	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}
static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);