/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- SCSI device hot plug
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"
/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static int ipr_auto_create = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, and Obsidian */
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
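/*
 * The ipr_max_bus_speeds table above is indexed by the max_speed module
 * parameter (0 = 80 MB/s, 1 = U160, 2 = U320) to select the SCSI rate limit.
 */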
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(auto_create, ipr_auto_create, int, 0);
MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
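/*
 * Example usage (values are illustrative only): the module parameters above
 * are set at load time, e.g.
 *
 *	modprobe ipr max_speed=2 log_level=4 enable_cache=0
 */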
/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	"8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	"FFFE: Soft device bus error recovered by the IOA"},
	"4101: Soft device bus fabric error"},
	"FFF9: Device sector reassign successful"},
	"FFF7: Media error recovered by device rewrite procedures"},
	"7001: IOA sector reassignment successful"},
	"FFF9: Soft media error. Sector reassignment recommended"},
	"FFF7: Media error recovered by IOA rewrite procedures"},
	"FF3D: Soft PCI bus error recovered by the IOA"},
	"FFF6: Device hardware error recovered by the IOA"},
	"FFF6: Device hardware error recovered by the device"},
	"FF3D: Soft IOA error recovered by the IOA"},
	"FFFA: Undefined device response recovered by the IOA"},
	"FFF6: Device bus error, message or command phase"},
	"FFF6: Failure prediction threshold exceeded"},
	"8009: Impending cache battery pack failure"},
	"34FF: Disk device format in progress"},
	"Synchronization required"},
	"Not ready, IOA shutdown"},
	"Not ready, IOA has been shutdown"},
	"3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	"FFF3: Disk media format bad"},
	"3002: Addressed device failed to respond to selection"},
	"3100: Device bus error"},
	"3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	"4100: Hard device bus fabric error"},
	"9000: IOA reserved area data check"},
	"9001: IOA reserved area invalid data pattern"},
	"9002: IOA reserved area LRC error"},
	"102E: Out of alternate sectors for disk storage"},
	"FFF4: Data transfer underlength error"},
	"FFF4: Data transfer overlength error"},
	"3400: Logical unit failure"},
	"FFF4: Device microcode is corrupt"},
	"8150: PCI bus error"},
	"Unsupported device bus message received"},
	"FFF4: Disk device problem"},
	"8150: Permanent IOA failure"},
	"3010: Disk device returned wrong response to IOA"},
	"8151: IOA microcode error"},
	"Device bus status error"},
	"8157: IOA error requiring IOA reset to recover"},
	"Message reject received from the device"},
	"8008: A permanent cache battery pack failure occurred"},
	"9090: Disk unit has been modified after the last known status"},
	"9081: IOA detected device error"},
	"9082: IOA detected device error"},
	"3110: Device bus error, message or command phase"},
	"9091: Incorrect hardware configuration change has been detected"},
	"9073: Invalid multi-adapter configuration"},
	"4010: Incorrect connection between cascaded expanders"},
	"4020: Connections exceed IOA design limits"},
	"4030: Incorrect multipath connection"},
	"4110: Unsupported enclosure function"},
	"FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, commands not allowed to this device"},
	"Illegal request, command not allowed to a secondary adapter"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"Illegal request, dual adapter support not enabled"},
	"9031: Array protection temporarily suspended, protection resuming"},
	"9040: Array protection temporarily suspended, protection resuming"},
	"3140: Device bus not ready to ready transition"},
	"FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	"FFFB: SCSI bus was reset by another initiator"},
	"3029: A device replacement has occurred"},
	"9051: IOA cache data exists for a missing or failed device"},
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	"9025: Disk unit is not supported at its physical location"},
	"3020: IOA detected a SCSI bus configuration error"},
	"3150: SCSI bus configuration error"},
	"9074: Asymmetric advanced function disk configuration"},
	"4040: Incomplete multipath connection between IOA and enclosure"},
	"4041: Incomplete multipath connection between enclosure and device"},
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	"9076: Configuration error, missing remote IOA"},
	"4050: Enclosure does not support a required multipath function"},
	"9041: Array protection temporarily suspended"},
	"9042: Corrupt array parity detected on specified device"},
	"9030: Array no longer protected due to missing or failed disk unit"},
	"9071: Link operational transition"},
	"9072: Link not operational transition"},
	"9032: Array exposed but still protected"},
	"4061: Multipath redundancy level got better"},
	"4060: Multipath redundancy level got worse"},
	"Failure due to other device"},
	"9008: IOA does not support functions expected by devices"},
	"9010: Cache data associated with attached devices cannot be found"},
	"9011: Cache data belongs to devices other than those attached"},
	"9020: Array missing 2 or more devices with only 1 device present"},
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	"9022: Exposed array is missing a required device"},
	"9023: Array member(s) not at required physical locations"},
	"9024: Array not functional due to present hardware configuration"},
	"9026: Array not functional due to present hardware configuration"},
	"9027: Array is missing a device and parity is out of sync"},
	"9028: Maximum number of arrays already exist"},
	"9050: Required cache data cannot be located for a disk unit"},
	"9052: Cache data exists for a device that has been modified"},
	"9054: IOA resources not available due to previous problems"},
	"9092: Disk unit requires initialization before use"},
	"9029: Incorrect hardware configuration change has been detected"},
	"9060: One or more disk pairs are missing from an array"},
	"9061: One or more disks are missing from an array"},
	"9062: One or more disks are missing from an array"},
	"9063: Maximum number of functional arrays has been exceeded"},
	"Aborted command, invalid descriptor"},
	"Command terminated by host"}
};
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif
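/*
 * Note: ipr_trc_hook() assumes trace_index is sized (e.g. as a bitfield) so
 * that the post-increment wraps within the bounds of the trace array, making
 * the trace a ring buffer that silently overwrites its oldest entries.
 */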
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}
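/*
 * Note: ipr_get_free_ipr_cmnd() does not check free_q for emptiness; callers
 * hold the host lock and rely on the command block pool being sized for the
 * maximum number of simultaneously outstanding ops.
 */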
/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
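/*
 * The final readl() of the sense interrupt register flushes the posted MMIO
 * writes, guaranteeing the mask and clear have reached the adapter before
 * ipr_mask_and_clear_interrupts() returns.
 */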
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}
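/*
 * PCI_X_CMD_DPERR_E (data parity error recovery enable) and PCI_X_CMD_ERO
 * (enable relaxed ordering) are ORed into the saved value so that both are
 * set when the register is restored after an adapter reset.
 */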
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}
}
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
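/*
 * Note: ipr_send_blocking_cmd() is called with the host lock held; the lock
 * is dropped only while sleeping on the completion so the interrupt handler
 * can process the op, and is re-acquired before returning to the caller.
 */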
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
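/*
 * HCAMs are long-running async commands: the adapter holds on to one and
 * completes it only when it has a configuration change or error to report.
 * The done routines re-post the hostrcb, so the buffer cycles indefinitely.
 */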
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
}
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error =
		&hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    sizeof(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;
	}
}
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @data:		IOA error data
 * @len:		data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	error = &hostrcb->hcam.u.error.u.type_17_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_ext_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}
/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}
/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
			return i;

	return 0;
}
/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			   "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}
/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_oper_timeout -  Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	 the adapter for some reason, and the reset failed. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}
/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}
/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:		SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:		ioa config struct
 * @max_delay:		max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}
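/*
 * Note: the delay doubling above is assumed to give an exponential backoff
 * between register polls, bounded by max_delay: the first few polls are
 * cheap, while a slow adapter is not hammered with repeated MMIO reads.
 */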
/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:			destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
		schedule();
	}

	return bytes_copied;
}
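/*
 * Dump pages are allocated one at a time with GFP_ATOMIC, and the host lock
 * is held only around each ipr_get_ldump_data_section() call, so a pending
 * ABORT_DUMP request can be honored between sections of the copy.
 */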
/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}
/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:	dump struct
 *
 * Return value:
 * 	none
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, start_off, end_off;
	u32 bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != GET_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->format = IPR_SDT_FMT2;
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	 lengths to gather the real dump data. sdt represents the pointer
	 to the ioa generated dump table. Dump data will be extracted based
	 on entries in this table */
	sdt = &ioa_dump->sdt;

	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					sizeof(struct ipr_sdt) / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > IPR_NUM_SDT_ENTRIES)
		num_entries = IPR_NUM_SDT_ENTRIES;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
			end_off = be32_to_cpu(sdt->entry[i].end_offset);

			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
				bytes_to_copy = end_off - start_off;
				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;

	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}
#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
#endif
/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_release_dump(struct kref *kref)
{
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	kfree(dump);
}
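/*
 * Illustrative note (not driver code): the dump buffer is reference
 * counted with a kref, so every path that borrows the dump outside the
 * host lock pairs kref_get() with kref_put(..., ipr_release_dump). The
 * worker thread below follows this pattern:
 *
 *	kref_get(&dump->kref);			 // take a reference under the lock
 *	...use dump with the lock dropped...
 *	kref_put(&dump->kref, ipr_release_dump); // last put frees everything
 */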
/**
 * ipr_worker_thread - Worker thread
 * @data:	ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
 * changes are detected by the adapter.
 *
 * Return value:
 *	nothing
 **/
static void ipr_worker_thread(void *data)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg = data;
	u8 bus, target, lun;
	int did_work;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == GET_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

restart:
	do {
		did_work = 0;
		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				if (!scsi_device_get(sdev)) {
					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				break;
			}
		}
	} while(did_work);

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->cfgte.res_addr.bus;
			target = res->cfgte.res_addr.target;
			lun = res->cfgte.res_addr.lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
}
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @kobj:	kobject struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
			      loff_t off, size_t count)
{
	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int size = IPR_TRACE_SIZE;
	char *src = (char *)ioa_cfg->trace;

	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	memcpy(buf, &src[off], count);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return count;
}

static struct bin_attribute ipr_trace_attr = {
	.attr =	{
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
#endif
static const struct {
	enum ipr_cache_state state;
	char *name;
} cache_state [] = {
	{ CACHE_NONE, "none" },
	{ CACHE_DISABLED, "disabled" },
	{ CACHE_ENABLED, "enabled" }
};
/**
 * ipr_show_write_caching - Show the write caching attribute
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int i, len = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
		if (cache_state[i].state == ioa_cfg->cache_state) {
			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
			break;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}
/**
 * ipr_store_write_caching - Enable/disable adapter write cache
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will enable/disable adapter write cache.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_write_caching(struct class_device *class_dev,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	enum ipr_cache_state new_state = CACHE_INVALID;
	int i;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (ioa_cfg->cache_state == CACHE_NONE)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
			new_state = cache_state[i].state;
			break;
		}
	}

	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
		return -EINVAL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->cache_state == new_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return count;
	}

	ioa_cfg->cache_state = new_state;
	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return count;
}

static struct class_device_attribute ipr_ioa_cache_attr = {
	.attr = {
		.name =		"write_cache",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_write_caching,
	.store = ipr_store_write_caching
};
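/*
 * Example usage (illustrative; the host number and sysfs path depend on
 * the system):
 *
 *	# cat /sys/class/scsi_host/host0/write_cache
 *	enabled
 *	# echo disabled > /sys/class/scsi_host/host0/write_cache
 *
 * Writing a new state triggers a normal shutdown/reset of the adapter so
 * the cache setting takes effect, and the write blocks until the reset
 * completes.
 */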
/**
 * ipr_show_fw_version - Show the firmware version
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct class_device_attribute ipr_fw_version_attr = {
	.attr = {
		.name =		"fw_version",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_version,
};
/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @class_dev:	class device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_log_level(struct class_device *class_dev,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return count;
}

static struct class_device_attribute ipr_log_level_attr = {
	.attr = {
		.name =		"log_level",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};
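/*
 * Example usage (illustrative host number):
 *
 *	# echo 4 > /sys/class/scsi_host/host0/log_level
 *
 * Larger values make the driver log more detail (compare
 * IPR_DEFAULT_LOG_LEVEL and IPR_MAX_LOG_LEVEL); the value is parsed as a
 * base-10 integer.
 */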
/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

static struct class_device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name =		"run_diagnostics",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_diagnostics
};
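/*
 * Example usage (illustrative): writing anything to the attribute kicks
 * off a reset-based diagnostic pass; the write succeeds only if no
 * errors were logged while the adapter came back up.
 *
 *	# echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */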
/**
 * ipr_show_adapter_state - Show the adapter's state
 * @class_dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
	else
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_adapter_state - Change adapter state
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
		ioa_cfg->ioa_is_dead = 0;
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return count;
}

static struct class_device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name =		"state",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};
/**
 * ipr_store_reset_adapter - Reset the adapter
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return count;
}

static struct class_device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name =		"reset_host",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};
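/*
 * Example usage (illustrative host number): an adapter that has been
 * marked dead can be brought back through the state attribute, and a
 * healthy one can be reset:
 *
 *	# echo online > /sys/class/scsi_host/host0/state
 *	# echo 1 > /sys/class/scsi_host/host0/reset_host
 *
 * Both writes block until the resulting reset/reload finishes.
 */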
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:	buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 * 	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order, bsize_elem, num_elem, i, j;
	struct ipr_sglist *sglist;
	struct scatterlist *scatterlist;
	struct page *page;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << order);

	/* Determine the actual number of sg entries needed */
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
	else
		num_elem = buf_len / bsize_elem;

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist) +
			 (sizeof(struct scatterlist) * (num_elem - 1)),
			 GFP_KERNEL);

	if (sglist == NULL)
		return NULL;

	scatterlist = sglist->scatterlist;

	sglist->order = order;
	sglist->num_sg = num_elem;

	/* Allocate a bunch of sg elements */
	for (i = 0; i < num_elem; i++) {
		page = alloc_pages(GFP_KERNEL, order);
		if (!page) {
			/* Free up what we already allocated */
			for (j = i - 1; j >= 0; j--)
				__free_pages(scatterlist[j].page, order);
			kfree(sglist);
			return NULL;
		}

		scatterlist[i].page = page;
	}

	return sglist;
}
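/*
 * Worked example of the sizing math above (illustrative numbers,
 * assuming 4K pages and IPR_MAX_SGLIST == 64): for a 1 MB microcode
 * image,
 *
 *	sg_size    = 1048576 / 63     = 16644 bytes minimum per element
 *	order      = get_order(16644) = 3 (next power-of-two: 8 pages)
 *	bsize_elem = 4096 * 8         = 32768 bytes actually per element
 *	num_elem   = 1048576 / 32768  = 32 scatter/gather entries
 *
 * so the image is carved into 32 order-3 page allocations.
 */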
/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:	scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	int i;

	for (i = 0; i < sglist->num_sg; i++)
		__free_pages(sglist->scatterlist[i].page, sglist->order);

	kfree(sglist);
}
/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:	scatter/gather list pointer
 * @buffer:	buffer pointer
 * @len:	buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		kaddr = kmap(scatterlist[i].page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(scatterlist[i].page);

		scatterlist[i].length = bsize_elem;
	}

	if (len % bsize_elem) {
		kaddr = kmap(scatterlist[i].page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(scatterlist[i].page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}
/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
	ioarcb->write_ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
					sglist->num_sg, DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @class_dev:	class_device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct class_device *class_dev,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(class_dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	u8 *src;
	int len, result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	len = snprintf(fname, 99, "%s", buf);
	fname[len-1] = '\0';

	if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
		release_firmware(fw_entry);
		return -EINVAL;
	}

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct class_device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
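/*
 * Example usage (illustrative file and host names): the attribute takes
 * the name of a firmware image that request_firmware() can resolve,
 * typically a file under /lib/firmware:
 *
 *	# echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * The image header is validated against the adapter's card type before
 * the download-and-reset sequence starts.
 */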
static struct class_device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_cache_attr,
	NULL,
};
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @kobj:	kobject struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
			     loff_t off, size_t count)
{
	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	u8 *src;
	int len;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= offsetof(struct ipr_ioa_dump, ioa_data);

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}
/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	return 0;
}
/**
 * ipr_write_dump - Setup dump state of adapter
 * @kobj:	kobject struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes written
 **/
static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
			      loff_t off, size_t count)
{
	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
#endif
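/*
 * Example usage (illustrative): the binary "dump" attribute is armed
 * with a '1' and read back once the adapter has produced a dump:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/dump
 *	... wait for the dump to be collected ...
 *	# cat /sys/class/scsi_host/host0/dump > /tmp/ioa_dump
 *	# echo 0 > /sys/class/scsi_host/host0/dump
 *
 * Writing '0' releases the dump memory again.
 */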
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Return value:
 * 	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}
/**
 * ipr_change_queue_type - Change the device's queue type
 * @sdev:	scsi device struct
 * @tag_type:	type of tags to use
 *
 * Return value:
 * 	actual queue type set
 **/
static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res) {
		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
			/*
			 * We don't bother quiescing the device here since the
			 * adapter firmware does it for us.
			 */
			scsi_set_tag_type(sdev, tag_type);

			if (tag_type)
				scsi_activate_tcq(sdev, sdev->queue_depth);
			else
				scsi_deactivate_tcq(sdev, sdev->queue_depth);
		} else
			tag_type = 0;
	} else
		tag_type = 0;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return tag_type;
}
/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = 	"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	NULL,
};
/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
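/*
 * Worked example (illustrative numbers): for a disk of 71,096,640
 * 512-byte sectors, the mapping above reports
 *
 *	heads     = 128
 *	sectors   = 32
 *	cylinders = 71096640 / (128 * 32) = 17357
 *
 * Each "cylinder" is 128 * 32 = 4096 sectors = 2 MB, so cylinder-aligned
 * partitions land on 4k boundaries as intended.
 */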
/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		sdev->hostdata = NULL;
		res->sdev = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			sdev->timeout = IPR_VSET_RW_TIMEOUT;
			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
			sdev->allow_restart = 1;
		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 * 	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->cfgte.res_addr.bus == sdev->channel) &&
		    (res->cfgte.res_addr.target == sdev->id) &&
		    (res->cfgte.res_addr.lun == sdev->lun)) {
			res->sdev = sdev;
			res->add_to_ml = 0;
			res->in_erp = 0;
			sdev->hostdata = res;
			if (!ipr_is_naca_model(res))
				res->needs_sync_complete = 1;
			rc = 0;
			break;
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
/**
 * ipr_eh_host_reset - Reset the host adapter
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset as a result of error recovery.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);

	return rc;
}

static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_host_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	ioarcb->res_handle = res->cfgte.res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}
/**
 * ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	int rc;

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	if (!res)
		return FAILED;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->ioa_is_dead)
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
		}
	}

	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
	rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;

	return (rc ? FAILED : SUCCESS);
}

static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 * 	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
			    sizeof(res->cfgte.res_handle))) {
			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
			break;
		}
	}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}
/**
 * ipr_abort_timeout - An abort task has timed out
 * @ipr_cmd:	ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc;
	int op_found = 0;

	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
		return FAILED;
	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
		if (ipr_cmd->scsi_cmd == scsi_cmd) {
			ipr_cmd->done = ipr_scsi_eh_done;
			op_found = 1;
			break;
		}
	}

	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED)
		ioasc = 0;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
}
/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	return rc;
}
/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 * 	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      volatile u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);

		/* Clear the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}
/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 * @regs:	pt_regs struct
 *
 * Return value:
 * 	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	volatile u32 int_reg, int_mask_reg;
	u32 ioasc;
	u16 cmd_index;
	struct ipr_cmnd *ipr_cmd;
	irqreturn_t rc = IRQ_NONE;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!ioa_cfg->allow_interrupts) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it */
	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return IRQ_NONE;
	}

	while (1) {
		ipr_cmd = NULL;

		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       ioa_cfg->toggle_bit) {

			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
				ioa_cfg->errors_logged++;
				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");

				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
					ioa_cfg->sdt_state = GET_DUMP;

				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return IRQ_HANDLED;
			}

			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];

			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

			list_del(&ipr_cmd->queue);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);

			rc = IRQ_HANDLED;

			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
				ioa_cfg->hrrq_curr++;
			} else {
				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
				ioa_cfg->toggle_bit ^= 1u;
			}
		}

		if (ipr_cmd != NULL) {
			/* Clear the PCI interrupt */
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
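/*
 * Illustrative sketch (not driver code) of the HRRQ consumer protocol
 * used above: the adapter posts completion handles into a circular host
 * request/response queue, and a toggle bit distinguishes fresh entries
 * from stale ones once the queue wraps. A minimal consumer looks like:
 *
 *	while ((be32_to_cpu(*curr) & IPR_HRRQ_TOGGLE_BIT) == toggle) {
 *		process(*curr);
 *		if (curr < end)
 *			curr++;
 *		else {			// wrapped: flip expected toggle
 *			curr = start;
 *			toggle ^= 1u;
 *		}
 *	}
 *
 * so no producer/consumer index needs to be exchanged with the adapter.
 */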
/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i;
	struct scatterlist *sglist;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;

	length = scsi_cmd->request_bufflen;

	if (length == 0)
		return 0;

	if (scsi_cmd->use_sg) {
		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
						 scsi_cmd->request_buffer,
						 scsi_cmd->use_sg,
						 scsi_cmd->sc_data_direction);

		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
			ioarcb->write_data_transfer_length = cpu_to_be32(length);
			ioarcb->write_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
			ioadl_flags = IPR_IOADL_FLAGS_READ;
			ioarcb->read_data_transfer_length = cpu_to_be32(length);
			ioarcb->read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
		}

		sglist = scsi_cmd->request_buffer;

		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
			ioadl[i].flags_and_data_len =
				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
			ioadl[i].address =
				cpu_to_be32(sg_dma_address(&sglist[i]));
		}

		if (likely(ipr_cmd->dma_use_sg)) {
			ioadl[i-1].flags_and_data_len |=
				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
			return 0;
		} else
			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
	} else {
		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
			ioarcb->write_data_transfer_length = cpu_to_be32(length);
			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
			ioadl_flags = IPR_IOADL_FLAGS_READ;
			ioarcb->read_data_transfer_length = cpu_to_be32(length);
			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		}

		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
						     scsi_cmd->request_buffer, length,
						     scsi_cmd->sc_data_direction);

		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
			ipr_cmd->dma_use_sg = 1;
			ioadl[0].flags_and_data_len =
				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
			return 0;
		} else
			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
	}

	return -1;
}
/**
 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 * 	task attributes
 **/
static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
{
	u8 tag[2];
	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;

	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
		switch (tag[0]) {
		case MSG_SIMPLE_TAG:
			rc = IPR_FLAGS_LO_SIMPLE_TASK;
			break;
		case MSG_HEAD_TAG:
			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
			break;
		case MSG_ORDERED_TAG:
			rc = IPR_FLAGS_LO_ORDERED_TASK;
			break;
		}
	}

	return rc;
}
/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb;
	struct ipr_ioasa *ioasa;

	ioarcb = &ipr_cmd->ioarcb;
	ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;
}
/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_cmd->ioadl[0].flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
	ipr_cmd->ioadl[0].address =
		cpu_to_be32(ipr_cmd->sense_buffer_dma);

	ipr_cmd->ioarcb.read_ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ipr_cmd->ioarcb.read_data_transfer_length =
		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}
/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_get_tag_type(scsi_cmd->device)) {
		ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:	resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->ilid != 0)
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
		data_len = sizeof(struct ipr_ioasa);
	else
		data_len = be16_to_cpu(ioasa->ret_stat_len);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}
/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 * @sense_buf:	sense data buffer
 *
 * Return value:
 * 	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
	u32 ioasc = be32_to_cpu(ioasa->ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
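/*
 * Worked example (illustrative values): for a DASD media error
 * (IPR_IOASC_MED_DO_NOT_REALLOC) at LBA 0x12345678, the fixed-format
 * branch above produces:
 *
 *	sense_buf[0]    = 0x70 | 0x80	(fixed format, Valid bit set)
 *	sense_buf[2]    = sense key	(from IPR_IOASC_SENSE_KEY)
 *	sense_buf[3..6] = 12 34 56 78	(information field = failing LBA)
 *	sense_buf[7]    = 6		(additional length)
 *
 * The descriptor format (0x72) branch is used only for vset devices
 * whose failing LBA needs more than 32 bits.
 */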
/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
		     SCSI_SENSE_BUFFERSIZE));
	return 1;
}
/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (!res) {
		ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (ipr_is_gscsi(res))
		ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
	else
		ipr_gen_sense(ipr_cmd);

	switch (ioasc & IPR_IOASC_IOASC_MASK) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	scsi_cmd->scsi_done(scsi_cmd);
}
/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		scsi_cmd->scsi_done(scsi_cmd);
	} else
		ipr_erp_start(ioa_cfg, ipr_cmd);
}
/**
 * ipr_queuecommand - Queue a mid-layer request
 * @scsi_cmd:	scsi command struct
 * @done:		done function
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
			    void (*done) (struct scsi_cmnd *))
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	int rc = 0;

	scsi_cmd->scsi_done = done;
	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;
	scsi_cmd->result = (DID_OK << 16);

	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		scsi_cmd->result = (DID_NO_CONNECT << 16);
		scsi_cmd->scsi_done(scsi_cmd);
		return 0;
	}

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ioarcb->res_handle = res->cfgte.res_handle;
	ipr_cmd->done = ipr_scsi_done;
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		if (res->needs_sync_complete) {
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
			res->needs_sync_complete = 0;
		}

		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
	}

	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

	if (likely(rc == 0))
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);

	if (likely(rc == 0)) {
		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	return 0;
}
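/*
 * A note on the submission path above: once the IOARCB and its IOADL
 * are built, writing the IOARCB bus address to the IOARRIN register is
 * what hands the op to the adapter; the memory barrier keeps the
 * descriptor writes ordered ahead of that MMIO doorbell.
 */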
/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
 *
 * Return value:
 * 	pointer to buffer with description string
 **/
static const char * ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME
};
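/*
 * This template is how the driver hooks into the SCSI mid-layer: it is
 * passed to scsi_host_alloc() in ipr_probe_ioa() below, so every host
 * instance created for an adapter shares these entry points and limits.
 */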
#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
	PV_NORTHSTAR,
	PV_PULSAR,
	PV_POWER4,
	PV_ICESTAR,
	PV_SSTAR,
	PV_POWER4p,
	PV_630,
	PV_630p
};

/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 * 	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
	u8 rev_id;
	int i;

	if (ioa_cfg->type == 0x5702) {
		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
					 &rev_id) == PCIBIOS_SUCCESSFUL) {
			if (rev_id < 4) {
				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
					if (__is_processor(ipr_blocked_processors[i]))
						return 1;
				}
			}
		}
	}
	return 0;
}
#else
#define ipr_invalid_adapter(ioa_cfg) 0
#endif
/**
 * ipr_ioa_bringdown_done - IOA bring down completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter bring down.
 * It wakes any reset sleepers.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);
	LEAVE;

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioa_reset_done - IOA reset completion.
 * @ipr_cmd:	ipr command struct
 *
 * This function processes the completion of an adapter reset.
 * It schedules any necessary mid-layer add/removes and
 * wakes any reset sleepers.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_hostrcb *hostrcb, *temp;
	int i = 0;

	ENTER;
	ioa_cfg->in_reset_reload = 0;
	ioa_cfg->allow_cmds = 1;
	ioa_cfg->reset_cmd = NULL;
	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
			ipr_trace;
			break;
		}
	}
	schedule_work(&ioa_cfg->work_q);

	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
		list_del(&hostrcb->queue);
		if (i++ < IPR_NUM_LOG_HCAMS)
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
		else
			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	}

	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");

	ioa_cfg->reset_retries = 0;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	wake_up_all(&ioa_cfg->reset_wait_q);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	scsi_unblock_requests(ioa_cfg->host);
	spin_lock_irq(ioa_cfg->host->host_lock);

	if (!ioa_cfg->allow_cmds)
		scsi_block_requests(ioa_cfg->host);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
 * @supported_dev:	supported device struct
 * @vpids:			vendor product id struct
 *
 * Return value:
 * 	none
 **/
static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
				 struct ipr_std_inq_vpids *vpids)
{
	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
	supported_dev->num_records = 1;
	supported_dev->data_length =
		cpu_to_be16(sizeof(struct ipr_supported_device));
	supported_dev->reserved = 0;
}
/**
 * ipr_set_supported_devs - Send Set Supported Devices for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Set Supported Devices to the adapter
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_resource_entry *res = ipr_cmd->u.res;

	ipr_cmd->job_step = ipr_ioa_reset_done;

	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
		if (!ipr_is_scsi_disk(res))
			continue;

		ipr_cmd->u.res = res;
		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;

		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;

		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
							sizeof(struct ipr_supported_device));
		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
					     offsetof(struct ipr_misc_cbs, supp_dev));
		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ioarcb->write_data_transfer_length =
			cpu_to_be32(sizeof(struct ipr_supported_device));

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_SET_SUP_DEVICE_TIMEOUT);

		ipr_cmd->job_step = ipr_set_supported_devs;
		return IPR_RC_JOB_RETURN;
	}

	return IPR_RC_JOB_CONTINUE;
}
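/*
 * ipr_set_supported_devs above is re-entered once per disk: it issues
 * a single Set Supported Devices op, points job_step back at itself,
 * and returns; when the op completes, the reset job calls it again and
 * list_for_each_entry_continue() resumes at the next resource. Only
 * when the whole list has been walked does it fall through to
 * ipr_ioa_reset_done.
 */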
/**
 * ipr_setup_write_cache - Disable write cache if needed
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the adapter's write cache to the desired setting
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_cmd->job_step = ipr_set_supported_devs;
	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
				    struct ipr_resource_entry, queue);

	if (ioa_cfg->cache_state != CACHE_DISABLED)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_get_mode_page - Locate specified mode page
 * @mode_pages:	mode page buffer
 * @page_code:	page code to find
 * @len:		minimum required length for mode page
 *
 * Return value:
 * 	pointer to mode page / NULL on failure
 **/
static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
			       u32 page_code, u32 len)
{
	struct ipr_mode_page_hdr *mode_hdr;
	u32 page_length;
	u32 length;

	if (!mode_pages || (mode_pages->hdr.length == 0))
		return NULL;

	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
	mode_hdr = (struct ipr_mode_page_hdr *)
		(mode_pages->data + mode_pages->hdr.block_desc_len);

	while (length) {
		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
				return mode_hdr;
			break;
		} else {
			page_length = (sizeof(struct ipr_mode_page_hdr) +
				       mode_hdr->page_length);
			length -= page_length;
			mode_hdr = (struct ipr_mode_page_hdr *)
				((unsigned long)mode_hdr + page_length);
		}
	}

	return NULL;
}
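/*
 * Length arithmetic above, worked through with hypothetical values: if
 * hdr.length is 0x23 (the mode data length byte, which excludes
 * itself) and block_desc_len is 8, the mode pages span
 * (0x23 + 1) - 4 - 8 = 24 bytes following the block descriptors; the 4
 * covers the rest of the mode parameter header.
 */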
/**
 * ipr_check_term_power - Check for term power errors
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	IOAFP mode pages buffer
 *
 * Check the IOAFP's mode page 28 for term power errors
 *
 * Return value:
 * 	nothing
 **/
static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_mode_pages *mode_pages)
{
	int i;
	int entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	bus = mode_page->bus;

	for (i = 0; i < mode_page->num_entries; i++) {
		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
			dev_err(&ioa_cfg->pdev->dev,
				"Term power is absent on scsi bus %d\n",
				bus->res_addr.bus);
		}

		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
	}
}
/**
 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
 * @ioa_cfg:	ioa config struct
 *
 * Looks through the config table checking for SES devices. If
 * the SES device is in the SES table indicating a maximum SCSI
 * bus speed, the speed is limited for the bus.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
{
	u32 max_xfer_rate;
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
						       ioa_cfg->bus_attr[i].bus_width);

		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
	}
}
/**
 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
 * @ioa_cfg:	ioa config struct
 * @mode_pages:	mode page 28 buffer
 *
 * Updates mode page 28 based on driver configuration
 *
 * Return value:
 * 	none
 **/
static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_mode_pages *mode_pages)
{
	int i, entry_length;
	struct ipr_dev_bus_entry *bus;
	struct ipr_bus_attributes *bus_attr;
	struct ipr_mode_page28 *mode_page;

	mode_page = ipr_get_mode_page(mode_pages, 0x28,
				      sizeof(struct ipr_mode_page28));

	entry_length = mode_page->entry_length;

	/* Loop for each device bus entry */
	for (i = 0, bus = mode_page->bus;
	     i < mode_page->num_entries;
	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
			dev_err(&ioa_cfg->pdev->dev,
				"Invalid resource address reported: 0x%08X\n",
				IPR_GET_PHYS_LOC(bus->res_addr));
			continue;
		}

		bus_attr = &ioa_cfg->bus_attr[i];
		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
		bus->bus_width = bus_attr->bus_width;
		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
		if (bus_attr->qas_enabled)
			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
		else
			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
	}
}
/**
 * ipr_build_mode_select - Build a mode select command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:		Byte 1 of Mode Select command
 * @dma_addr:	DMA buffer address
 * @xfer_len:	data transfer length
 *
 * Return value:
 * 	none
 **/
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
				  __be32 res_handle, u8 parm, u32 dma_addr,
				  u8 xfer_len)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
	ioarcb->cmd_pkt.cdb[1] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
	ioadl->address = cpu_to_be32(dma_addr);
	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
}
/**
 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sets up the SCSI bus attributes and sends
 * a Mode Select for Page 28 to activate them.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	int length;

	ENTER;
	ipr_scsi_bus_speed_limit(ioa_cfg);
	ipr_check_term_power(ioa_cfg, mode_pages);
	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;

	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
			      length);

	ipr_cmd->job_step = ipr_setup_write_cache;
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_build_mode_sense - Builds a mode sense command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:		Byte 2 of mode sense command
 * @dma_addr:	DMA address of mode sense buffer
 * @xfer_len:	Size of DMA buffer
 *
 * Return value:
 * 	none
 **/
static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
				 __be32 res_handle,
				 u8 parm, u32 dma_addr, u8 xfer_len)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
	ioarcb->cmd_pkt.cdb[2] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;

	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
	ioadl->address = cpu_to_be32(dma_addr);
	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
}
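/*
 * ipr_build_mode_select and ipr_build_mode_sense above are mirror
 * images: the select is a write to the adapter (write IOADL fields,
 * IPR_FLAGS_HI_WRITE_NOT_READ, parm in CDB byte 1), while the sense is
 * a read (read IOADL fields, page code in CDB byte 2).
 */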
/**
 * ipr_reset_cmd_failed - Handle failure of IOA reset command
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of an IOA bringup command.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	dev_err(&ioa_cfg->pdev->dev,
		"0x%02X failed with IOASC: 0x%08X\n",
		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of a Mode Sense to the IOAFP.
 * Some adapters do not handle all mode pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
{
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_setup_write_cache;
		return IPR_RC_JOB_CONTINUE;
	}

	return ipr_reset_cmd_failed(ipr_cmd);
}
/**
 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 28 mode sense to the IOA to
 * retrieve SCSI bus attributes.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x28, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_init_res_table - Initialize the resource table
 * @ipr_cmd:	ipr command struct
 *
 * This function looks through the existing resource table, comparing
 * it with the config table. This function will take care of old/new
 * devices and schedule adding/removing them from the mid-layer
 * as appropriate.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res, *temp;
	struct ipr_config_table_entry *cfgte;
	int found, i;
	LIST_HEAD(old_res);

	ENTER;
	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");

	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
		list_move_tail(&res->queue, &old_res);

	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
		cfgte = &ioa_cfg->cfg_table->dev[i];
		found = 0;

		list_for_each_entry_safe(res, temp, &old_res, queue) {
			if (!memcmp(&res->cfgte.res_addr,
				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
				found = 1;
				break;
			}
		}

		if (!found) {
			if (list_empty(&ioa_cfg->free_res_q)) {
				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
				break;
			}

			found = 1;
			res = list_entry(ioa_cfg->free_res_q.next,
					 struct ipr_resource_entry, queue);
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
			ipr_init_res_entry(res);
			res->add_to_ml = 1;
		}

		if (found)
			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
	}

	list_for_each_entry_safe(res, temp, &old_res, queue) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		} else {
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	}

	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Query IOA Configuration command
 * to the adapter to retrieve the IOA configuration table.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ENTER;
	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
		 ucode_vpd->major_release, ucode_vpd->card_type,
		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;

	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length =
		cpu_to_be32(sizeof(struct ipr_config_table));

	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));

	ipr_cmd->job_step = ipr_init_res_table;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 * @flags:		inquiry flags (byte 1 of the CDB)
 * @page:		page code to request
 * @dma_addr:	DMA address of the inquiry buffer
 * @xfer_len:	transfer length
 *
 * This utility function sends an inquiry to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      u32 dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;

	ENTER;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);

	ioadl->address = cpu_to_be32(dma_addr);
	ioadl->flags_and_data_len =
		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
	LEAVE;
}
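/*
 * Example CDB produced above for the Page 3 inquiry issued during
 * bringup (flags = 1, page = 3): 0x12 0x01 0x03 0x00 <xfer_len> - a
 * standard SCSI INQUIRY with the EVPD bit set requesting VPD page 3.
 */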
/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:		inquiry page 0 buffer
 * @page:		page code to check for
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
			return 1;

	return 0;
}
/**
 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 3 inquiry to the adapter
 * to retrieve software VPD information.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;

	ENTER;

	if (!ipr_inquiry_page_supported(page0, 1))
		ioa_cfg->cache_state = CACHE_NONE;

	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5] = { 0 };

	ENTER;

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ENTER;
	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->cmd_pkt.cdb[2] =
		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
	ioarcb->cmd_pkt.cdb[3] =
		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[4] =
		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[5] =
		((u32) ioa_cfg->host_rrq_dma) & 0xff;
	ioarcb->cmd_pkt.cdb[7] =
		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] =
		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;

	ipr_cmd->job_step = ipr_ioafp_std_inquiry;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
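/*
 * CDB layout above: bytes 2..5 carry the host RRQ bus address
 * big-endian (e.g. a hypothetical host_rrq_dma of 0x1234ABCD yields
 * cdb[2]=0x12, cdb[3]=0x34, cdb[4]=0xAB, cdb[5]=0xCD), and bytes 7..8
 * carry the queue size in bytes, sizeof(u32) * IPR_NUM_CMD_BLKS.
 */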
/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}
/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);

	/* Initialize Host RRQ pointers */
	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
	ioa_cfg->toggle_bit = 1;

	/* Zero out config table */
	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
}
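/*
 * toggle_bit flips each time hrrq_curr wraps past hrrq_end; comparing
 * it against the toggle bit the adapter writes into each RRQ entry is
 * what lets the interrupt handler distinguish fresh completions from
 * stale entries left over from the previous pass through the queue.
 */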
/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;

	ENTER;
	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	ioa_cfg->allow_interrupts = 1;
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:		ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}
/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:		ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	if (!ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	length = (be32_to_cpu(sdt.entry[0].end_offset) -
		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;

	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].bar_str_offset),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc)
		ipr_handle_log_data(ioa_cfg, hostrcb);
	else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}
/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
	pci_unblock_user_cfg_access(ioa_cfg->pdev);
	rc = pci_restore_state(ioa_cfg->pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->ioa_unit_checked) {
		ioa_cfg->ioa_unit_checked = 0;
		ipr_get_unit_check_buffer(ioa_cfg);
		ipr_cmd->job_step = ipr_reset_alert;
		ipr_reset_start_timer(ipr_cmd, 0);
		return IPR_RC_JOB_RETURN;
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;

		if (GET_DUMP == ioa_cfg->sdt_state) {
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
			ipr_cmd->job_step = ipr_reset_wait_for_dump;
			schedule_work(&ioa_cfg->work_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	LEAVE;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc;

	ENTER;
	pci_block_user_cfg_access(ioa_cfg->pdev);
	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc != PCIBIOS_SUCCESSFUL) {
		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	} else {
		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	}

	LEAVE;
	return rc;
}
/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}
/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_start_bist;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}
/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	ENTER;
	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_start_bist;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}
/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ENTER;
	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	LEAVE;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	ENTER;
	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else
			timeout = IPR_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	LEAVE;
	return rc;
}
/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 *
 * Return value:
 * 	none
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while(rc == IPR_RC_JOB_CONTINUE);
}
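/*
 * The loop above is the heart of the reset state machine: a job_step
 * returning IPR_RC_JOB_CONTINUE runs the next step synchronously in
 * this loop, while IPR_RC_JOB_RETURN means the step queued an adapter
 * command or timer whose completion will re-enter ipr_reset_ioa_job().
 */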
/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;

	ioa_cfg->in_reset_reload = 1;
	ioa_cfg->allow_cmds = 0;
	scsi_block_requests(ioa_cfg->host);

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}
/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		ioa_cfg->ioa_is_dead = 1;

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			spin_unlock_irq(ioa_cfg->host->host_lock);
			scsi_unblock_requests(ioa_cfg->host);
			spin_lock_irq(ioa_cfg->host->host_lock);
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 */
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	/* Disallow new interrupts, avoid loop */
	ipr_cmd->ioa_cfg->allow_interrupts = 0;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}
/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 */
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
				IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 */
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	if (ioa_cfg->ioa_is_dead) {
		rc = -EIO;
	} else if (ipr_invalid_adapter(ioa_cfg)) {
		if (!ipr_testmode)
			rc = -EIO;

		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	LEAVE;
	return rc;
}
/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		if (ioa_cfg->ipr_cmnd_list[i])
			pci_pool_free(ioa_cfg->ipr_cmd_pool,
				      ioa_cfg->ipr_cmnd_list[i],
				      ioa_cfg->ipr_cmnd_list_dma[i]);

		ioa_cfg->ipr_cmnd_list[i] = NULL;
	}

	if (ioa_cfg->ipr_cmd_pool)
		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);

	ioa_cfg->ipr_cmd_pool = NULL;
}
/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);
	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table,
			    ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		pci_free_consistent(ioa_cfg->pdev,
				    sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}
/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ENTER;
	free_irq(pdev->irq, ioa_cfg);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
	LEAVE;
}
/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -ENOMEM on allocation failure
 **/
static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i;

	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
						 sizeof(struct ipr_cmnd), 8, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioarcb->ioasa_host_pci_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	}

	return 0;
}
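/*
 * host_response_handle is the command index shifted left by two; the
 * adapter echoes it back through the host RRQ, where the low two bits
 * of each entry appear to be reserved for the toggle/valid indication
 * rather than the command index.
 */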
/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / non-zero for error
 **/
static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ENTER;
	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);

	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
						sizeof(struct ipr_misc_cbs),
						&ioa_cfg->vpd_cbs_dma);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
						 sizeof(u32) * IPR_NUM_CMD_BLKS,
						 &ioa_cfg->host_rrq_dma);

	if (!ioa_cfg->host_rrq)
		goto out_ipr_free_cmd_blocks;

	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
						  sizeof(struct ipr_config_table),
						  &ioa_cfg->cfg_table_dma);

	if (!ioa_cfg->cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_NUM_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
							   sizeof(struct ipr_hostrcb),
							   &ioa_cfg->hostrcb_dma[i]);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	LEAVE;
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
				    ioa_cfg->hostrcb[i],
				    ioa_cfg->hostrcb_dma[i]);
	}
	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}
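/*
 * Error unwinding above follows the usual kernel goto-ladder pattern:
 * each allocation failure jumps to the label that frees everything
 * allocated before it, and the hostrcb unwind reuses the loop index to
 * free only the entries below the one that failed.
 */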
/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 *
 * Return value:
 * 	none
 **/
static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
				       struct Scsi_Host *host, struct pci_dev *pdev)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	if (!ipr_auto_create)
		ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->free_q);
	INIT_LIST_HEAD(&ioa_cfg->pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	ioa_cfg->sdt_state = INACTIVE;
	if (ipr_enable_cache)
		ioa_cfg->cache_state = CACHE_ENABLED;
	else
		ioa_cfg->cache_state = CACHE_DISABLED;

	ipr_initialize_bus_attr(ioa_cfg);

	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = IPR_MAX_BUS_TO_SCAN;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	pci_set_drvdata(pdev, ioa_cfg);

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
}
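
/*
 * A minimal sketch (not part of the driver) of how the addresses computed
 * above are consumed: once ipr_init_ioa_cfg() has added the chip-specific
 * offsets to the ioremapped base, callers hand the precomputed addresses
 * straight to the MMIO accessors, e.g.
 *
 *	u32 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
 *	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
 *
 * The driver's actual masking logic lives in ipr_mask_and_clear_interrupts().
 */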

/**
 * ipr_get_chip_cfg - Find adapter chip configuration
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	ptr to chip config on success / NULL on failure
 **/
static const struct ipr_chip_cfg_t * __devinit
ipr_get_chip_cfg(const struct pci_device_id *dev_id)
{
	int i;

	if (dev_id->driver_data)
		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return ipr_chip[i].cfg;

	return NULL;
}
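
/*
 * In practice the table-scan fallback above is rarely taken: every entry
 * in ipr_pci_table below stores a pointer to the matching ipr_chip_cfg
 * element in its driver_data field, so the cast at the top of
 * ipr_get_chip_cfg() resolves the chip configuration immediately.
 */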

/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
				   const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	u32 rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc;

	if ((rc = pci_enable_device(pdev))) {
		dev_err(&pdev->dev, "Cannot enable adapter\n");
		goto out;
	}

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);

	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));

	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);

	if (!ioa_cfg->chip_cfg) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nomem;
	}

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto cleanup_nomem;

	rc = ipr_alloc_mem(ioa_cfg);

	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto cleanup_nomem;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;

	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);

	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	spin_lock(&ipr_driver_lock);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock(&ipr_driver_lock);

out:
	return rc;

cleanup_nolog:
	ipr_free_mem(ioa_cfg);
cleanup_nomem:
	iounmap(ipr_regs);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	goto out;
}
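
/*
 * Probe is split in two stages: ipr_probe_ioa() above performs the
 * sleepable host-side setup (PCI enable, MMIO mapping, memory allocation,
 * IRQ registration), while ipr_probe_ioa_part2(), invoked from ipr_probe()
 * below, then brings the adapter itself operational before the SCSI host
 * is registered and scanned.
 */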

/**
 * ipr_scan_vsets - Scans for VSET devices
 * @ioa_cfg:	ioa config struct
 *
 * Description: Since the VSET resources do not follow SAM in that we can have
 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
 *
 * Return value:
 * 	none
 **/
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
{
	int target, lun;

	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}
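
/*
 * Why the manual loop: the SCSI midlayer's normal scan discovers LUNs by
 * probing LUN 0 and issuing REPORT LUNS, so a VSET target exposing only,
 * say, LUN 3 would never be found. Walking every (target, lun) pair and
 * calling scsi_add_device() sidesteps that; nonexistent devices simply
 * fail the midlayer's INQUIRY and are discarded.
 */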

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 * 	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
}
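
/*
 * Canonical caller pattern (a sketch; both __ipr_remove() and
 * ipr_shutdown() below follow it): initiate the bringdown under the host
 * lock, then drop the lock before sleeping on reset_wait_q:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */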

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_scheduled_work();
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock(&ipr_driver_lock);
	list_del(&ioa_cfg->queue);
	spin_unlock(&ipr_driver_lock);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);
}
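
/*
 * Locking note: host_lock is a spinlock, so it must be dropped before
 * wait_event() and flush_scheduled_work() above, both of which can sleep.
 * It is then re-taken only long enough to unlink the adapter and fix up
 * sdt_state before the final teardown.
 */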

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 * 	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);
}

/**
 * ipr_probe - Adapter hot plug add entry point
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/
static int __devinit ipr_probe(struct pci_dev *pdev,
			       const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ipr_scan_vsets(ioa_cfg);
	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
	ioa_cfg->allow_ml_add_del = 1;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 * 	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}

static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	return pci_module_init(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);
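
/*
 * Note: pci_module_init() is the older name for registering a PCI driver
 * from module init; later kernels spell this as a direct call to
 * pci_register_driver(), which is what the pci_unregister_driver() in
 * ipr_exit() pairs with.
 */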