/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1925},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP,     0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP,     PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

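/*
 * Illustrative note (not from the original comments): each entry above
 * matches (vendor, device, subvendor, subdevice); the final entry uses
 * PCI_ANY_ID wildcards plus a class match on PCI_CLASS_STORAGE_RAID, so
 * a hypothetical new HP RAID-class board with subsystem ID
 * 0x103C:0x9999 would miss every explicit entry but still hit the
 * class wildcard, leaving it to probe-time board-id lookup (and the
 * hpsa_allow_any module parameter) to decide whether to drive it.
 */
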
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array", &SA5_access},
	{0x21BE103C, "Smart Array", &SA5_access},
	{0x21BF103C, "Smart Array", &SA5_access},
	{0x21C0103C, "Smart Array", &SA5_access},
	{0x21C1103C, "Smart Array", &SA5_access},
	{0x21C2103C, "Smart Array", &SA5_access},
	{0x21C3103C, "Smart Array", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array", &SA5_access},
	{0x21C7103C, "Smart Array", &SA5_access},
	{0x21C8103C, "Smart Array", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);

#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, int *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
			"changed, action required\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

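/*
 * Illustrative note: for fixed-format SCSI sense data, byte 2 (low
 * nibble) carries the sense key and bytes 12/13 carry the ASC/ASCQ
 * pair, which is why SenseInfo[2] is tested against UNIT_ATTENTION
 * above and SenseInfo[12] selects the specific unit attention reason.
 */
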
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

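/*
 * Worked example (illustrative): the test above keeps only the top two
 * bits of byte 3 of the 8-byte LUN address, so any value in the range
 * 0x40-0x7F there (top bits == 01) classifies the address as a logical
 * volume, e.g. scsi3addr[] = { 0, 0, 0, 0x40, ... }, while 0x00 or
 * 0xC0 in that byte does not.
 */
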
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"1(ADM)", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN	(ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
		host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};

/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_pool *rq = &h->reply_queue[q];
	unsigned long flags;

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

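/*
 * Illustrative walk-through of the reply ring above: the controller
 * writes tags into rq->head[] and flips the low "toggle" bit each time
 * it laps the ring.  The driver consumes entries while that toggle bit
 * matches rq->wraparound; when current_entry wraps past max_commands,
 * rq->wraparound is inverted so stale entries left over from the
 * previous lap are not mistaken for new completions.
 */
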
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector > 0))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}

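/*
 * Worked example (illustrative): for a command with c->Header.SGList
 * == 5 and h->blockFetchTable[5] == 2, the tag becomes
 * busaddr | 1 | (2 << 1) - bit 0 selects performant mode and bits 1-3
 * carry the block fetch table entry telling the controller how much of
 * the command to fetch up front.
 */
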
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

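/*
 * Note (an assumption based on the test above, not original
 * commentary): BMIC pass-through requests carry the BMIC sub-command
 * in CDB byte 6, so a firmware flash is identified by the BMIC_WRITE
 * opcode in cdb[0] combined with BMIC_FLASH_FIRMWARE in cdb[6].
 */
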
/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should, so we dial down lockup detection during firmware flash and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

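/*
 * The atomic counter lets multiple in-flight flash commands nest: each
 * submission bumps firmware_flash_in_progress, and only the completion
 * that drops the count back to zero (atomic_dec_and_test) restores the
 * normal 30 second heartbeat sample interval.
 */
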
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c);
		break;
	default:
		set_performant_mode(h, c);
	}
	dial_down_lockup_detection_during_fw_flash(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	spin_unlock_irqrestore(&h->lock, flags);
	start_io(h);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

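/*
 * Illustrative example: with devices already at targets 0, 1 and 3 on
 * this bus, the bitmap has bits 0, 1 and 3 set, find_first_zero_bit()
 * returns 2, and the new physical device is assigned target 2, lun 0.
 */
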
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed. */
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->raid_map = new_entry->raid_map;

	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

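/*
 * Illustrative usage: callers switch on the result, e.g.
 *
 *	switch (hpsa_scsi_find_entry(sd[i], h->dev, h->ndevices, &entry)) {
 *	case DEVICE_NOT_FOUND:	add it;			break;
 *	case DEVICE_CHANGED:	replace the slot;	break;
 *	case DEVICE_UPDATED:	patch attributes;	break;
 *	case DEVICE_SAME:	nothing to do;		break;
 *	}
 *
 * which is exactly how adjust_hpsa_scsi_table() below consumes it.
 */
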
static void hpsa_monitor_offline_device(struct ctlr_info *h,
		unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
				h->scsi_host->host_no,
				sd[i]->bus, sd[i]->target, sd[i]->lun);
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

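/*
 * In short, adjust_hpsa_scsi_table() is a two-pass reconcile: pass one
 * walks h->dev[] removing or replacing anything absent from or changed
 * in sd[], pass two walks sd[] adding anything new, and only afterwards
 * (outside h->devlock) is the SCSI midlayer told about removals and
 * additions.  E.g. a pulled drive shows up as DEVICE_NOT_FOUND in pass
 * one and then as a single scsi_remove_device() notification.
 */
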
/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
					h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
		(c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr.lower = 0;
		chain_sg->Addr.upper = 0;
		return -1;
	}
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
	return 0;
}

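/*
 * Worked example (illustrative): when a request needs more scatter-
 * gather descriptors than fit in the command itself, the last embedded
 * slot (index max_cmd_sg_entries - 1) is rewritten above as an
 * HPSA_SG_CHAIN pointer, and the overflow descriptors live in the
 * separately DMA-mapped chain block - chain_sg->Len bytes of them,
 * i.e. (Header.SGTotal - max_cmd_sg_entries) descriptors.
 */
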
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
					struct CommandList *c,
					struct scsi_cmnd *cmd,
					struct io_accel2_cmd *c2)
{
	int data_len;
	int retry = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			dev_warn(&h->pdev->dev,
				"%s: task complete with check condition.\n",
				"HP SSD Smart Path");
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT)
				break;
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			dev_warn(&h->pdev->dev,
				"%s: task complete with BUSY status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			dev_warn(&h->pdev->dev,
				"%s: task complete with reservation conflict.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			/* Make scsi midlayer do unlimited retries */
			cmd->result = DID_IMM_RETRY << 16;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			dev_warn(&h->pdev->dev,
				"%s: task complete with aborted status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		default:
			dev_warn(&h->pdev->dev,
				"%s: task complete with unrecognized status: 0x%02x\n",
				"HP SSD Smart Path", c2->error_data.status);
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		/* don't expect to get here. */
		dev_warn(&h->pdev->dev,
			"unexpected delivery or target failure, status = 0x%02x\n",
			c2->error_data.status);
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		dev_warn(&h->pdev->dev, "task management function rejected.\n");
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Unrecognized server response: 0x%02x\n",
			"HP SSD Smart Path",
			c2->error_data.serv_response);
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}

static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	int raid_retry = 0;

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0)) {
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return;
	}

	/* Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
	 */
	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
			dev_warn(&h->pdev->dev,
				"%s: Path is unavailable, retrying on standard path.\n",
				"HP SSD Smart Path");
		else
			dev_warn(&h->pdev->dev,
				"%s: Error 0x%02x, retrying on standard path.\n",
				"HP SSD Smart Path", c2->error_data.status);

		dev->offload_enabled = 0;
		h->drv_req_rescan = 1;	/* schedule controller for a rescan */
		cmd->result = DID_SOFT_ERROR << 16;
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return;
	}
	raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
	/* If error found, disable Smart Path, schedule a rescan,
	 * and force a retry on the standard path.
	 */
	if (raid_retry) {
		dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
			"HP SSD Smart Path");
		dev->offload_enabled = 0; /* Disable Smart Path */
		h->drv_req_rescan = 1;	  /* schedule controller rescan */
		cmd->result = DID_SOFT_ERROR << 16;
	}
	cmd_free(h, c);
	cmd->scsi_done(cmd);
}

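/*
 * Note the fallback contract here: returning DID_SOFT_ERROR makes the
 * SCSI midlayer retry the command, and because offload_enabled was
 * cleared the retry is routed down the standard RAID path instead of
 * the ioaccel path, while drv_req_rescan asks for a controller rescan
 * so the device's offload status can be re-evaluated later.
 */
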
1674 static void complete_scsi_command(struct CommandList
*cp
)
1676 struct scsi_cmnd
*cmd
;
1677 struct ctlr_info
*h
;
1678 struct ErrorInfo
*ei
;
1679 struct hpsa_scsi_dev_t
*dev
;
1681 unsigned char sense_key
;
1682 unsigned char asc
; /* additional sense code */
1683 unsigned char ascq
; /* additional sense code qualifier */
1684 unsigned long sense_data_size
;
1687 cmd
= (struct scsi_cmnd
*) cp
->scsi_cmd
;
1689 dev
= cmd
->device
->hostdata
;
1691 scsi_dma_unmap(cmd
); /* undo the DMA mappings */
1692 if ((cp
->cmd_type
== CMD_SCSI
) &&
1693 (cp
->Header
.SGTotal
> h
->max_cmd_sg_entries
))
1694 hpsa_unmap_sg_chain_block(h
, cp
);
1696 cmd
->result
= (DID_OK
<< 16); /* host byte */
1697 cmd
->result
|= (COMMAND_COMPLETE
<< 8); /* msg byte */
1699 if (cp
->cmd_type
== CMD_IOACCEL2
)
1700 return process_ioaccel2_completion(h
, cp
, cmd
, dev
);
1702 cmd
->result
|= ei
->ScsiStatus
;
1704 /* copy the sense data whether we need to or not. */
1705 if (SCSI_SENSE_BUFFERSIZE
< sizeof(ei
->SenseInfo
))
1706 sense_data_size
= SCSI_SENSE_BUFFERSIZE
;
1708 sense_data_size
= sizeof(ei
->SenseInfo
);
1709 if (ei
->SenseLen
< sense_data_size
)
1710 sense_data_size
= ei
->SenseLen
;
1712 memcpy(cmd
->sense_buffer
, ei
->SenseInfo
, sense_data_size
);
1713 scsi_set_resid(cmd
, ei
->ResidualCnt
);
1715 if (ei
->CommandStatus
== 0) {
1717 cmd
->scsi_done(cmd
);
	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
	 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
		cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
		cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.Tag.lower = c->Tag.lower;
		cp->Header.Tag.upper = c->Tag.upper;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
		if (is_logical_dev_addr_mode(dev->scsi3addr)) {
			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
				dev->offload_enabled = 0;
			cmd->result = DID_SOFT_ERROR << 16;
			cmd_free(h, cp);
			cmd->scsi_done(cmd);
			return;
		}
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			/* Get sense key */
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get additional sense code qualifier */
			ascq = ei->SenseInfo[13];
		}

		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (check_for_unit_attention(h, cp))
				break;
			if (sense_key == ILLEGAL_REQUEST) {
				/*
				 * SCSI REPORT_LUNS is commonly unsupported on
				 * Smart Array.  Suppress noisy complaint.
				 */
				if (cp->Request.CDB[0] == REPORT_LUNS)
					break;

				/* If ASC/ASCQ indicate Logical Unit
				 * Not Supported condition,
				 */
				if ((asc == 0x25) && (ascq == 0x0)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition\n", cp);
					break;
				}
			}

			if (sense_key == NOT_READY) {
				/* If Sense is Not Ready, Logical Unit
				 * Not ready, Manual Intervention
				 * required
				 */
				if ((asc == 0x04) && (ascq == 0x03)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition: unit "
						"not ready, manual "
						"intervention required\n", cp);
					break;
				}
			}
			if (sense_key == ABORTED_COMMAND) {
				/* Aborted command is retryable */
				dev_warn(&h->pdev->dev, "cp %p "
					"has check condition: aborted command: "
					"ASC: 0x%x, ASCQ: 0x%x\n",
					cp, asc, ascq);
				cmd->result |= DID_SOFT_ERROR << 16;
				break;
			}
			/* Must be some other type of check condition */
			dev_dbg(&h->pdev->dev, "cp %p has check condition: "
					"unknown type: "
					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
					"Returning result: 0x%x, "
					"cmd=[%02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x]\n",
					cp, sense_key, asc, ascq,
					cmd->result,
					cmd->cmnd[0], cmd->cmnd[1],
					cmd->cmnd[2], cmd->cmnd[3],
					cmd->cmnd[4], cmd->cmnd[5],
					cmd->cmnd[6], cmd->cmnd[7],
					cmd->cmnd[8], cmd->cmnd[9],
					cmd->cmnd[10], cmd->cmnd[11],
					cmd->cmnd[12], cmd->cmnd[13],
					cmd->cmnd[14], cmd->cmnd[15]);
			break;
		}

		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else { /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);
			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it.  We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't.  We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */
			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev, "cp %p has"
			" completed with data overrun "
			"reported\n", cp);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p has "
			"protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
			cp, ei->ScsiStatus);
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
			"abort\n", cp);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	case CMD_IOACCEL_DISABLED:
		/* This only handles the direct pass-through case since RAID
		 * offload is handled above.  Just attempt a retry.
		 */
		cmd->result = DID_SOFT_ERROR << 16;
		dev_warn(&h->pdev->dev,
				"cp %p had HP SSD Smart Path error\n", cp);
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}
	cmd_free(h, cp);
	cmd->scsi_done(cmd);
}
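
/* Illustrative note (values verifiable in scsi.h): the midlayer reads the
 * host byte of cmd->result from bits 16-23, which is why each case above
 * shifts a DID_* code left by 16.  DID_SOFT_ERROR is 0x0b, for instance,
 * so "cmd->result = DID_SOFT_ERROR << 16" yields 0x000b0000, which
 * host_byte() decodes and the midlayer treats as retryable.
 */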
static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;
	union u64bit addr64;

	for (i = 0; i < sg_used; i++) {
		addr64.val32.lower = c->SG[i].Addr.lower;
		addr64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
			data_direction);
	}
}
static int hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = 0;
		return 0;
	}

	addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
	if (dma_mapping_error(&pdev->dev, addr64)) {
		/* Prevent subsequent unmap of something never mapped */
		cp->Header.SGList = 0;
		cp->Header.SGTotal = 0;
		return -1;
	}
	cp->SG[0].Addr.lower =
	  (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Addr.upper =
	  (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Len = buflen;
	cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
	return 0;
}
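
/* Illustrative note (hypothetical address): a 64-bit bus address such as
 * 0x123456789 maps to Addr.lower = 0x23456789 and Addr.upper = 0x1 in the
 * descriptor above; the controller reassembles the two 32-bit halves.
 */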
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}
static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	/* If controller lockup detected, fake a hardware error. */
	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
	} else {
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_scsi_do_simple_cmd_core(h, c);
	}
}
#define MAX_DRIVER_CMD_RETRIES 25
static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
{
	int backoff_time = 10, retry_count = 0;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		hpsa_scsi_do_simple_cmd_core(h, c);
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	} while ((check_for_unit_attention(h, c) ||
			check_for_busy(h, c)) &&
			retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}
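
/* Illustrative note on the pacing above: the first few attempts are issued
 * back to back; once retry_count exceeds 3 the driver sleeps between
 * attempts, doubling the delay from 10 ms (10, 20, 40, ...) until it
 * passes 1000 ms, with the whole loop bounded by MAX_DRIVER_CMD_RETRIES.
 */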
static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
			struct CommandList *c)
{
	const u8 *cdb = c->Request.CDB;
	const u8 *lun = c->Header.LUN.LunAddrBytes;

	dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
	" CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		txt, lun[0], lun[1], lun[2], lun[3],
		lun[4], lun[5], lun[6], lun[7],
		cdb[0], cdb[1], cdb[2], cdb[3],
		cdb[4], cdb[5], cdb[6], cdb[7],
		cdb[8], cdb[9], cdb[10], cdb[11],
		cdb[12], cdb[13], cdb[14], cdb[15]);
}
static void hpsa_scsi_interpret_error(struct ctlr_info *h,
		struct CommandList *cp)
{
	const struct ErrorInfo *ei = cp->err_info;
	struct device *d = &cp->h->pdev->dev;
	const u8 *sd = ei->SenseInfo;

	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		hpsa_print_cmd(h, "SCSI status", cp);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
			dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
				sd[2] & 0x0f, sd[12], sd[13]);
		else
			dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero.  "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		hpsa_print_cmd(h, "overrun condition", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		hpsa_print_cmd(h, "invalid command", cp);
		dev_warn(d, "probably means device no longer present\n");
		}
		break;
	case CMD_PROTOCOL_ERR:
		hpsa_print_cmd(h, "protocol error", cp);
		break;
	case CMD_HARDWARE_ERR:
		hpsa_print_cmd(h, "hardware error", cp);
		break;
	case CMD_CONNECTION_LOST:
		hpsa_print_cmd(h, "connection lost", cp);
		break;
	case CMD_ABORTED:
		hpsa_print_cmd(h, "aborted", cp);
		break;
	case CMD_ABORT_FAILED:
		hpsa_print_cmd(h, "abort failed", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		hpsa_print_cmd(h, "unsolicited abort", cp);
		break;
	case CMD_TIMEOUT:
		hpsa_print_cmd(h, "timed out", cp);
		break;
	case CMD_UNABORTABLE:
		hpsa_print_cmd(h, "unabortable", cp);
		break;
	default:
		hpsa_print_cmd(h, "unknown status", cp);
		dev_warn(d, "Unknown command status %x\n",
				ei->CommandStatus);
	}
}
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			u16 page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_special_free(h, c);
	return rc;
}
static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
		unsigned char *scsi3addr, unsigned char page,
		struct bmic_controller_parameters *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_special_free(h, c);
	return rc;
}
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
			scsi3addr, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
	hpsa_scsi_do_simple_cmd_core(h, c);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}
static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}
#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
				struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;

	dev_info(&h->pdev->dev, "structure_size = %u\n",
				le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
			le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
			map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
			map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
			le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
			le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
			le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
			le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
			le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
			le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = %u\n",
			le16_to_cpu(map_buff->flags));
	if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
		dev_info(&h->pdev->dev, "encryption = ON\n");
	else
		dev_info(&h->pdev->dev, "encryption = OFF\n");
	dev_info(&h->pdev->dev, "dekindex = %u\n",
			le16_to_cpu(map_buff->dekindex));

	map_cnt = le16_to_cpu(map_buff->layout_map_count);
	for (map = 0; map < map_cnt; map++) {
		dev_info(&h->pdev->dev, "Map%u:\n", map);
		row_cnt = le16_to_cpu(map_buff->row_cnt);
		for (row = 0; row < row_cnt; row++) {
			dev_info(&h->pdev->dev, "  Row%u:\n", row);
			disks_per_row =
				le16_to_cpu(map_buff->data_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    D%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
			disks_per_row =
				le16_to_cpu(map_buff->metadata_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    M%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
		}
	}
}
#else
static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
			__attribute__((unused)) int rc,
			__attribute__((unused)) struct raid_map_data *map_buff)
{
}
#endif
static int hpsa_get_raid_map(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc = 0;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);
	if (c == NULL) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}
	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
			sizeof(this_device->raid_map), 0,
			scsi3addr, TYPE_CMD)) {
		dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
		cmd_special_free(h, c);
		return -ENOMEM;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		cmd_special_free(h, c);
		return -1;
	}
	cmd_special_free(h, c);

	/* @todo in the future, dynamically allocate RAID map memory */
	if (le32_to_cpu(this_device->raid_map.structure_size) >
				sizeof(this_device->raid_map)) {
		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
		rc = -1;
	}
	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
	return rc;
}
static int hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return 0;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return 0;
exit_supported:
	kfree(buf);
	return 1;
}
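
/* Illustrative note (hypothetical response): in the SUPPORTED_PAGES data,
 * buf[3] is the page-list length and page codes start at buf[4], e.g.
 * 00 00 00 03 00 c1 c2 advertises pages 0x00, 0xC1 and 0xC2; that layout
 * is why the loop above tests buf[3 + i] for i = 1..pages.
 */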
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
	this_device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
	if (this_device->offload_config) {
		this_device->offload_enabled =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		if (hpsa_get_raid_map(h, scsi3addr, this_device))
			this_device->offload_enabled = 0;
	}
out:
	kfree(buf);
	return;
}
/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -1;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}
static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		struct ReportLUNdata *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);
	if (c == NULL) {			/* trouble... */
		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -1;
	}
	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	} else {
		if (buf->extended_response_flag != extended_response) {
			dev_err(&h->pdev->dev,
				"report luns requested format %u, got %u\n",
				extended_response,
				buf->extended_response_flag);
			rc = -1;
		}
	}
out:
	cmd_special_free(h, c);
	return rc;
}
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf,
		int bufsize, int extended_response)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
/* Use VPD inquiry to get details of volume status */
static int hpsa_get_volume_status(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	int rc;
	int status;
	int size;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

	/* Does controller have VPD for logical volume status? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) {
		dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n");
		goto exit_failed;
	}

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0) {
		dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
		goto exit_failed;
	}
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0) {
		dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
		goto exit_failed;
	}
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}
/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 * -1 (offline for unknown reasons)
 *  # (integer code indicating one of several NOT READY states
 *     describing why a volume is to be kept offline)
 */
static unsigned char hpsa_volume_offline(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense, sense_key, asc, ascq;
	int ldstat = 0;
	u16 cmd_status;
	u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);
	if (!c)
		return 0;
	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_core(h, c);
	sense = c->err_info->SenseInfo;
	sense_key = sense[2];
	asc = sense[12];
	ascq = sense[13];
	cmd_status = c->err_info->CommandStatus;
	scsi_status = c->err_info->ScsiStatus;
	cmd_free(h, c);

	/* Is the volume 'not ready'? */
	if (cmd_status != CMD_TARGET_STATUS ||
		scsi_status != SAM_STAT_CHECK_CONDITION ||
		sense_key != NOT_READY ||
		asc != ASC_LUN_NOT_READY) {
		return 0;
	}

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return 0;
}
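
/* Illustrative note: a volume answering TEST UNIT READY with CHECK
 * CONDITION, sense NOT READY, ASC 0x04/ASCQ 0x04 ("format in progress")
 * on a controller without the LV status VPD page is still kept offline
 * here, via the ASCQ fallback in the HPSA_VPD_LV_STATUS_UNSUPPORTED case.
 */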
static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr)) {
		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
		this_device->volume_offline =
			hpsa_volume_offline(h, scsi3addr);
	} else {
		this_device->raid_level = RAID_UNKNOWN;
		this_device->offload_config = 0;
		this_device->offload_enabled = 0;
		this_device->volume_offline = 0;
	}

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}

	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}
static unsigned char *ext_target_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	"MSA 2040 SAS",
	NULL,
};

static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; ext_target_model[i]; i++)
		if (strncmp(device->model, ext_target_model[i],
			strlen(ext_target_model[i])) == 0)
			return 1;
	return 0;
}
/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-external target logical volumes on bus 0, external target logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes))
			hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
		else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device, 2, -1, -1);
		return;
	}
	/* It's a logical device */
	if (is_ext_target(h, device)) {
		/* external target way, put logicals on bus 1
		 * and match target/lun numbers box
		 * reports, other smart array, bus 0, target 0, match lunid
		 */
		hpsa_set_bus_target_lun(device,
			1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}
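
/* Illustrative example (hypothetical lunid): an external target logical
 * volume with lunid 0x00054321 gets target (0x00054321 >> 16) & 0x3fff = 5
 * and lun 0x00054321 & 0x00ff = 0x21, so it is presented at
 * bus 1, target 5, lun 0x21 under the scheme above.
 */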
/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the external targets (arrays), we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_ext_target_dev(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	unsigned long lunzerobits[], int *n_ext_target_devs)
{
	unsigned char scsi3addr[8];

	if (test_bit(tmpdevice->target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_ext_target(h, tmpdevice))
		return 0; /* Only external target devices have this problem. */

	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = tmpdevice->target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
		dev_warn(&h->pdev->dev, "Maximum number of external "
			"target devices exceeded.  Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*n_ext_target_devs)++;
	hpsa_set_bus_target_lun(this_device,
				tmpdevice->bus, tmpdevice->target, 0);
	set_bit(tmpdevice->target, lunzerobits);
	return 1;
}
/*
 * Get address of physical disk used for an ioaccel2 mode command:
 *	1. Extract ioaccel2 handle from the command.
 *	2. Find a matching ioaccel2 handle from list of physical disks.
 *	3. Return:
 *		1 and set scsi3addr to address of matching physical
 *		0 if no matching physical disk was found.
 */
static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
{
	struct ReportExtendedLUNdata *physicals = NULL;
	int responsesize = 24;	/* size of physical extended response */
	int extended = 2;	/* flag forces reporting 'other dev info'. */
	int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
	u32 nphysicals = 0;	/* number of reported physical devs */
	int found = 0;		/* found match (1) or not (0) */
	u32 find;		/* handle we need to match */
	int i;
	struct scsi_cmnd *scmd;	/* scsi command within request being aborted */
	struct hpsa_scsi_dev_t *d; /* device of request being aborted */
	struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
	u32 it_nexus;		/* 4 byte device handle for the ioaccel2 cmd */
	u32 scsi_nexus;		/* 4 byte device handle for the ioaccel2 cmd */

	if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
		return 0; /* no match */

	/* point to the ioaccel2 device handle */
	c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
	if (c2a == NULL)
		return 0; /* no match */

	scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
	if (scmd == NULL)
		return 0; /* no match */

	d = scmd->device->hostdata;
	if (d == NULL)
		return 0; /* no match */

	it_nexus = cpu_to_le32((u32) d->ioaccel_handle);
	scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus);
	find = c2a->scsi_nexus;

	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
			__func__, scsi_nexus,
			d->device_id[0], d->device_id[1], d->device_id[2],
			d->device_id[3], d->device_id[4], d->device_id[5],
			d->device_id[6], d->device_id[7], d->device_id[8],
			d->device_id[9], d->device_id[10], d->device_id[11],
			d->device_id[12], d->device_id[13], d->device_id[14],
			d->device_id[15]);

	/* Get the list of physical devices */
	physicals = kzalloc(reportsize, GFP_KERNEL);
	if (physicals == NULL)
		return 0; /* no match */
	if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
			reportsize, extended)) {
		dev_err(&h->pdev->dev,
			"Can't lookup %s device handle: report physical LUNs failed.\n",
			"HP SSD Smart Path");
		kfree(physicals);
		return 0;
	}
	nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
							responsesize;

	/* find ioaccel2 handle in list of physicals: */
	for (i = 0; i < nphysicals; i++) {
		/* handle is in bytes 28-31 of each lun */
		if (memcmp(&((struct ReportExtendedLUNdata *)
				physicals)->LUN[i][20], &find, 4) != 0) {
			continue; /* didn't match */
		}
		found = 1;
		memcpy(scsi3addr, &((struct ReportExtendedLUNdata *)
					physicals)->LUN[i][0], 8);
		if (h->raid_offload_debug > 0)
			dev_info(&h->pdev->dev,
				"%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
				__func__, find,
				((struct ReportExtendedLUNdata *)
					physicals)->LUN[i][20],
				scsi3addr[0], scsi3addr[1], scsi3addr[2],
				scsi3addr[3], scsi3addr[4], scsi3addr[5],
				scsi3addr[6], scsi3addr[7]);
		break; /* found it */
	}

	kfree(physicals);
	if (found)
		return 1;
	else
		return 0;
}
/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	int reportlunsize,
	struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	int physical_entry_size = 8;

	*physical_mode = 0;

	/* For I/O accelerator mode we need to read physical device handles */
	if (h->transMethod & CFGTBL_Trans_io_accel1 ||
		h->transMethod & CFGTBL_Trans_io_accel2) {
		*physical_mode = HPSA_REPORT_PHYS_EXTENDED;
		physical_entry_size = 24;
	}
	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize,
							*physical_mode)) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
							physical_entry_size;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}
static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}
static int hpsa_hba_mode_enabled(struct ctlr_info *h)
{
	int rc;
	int hba_mode_enabled;
	struct bmic_controller_parameters *ctlr_params;

	ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
		GFP_KERNEL);

	if (!ctlr_params)
		return 0;
	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
		sizeof(struct bmic_controller_parameters));
	if (rc != 0) {
		kfree(ctlr_params);
		return 0;
	}
	/* Read the flag before freeing the buffer (the original returned
	 * directly from ctlr_params and leaked the allocation). */
	hba_mode_enabled = ctlr_params->nvram_flags & (1 << 3) ? 1 : 0;
	kfree(ctlr_params);
	return hba_mode_enabled;
}
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	int physical_mode = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
	int rescan_hba_mode;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
	physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
	logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	rescan_hba_mode = hpsa_hba_mode_enabled(h);

	if (!h->hba_mode_enabled && rescan_hba_mode)
		dev_warn(&h->pdev->dev, "HBA mode enabled\n");
	else if (h->hba_mode_enabled && !rescan_hba_mode)
		dev_warn(&h->pdev->dev, "HBA mode disabled\n");

	h->hba_mode_enabled = rescan_hba_mode;

	if (hpsa_gather_lun_info(h, reportlunsize,
			(struct ReportLUNdata *) physdev_list, &nphysicals,
			&physical_mode, logdev_list, &nlogicals))
		goto out;

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			goto out;
		}
		ndev_allocated++;
	}

	if (unlikely(is_scsi_rev_5(h)))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);
		/* skip masked physical devices. */
		if (lunaddrbytes[3] & 0xC0 &&
			i < nphysicals + (raid_ctlr_position == 0))
			continue;

		/* Get device type, vendor, model, device id */
		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR))
			continue; /* skip it if we can't talk to it. */
		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		this_device = currentsd[ncurrent];

		/*
		 * For external target devices, we have to insert a LUN 0 which
		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
		 * is nonetheless an enclosure device there.  We have to
		 * present that otherwise linux won't find anything if
		 * there is no lun 0.
		 */
		if (add_ext_target_dev(h, tmpdevice, this_device,
				lunaddrbytes, lunzerobits,
				&n_ext_target_devs)) {
			ncurrent++;
			this_device = currentsd[ncurrent];
		}

		*this_device = *tmpdevice;

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
			if (h->hba_mode_enabled) {
				/* never use raid mapper in HBA mode */
				this_device->offload_enabled = 0;
				ncurrent++;
				break;
			} else if (h->acciopath_status) {
				if (i >= nphysicals) {
					ncurrent++;
					break;
				}
			} else {
				if (i < nphysicals)
					break;
				ncurrent++;
				break;
			}
			if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
				memcpy(&this_device->ioaccel_handle,
					&lunaddrbytes[20],
					sizeof(this_device->ioaccel_handle));
				ncurrent++;
			}
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}
	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
}
/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	unsigned int len;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i, sg_index, chained;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	curr_sg = cp->SG;
	chained = 0;
	sg_index = 0;
	scsi_for_each_sg(cmd, sg, use_sg, i) {
		if (i == h->max_cmd_sg_entries - 1 &&
			use_sg > h->max_cmd_sg_entries) {
			chained = 1;
			curr_sg = h->cmd_sg_list[cp->cmdindex];
			sg_index = 0;
		}
		addr64 = (u64) sg_dma_address(sg);
		len = sg_dma_len(sg);
		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
		curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
		curr_sg->Len = len;
		curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
		curr_sg++;
	}

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = (u16) (use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
	return 0;
}
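
/* Illustrative note (hypothetical sizes): with max_cmd_sg_entries = 32 and
 * a 40-element scatter list, 31 descriptors stay in the command, the last
 * in-command slot becomes the chain pointer, and the remaining 9 entries
 * go to the chain block; SGTotal = use_sg + 1 counts that extra pointer.
 */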
#define IO_ACCEL_INELIGIBLE (1)
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
	int is_write = 0;
	u32 block;
	u32 block_cnt;

	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_12:
		is_write = 1;
	case READ_6:
	case READ_12:
		if (*cdb_len == 6) {
			block = (((u32) cdb[2]) << 8) | cdb[3];
			block_cnt = cdb[4];
		} else {
			BUG_ON(*cdb_len != 12);
			block = (((u32) cdb[2]) << 24) |
				(((u32) cdb[3]) << 16) |
				(((u32) cdb[4]) << 8) |
				cdb[5];
			block_cnt =
				(((u32) cdb[6]) << 24) |
				(((u32) cdb[7]) << 16) |
				(((u32) cdb[8]) << 8) |
				cdb[9];
		}
		if (block_cnt > 0xffff)
			return IO_ACCEL_INELIGIBLE;

		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (block >> 24);
		cdb[3] = (u8) (block >> 16);
		cdb[4] = (u8) (block >> 8);
		cdb[5] = (u8) (block);
		cdb[6] = 0;
		cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}
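
/* Worked example (hypothetical CDB): READ_6 08 00 12 34 10 00 carries
 * block 0x1234 and count 0x10; the fixup above rewrites it as READ_10
 * 28 00 00 00 12 34 00 00 10 00 and sets *cdb_len to 10.
 */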
static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int len;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;

	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
		return IO_ACCEL_INELIGIBLE;

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (fixup_ioaccel_cdb(cdb, &cdb_len))
		return IO_ACCEL_INELIGIBLE;

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
			curr_sg->Addr.upper =
				(u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
			curr_sg->Len = len;

			if (i == (scsi_sg_count(cmd) - 1))
				curr_sg->Ext = HPSA_SG_LAST;
			else
				curr_sg->Ext = 0; /* we are not chaining */
			curr_sg++;
		}

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}

	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = ioaccel_handle & 0xFFFF;
	cp->transfer_len = total_len;
	cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
	cp->control = control;
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);
	/* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}
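
/* Illustrative note: the BUG_ON on c->busaddr above checks the low seven
 * address bits, i.e. 128-byte alignment of the accelerated command; this
 * assumes each io_accel1_cmd in the pool is a 128-byte multiple so that
 * the cmdindex arithmetic preserves the alignment.
 */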
/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
}
/*
 * Set encryption parameters for the ioaccel2 request
 */
static void set_encrypt_ioaccel2(struct ctlr_info *h,
	struct CommandList *c, struct io_accel2_cmd *cp)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	u64 first_block;

	BUG_ON(!(dev->offload_config && dev->offload_enabled));

	/* Are we doing encryption on this device */
	if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
		return;
	/* Set the data encryption key index. */
	cp->dekindex = map->dekindex;

	/* Set the encryption enable flag, encoded into direction field. */
	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;

	/* Set encryption tweak values based on logical block address
	 * If block size is 512, tweak value is LBA.
	 * For other block sizes, tweak is (LBA * block size)/ 512)
	 */
	switch (cmd->cmnd[0]) {
	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_6:
	case READ_6:
		if (map->volume_blk_size == 512) {
			cp->tweak_lower =
				(((u32) cmd->cmnd[2]) << 8) |
					cmd->cmnd[3];
			cp->tweak_upper = 0;
		} else {
			first_block =
				(((u64) cmd->cmnd[2]) << 8) |
					cmd->cmnd[3];
			first_block = (first_block * map->volume_blk_size)/512;
			cp->tweak_lower = (u32)first_block;
			cp->tweak_upper = (u32)(first_block >> 32);
		}
		break;
	case WRITE_10:
	case READ_10:
		if (map->volume_blk_size == 512) {
			cp->tweak_lower =
				(((u32) cmd->cmnd[2]) << 24) |
				(((u32) cmd->cmnd[3]) << 16) |
				(((u32) cmd->cmnd[4]) << 8) |
					cmd->cmnd[5];
			cp->tweak_upper = 0;
		} else {
			first_block =
				(((u64) cmd->cmnd[2]) << 24) |
				(((u64) cmd->cmnd[3]) << 16) |
				(((u64) cmd->cmnd[4]) << 8) |
					cmd->cmnd[5];
			first_block = (first_block * map->volume_blk_size)/512;
			cp->tweak_lower = (u32)first_block;
			cp->tweak_upper = (u32)(first_block >> 32);
		}
		break;
	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_12:
	case READ_12:
		if (map->volume_blk_size == 512) {
			cp->tweak_lower =
				(((u32) cmd->cmnd[2]) << 24) |
				(((u32) cmd->cmnd[3]) << 16) |
				(((u32) cmd->cmnd[4]) << 8) |
					cmd->cmnd[5];
			cp->tweak_upper = 0;
		} else {
			first_block =
				(((u64) cmd->cmnd[2]) << 24) |
				(((u64) cmd->cmnd[3]) << 16) |
				(((u64) cmd->cmnd[4]) << 8) |
					cmd->cmnd[5];
			first_block = (first_block * map->volume_blk_size)/512;
			cp->tweak_lower = (u32)first_block;
			cp->tweak_upper = (u32)(first_block >> 32);
		}
		break;
	case WRITE_16:
	case READ_16:
		if (map->volume_blk_size == 512) {
			cp->tweak_lower =
				(((u32) cmd->cmnd[6]) << 24) |
				(((u32) cmd->cmnd[7]) << 16) |
				(((u32) cmd->cmnd[8]) << 8) |
					cmd->cmnd[9];
			cp->tweak_upper =
				(((u32) cmd->cmnd[2]) << 24) |
				(((u32) cmd->cmnd[3]) << 16) |
				(((u32) cmd->cmnd[4]) << 8) |
					cmd->cmnd[5];
		} else {
			first_block =
				(((u64) cmd->cmnd[2]) << 56) |
				(((u64) cmd->cmnd[3]) << 48) |
				(((u64) cmd->cmnd[4]) << 40) |
				(((u64) cmd->cmnd[5]) << 32) |
				(((u64) cmd->cmnd[6]) << 24) |
				(((u64) cmd->cmnd[7]) << 16) |
				(((u64) cmd->cmnd[8]) << 8) |
					cmd->cmnd[9];
			first_block = (first_block * map->volume_blk_size)/512;
			cp->tweak_lower = (u32)first_block;
			cp->tweak_upper = (u32)(first_block >> 32);
		}
		break;
	default:
		dev_err(&h->pdev->dev,
			"ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
			__func__);
		BUG();
		break;
	}
}
static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct ioaccel2_sg_element *curr_sg;
	int use_sg, i;
	struct scatterlist *sg;
	u64 addr64;
	u32 len;
	u32 total_len = 0;

	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
		return IO_ACCEL_INELIGIBLE;

	if (fixup_ioaccel_cdb(cdb, &cdb_len))
		return IO_ACCEL_INELIGIBLE;
	c->cmd_type = CMD_IOACCEL2;
	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(cp, 0, sizeof(*cp));
	cp->IU_type = IOACCEL2_IU_TYPE;

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (use_sg) {
		BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
		curr_sg = cp->sg;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
		cp->direction |= IOACCEL2_DIR_NO_DATA;
	}

	/* Set encryption parameters, if necessary */
	set_encrypt_ioaccel2(h, c, cp);

	cp->scsi_nexus = ioaccel_handle;
	cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
				DIRECT_LOOKUP_BIT;
	memcpy(cp->cdb, cdb, sizeof(cp->cdb));

	/* fill in sg elements */
	cp->sg_count = (u8) use_sg;

	cp->data_len = cpu_to_le32(total_len);
	cp->err_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data));

	enqueue_cmd_and_start_io(h, c);
	return 0;
}
/*
 * Queue a command to the correct I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr)
{
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr);
}
static void raid_map_helper(struct raid_map_data *map,
		int offload_to_mirror, u32 *map_index, u32 *current_group)
{
	if (offload_to_mirror == 0) {
		/* use physical disk in the first mirrored group. */
		*map_index %= map->data_disks_per_row;
		return;
	}
	do {
		/* determine mirror group that *map_index indicates */
		*current_group = *map_index / map->data_disks_per_row;
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < (map->layout_map_count - 1)) {
			/* select map index from next group */
			*map_index += map->data_disks_per_row;
			(*current_group)++;
		} else {
			/* select map index from first group */
			*map_index %= map->data_disks_per_row;
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}
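
/* Illustrative example (hypothetical map): with data_disks_per_row = 3
 * and layout_map_count = 3 (R1-ADM), map_index 4 lies in mirror group 1;
 * a request for offload_to_mirror = 2 advances it by one row of 3 disks
 * to map_index 7 in group 2, where the loop above terminates.
 */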
3627 * Attempt to perform offload RAID mapping for a logical volume I/O.
3629 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info
*h
,
3630 struct CommandList
*c
)
3632 struct scsi_cmnd
*cmd
= c
->scsi_cmd
;
3633 struct hpsa_scsi_dev_t
*dev
= cmd
->device
->hostdata
;
3634 struct raid_map_data
*map
= &dev
->raid_map
;
3635 struct raid_map_disk_data
*dd
= &map
->data
[0];
3638 u64 first_block
, last_block
;
3641 u64 first_row
, last_row
;
3642 u32 first_row_offset
, last_row_offset
;
3643 u32 first_column
, last_column
;
3644 u64 r0_first_row
, r0_last_row
;
3645 u32 r5or6_blocks_per_row
;
3646 u64 r5or6_first_row
, r5or6_last_row
;
3647 u32 r5or6_first_row_offset
, r5or6_last_row_offset
;
3648 u32 r5or6_first_column
, r5or6_last_column
;
3649 u32 total_disks_per_row
;
3651 u32 first_group
, last_group
, current_group
;
3658 #if BITS_PER_LONG == 32
3661 int offload_to_mirror
;
3663 BUG_ON(!(dev
->offload_config
&& dev
->offload_enabled
));
3665 /* check for valid opcode, get LBA and block count */
3666 switch (cmd
->cmnd
[0]) {
3671 (((u64
) cmd
->cmnd
[2]) << 8) |
3673 block_cnt
= cmd
->cmnd
[4];
3679 (((u64
) cmd
->cmnd
[2]) << 24) |
3680 (((u64
) cmd
->cmnd
[3]) << 16) |
3681 (((u64
) cmd
->cmnd
[4]) << 8) |
3684 (((u32
) cmd
->cmnd
[7]) << 8) |
3691 (((u64
) cmd
->cmnd
[2]) << 24) |
3692 (((u64
) cmd
->cmnd
[3]) << 16) |
3693 (((u64
) cmd
->cmnd
[4]) << 8) |
3696 (((u32
) cmd
->cmnd
[6]) << 24) |
3697 (((u32
) cmd
->cmnd
[7]) << 16) |
3698 (((u32
) cmd
->cmnd
[8]) << 8) |
3705 (((u64
) cmd
->cmnd
[2]) << 56) |
3706 (((u64
) cmd
->cmnd
[3]) << 48) |
3707 (((u64
) cmd
->cmnd
[4]) << 40) |
3708 (((u64
) cmd
->cmnd
[5]) << 32) |
3709 (((u64
) cmd
->cmnd
[6]) << 24) |
3710 (((u64
) cmd
->cmnd
[7]) << 16) |
3711 (((u64
) cmd
->cmnd
[8]) << 8) |
3714 (((u32
) cmd
->cmnd
[10]) << 24) |
3715 (((u32
) cmd
->cmnd
[11]) << 16) |
3716 (((u32
) cmd
->cmnd
[12]) << 8) |
3720 return IO_ACCEL_INELIGIBLE
; /* process via normal I/O path */
3722 BUG_ON(block_cnt
== 0);
3723 last_block
= first_block
+ block_cnt
- 1;
3725 /* check for write to non-RAID-0 */
3726 if (is_write
&& dev
->raid_level
!= 0)
3727 return IO_ACCEL_INELIGIBLE
;
3729 /* check for invalid block or wraparound */
3730 if (last_block
>= map
->volume_blk_cnt
|| last_block
< first_block
)
3731 return IO_ACCEL_INELIGIBLE
;
3733 /* calculate stripe information for the request */
3734 blocks_per_row
= map
->data_disks_per_row
* map
->strip_size
;
3735 #if BITS_PER_LONG == 32
3736 tmpdiv
= first_block
;
3737 (void) do_div(tmpdiv
, blocks_per_row
);
3739 tmpdiv
= last_block
;
3740 (void) do_div(tmpdiv
, blocks_per_row
);
3742 first_row_offset
= (u32
) (first_block
- (first_row
* blocks_per_row
));
3743 last_row_offset
= (u32
) (last_block
- (last_row
* blocks_per_row
));
3744 tmpdiv
= first_row_offset
;
3745 (void) do_div(tmpdiv
, map
->strip_size
);
3746 first_column
= tmpdiv
;
3747 tmpdiv
= last_row_offset
;
3748 (void) do_div(tmpdiv
, map
->strip_size
);
3749 last_column
= tmpdiv
;
3751 first_row
= first_block
/ blocks_per_row
;
3752 last_row
= last_block
/ blocks_per_row
;
3753 first_row_offset
= (u32
) (first_block
- (first_row
* blocks_per_row
));
3754 last_row_offset
= (u32
) (last_block
- (last_row
* blocks_per_row
));
3755 first_column
= first_row_offset
/ map
->strip_size
;
3756 last_column
= last_row_offset
/ map
->strip_size
;
3759 /* if this isn't a single row/column then give to the controller */
3760 if ((first_row
!= last_row
) || (first_column
!= last_column
))
3761 return IO_ACCEL_INELIGIBLE
;
3763 /* proceeding with driver mapping */
3764 total_disks_per_row
= map
->data_disks_per_row
+
3765 map
->metadata_disks_per_row
;
3766 map_row
= ((u32
)(first_row
>> map
->parity_rotation_shift
)) %
3768 map_index
= (map_row
* total_disks_per_row
) + first_column
;
3770 switch (dev
->raid_level
) {
3772 break; /* nothing special to do */
3774 /* Handles load balance across RAID 1 members.
3775 * (2-drive R1 and R10 with even # of drives.)
3776 * Appropriate for SSDs, not optimal for HDDs
3778 BUG_ON(map
->layout_map_count
!= 2);
3779 if (dev
->offload_to_mirror
)
3780 map_index
+= map
->data_disks_per_row
;
3781 dev
->offload_to_mirror
= !dev
->offload_to_mirror
;
3784 /* Handles N-way mirrors (R1-ADM)
3785 * and R10 with # of drives divisible by 3.)
3787 BUG_ON(map
->layout_map_count
!= 3);
3789 offload_to_mirror
= dev
->offload_to_mirror
;
3790 raid_map_helper(map
, offload_to_mirror
,
3791 &map_index
, ¤t_group
);
3792 /* set mirror group to use next time */
3794 (offload_to_mirror
>= map
->layout_map_count
- 1)
3795 ? 0 : offload_to_mirror
+ 1;
3796 /* FIXME: remove after debug/dev */
3797 BUG_ON(offload_to_mirror
>= map
->layout_map_count
);
3798 dev_warn(&h
->pdev
->dev
,
3799 "DEBUG: Using physical disk map index %d from mirror group %d\n",
3800 map_index
, offload_to_mirror
);
3801 dev
->offload_to_mirror
= offload_to_mirror
;
3802 /* Avoid direct use of dev->offload_to_mirror within this
3803 * function since multiple threads might simultaneously
3804 * increment it beyond the range of dev->layout_map_count -1.
3809 if (map
->layout_map_count
<= 1)
3812 /* Verify first and last block are in same RAID group */
3813 r5or6_blocks_per_row
=
3814 map
->strip_size
* map
->data_disks_per_row
;
3815 BUG_ON(r5or6_blocks_per_row
== 0);
3816 stripesize
= r5or6_blocks_per_row
* map
->layout_map_count
;
3817 #if BITS_PER_LONG == 32
3818 tmpdiv
= first_block
;
3819 first_group
= do_div(tmpdiv
, stripesize
);
3820 tmpdiv
= first_group
;
3821 (void) do_div(tmpdiv
, r5or6_blocks_per_row
);
3822 first_group
= tmpdiv
;
3823 tmpdiv
= last_block
;
3824 last_group
= do_div(tmpdiv
, stripesize
);
3825 tmpdiv
= last_group
;
3826 (void) do_div(tmpdiv
, r5or6_blocks_per_row
);
3827 last_group
= tmpdiv
;
3829 first_group
= (first_block
% stripesize
) / r5or6_blocks_per_row
;
3830 last_group
= (last_block
% stripesize
) / r5or6_blocks_per_row
;
3832 if (first_group
!= last_group
)
3833 return IO_ACCEL_INELIGIBLE
;
3835 /* Verify request is in a single row of RAID 5/6 */
3836 #if BITS_PER_LONG == 32
3837 tmpdiv
= first_block
;
3838 (void) do_div(tmpdiv
, stripesize
);
3839 first_row
= r5or6_first_row
= r0_first_row
= tmpdiv
;
3840 tmpdiv
= last_block
;
3841 (void) do_div(tmpdiv
, stripesize
);
3842 r5or6_last_row
= r0_last_row
= tmpdiv
;
3844 first_row
= r5or6_first_row
= r0_first_row
=
3845 first_block
/ stripesize
;
3846 r5or6_last_row
= r0_last_row
= last_block
/ stripesize
;
3848 if (r5or6_first_row
!= r5or6_last_row
)
3849 return IO_ACCEL_INELIGIBLE
;
3852 /* Verify request is in a single column */
3853 #if BITS_PER_LONG == 32
3854 tmpdiv
= first_block
;
3855 first_row_offset
= do_div(tmpdiv
, stripesize
);
3856 tmpdiv
= first_row_offset
;
3857 first_row_offset
= (u32
) do_div(tmpdiv
, r5or6_blocks_per_row
);
3858 r5or6_first_row_offset
= first_row_offset
;
3859 tmpdiv
= last_block
;
3860 r5or6_last_row_offset
= do_div(tmpdiv
, stripesize
);
3861 tmpdiv
= r5or6_last_row_offset
;
3862 r5or6_last_row_offset
= do_div(tmpdiv
, r5or6_blocks_per_row
);
3863 tmpdiv
= r5or6_first_row_offset
;
3864 (void) do_div(tmpdiv
, map
->strip_size
);
3865 first_column
= r5or6_first_column
= tmpdiv
;
3866 tmpdiv
= r5or6_last_row_offset
;
3867 (void) do_div(tmpdiv
, map
->strip_size
);
3868 r5or6_last_column
= tmpdiv
;
3870 first_row_offset
= r5or6_first_row_offset
=
3871 (u32
)((first_block
% stripesize
) %
3872 r5or6_blocks_per_row
);
3874 r5or6_last_row_offset
=
3875 (u32
)((last_block
% stripesize
) %
3876 r5or6_blocks_per_row
);
3878 first_column
= r5or6_first_column
=
3879 r5or6_first_row_offset
/ map
->strip_size
;
3881 r5or6_last_row_offset
/ map
->strip_size
;
3883 if (r5or6_first_column
!= r5or6_last_column
)
3884 return IO_ACCEL_INELIGIBLE
;
3886 /* Request is eligible */
3887 map_row
= ((u32
)(first_row
>> map
->parity_rotation_shift
)) %
3890 map_index
= (first_group
*
3891 (map
->row_cnt
* total_disks_per_row
)) +
3892 (map_row
* total_disks_per_row
) + first_column
;
3895 return IO_ACCEL_INELIGIBLE
;
	disk_handle = dd[map_index].ioaccel_handle;
	disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
			(first_row_offset - (first_column * map->strip_size));
	disk_block_cnt = block_cnt;

	/* handle differing logical/physical block sizes */
	if (map->phys_blk_shift) {
		disk_block <<= map->phys_blk_shift;
		disk_block_cnt <<= map->phys_blk_shift;
	}
	BUG_ON(disk_block_cnt > 0xffff);
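	/* Illustration (hypothetical sizes): if the logical volume exposes
	 * 4096-byte blocks while the physical disks use 512-byte blocks,
	 * phys_blk_shift would be 3, so logical block 100 becomes physical
	 * block 800 and the count scales by the same factor of 8.
	 */
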
	/* build the new CDB for the physical disk I/O */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 56);
		cdb[3] = (u8) (disk_block >> 48);
		cdb[4] = (u8) (disk_block >> 40);
		cdb[5] = (u8) (disk_block >> 32);
		cdb[6] = (u8) (disk_block >> 24);
		cdb[7] = (u8) (disk_block >> 16);
		cdb[8] = (u8) (disk_block >> 8);
		cdb[9] = (u8) (disk_block);
		cdb[10] = (u8) (disk_block_cnt >> 24);
		cdb[11] = (u8) (disk_block_cnt >> 16);
		cdb[12] = (u8) (disk_block_cnt >> 8);
		cdb[13] = (u8) (disk_block_cnt);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_len = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 24);
		cdb[3] = (u8) (disk_block >> 16);
		cdb[4] = (u8) (disk_block >> 8);
		cdb[5] = (u8) (disk_block);
		cdb[6] = 0;
		cdb[7] = (u8) (disk_block_cnt >> 8);
		cdb[8] = (u8) (disk_block_cnt);
		cdb[9] = 0;
		cdb_len = 10;
	}
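	/* Hand the rebuilt CDB, now addressed directly to the physical
	 * disk via disk_handle, to the I/O accelerator submission path.
	 */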
	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
						dev->scsi3addr);
}

static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *))
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	unsigned long flags;
	int rc = 0;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		done(cmd);
		return 0;
	}
	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		cmd->result = DID_ERROR << 16;
		done(cmd);
		return 0;
	}
	spin_unlock_irqrestore(&h->lock, flags);
	c = cmd_alloc(h);
	if (c == NULL) {		/* trouble... */
		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* Fill in the command list header */

	cmd->scsi_done = done;    /* save this for use by completion code */

	/* save c in case we have to abort it */
	cmd->host_scribble = (unsigned char *) c;

	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;

	/* Call alternate submit routine for I/O accelerated commands.
	 * Retries always go down the normal I/O path.
	 */
	if (likely(cmd->retries == 0 &&
		cmd->request->cmd_type == REQ_TYPE_FS &&
		h->acciopath_status)) {
		if (dev->offload_enabled) {
			rc = hpsa_scsi_ioaccel_raid_map(h, c);
			if (rc == 0)
				return 0; /* Sent on ioaccel path */
			if (rc < 0) {	/* scsi_dma_map failed. */
				cmd_free(h, c);
				return SCSI_MLQUEUE_HOST_BUSY;
			}
		} else if (dev->ioaccel_handle) {
			rc = hpsa_scsi_ioaccel_direct_map(h, c);
			if (rc == 0)
				return 0; /* Sent on direct map path */
			if (rc < 0) {	/* scsi_dma_map failed. */
				cmd_free(h, c);
				return SCSI_MLQUEUE_HOST_BUSY;
			}
		}
	}

	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
	c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	c->Request.Type.Type = TYPE_CMD;
	c->Request.Type.Attribute = ATTR_SIMPLE;
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.Type.Direction = XFER_WRITE;
		break;
	case DMA_FROM_DEVICE:
		c->Request.Type.Direction = XFER_READ;
		break;
	case DMA_NONE:
		c->Request.Type.Direction = XFER_NONE;
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */

		c->Request.Type.Direction = XFER_RSVD;
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */

		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		cmd_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}

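/* DEF_SCSI_QCMD() (from <scsi/scsi_host.h>) expands to the actual
 * hpsa_scsi_queue_command() entry point: a wrapper that takes the
 * Scsi_Host lock and calls hpsa_scsi_queue_command_lck() above with
 * cmd->scsi_done as the done callback.
 */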
static DEF_SCSI_QCMD(hpsa_scsi_queue_command)

static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
{
	unsigned long flags;

	/*
	 * Don't let rescans be initiated on a controller known
	 * to be locked up.  If the controller locks up *during*
	 * a rescan, that thread is probably hosed, but at least
	 * we can prevent new rescan threads from piling up on a
	 * locked up controller.
	 */
	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		spin_lock_irqsave(&h->scan_lock, flags);
		h->scan_finished = 1;
		wake_up_all(&h->scan_wait_queue);
		spin_unlock_irqrestore(&h->scan_lock, flags);
		return 1;
	}
	spin_unlock_irqrestore(&h->lock, flags);
	return 0;
}

static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	if (do_not_scan_if_controller_locked_up(h))
		return;

	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	spin_unlock_irqrestore(&h->scan_lock, flags);

	if (do_not_scan_if_controller_locked_up(h))
		return;

	hpsa_update_scsi_devices(h, h->scsi_host->host_no);

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1; /* mark scan as finished. */
	wake_up_all(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}

static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}

static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason)
{
	struct ctlr_info *h = sdev_to_hba(sdev);

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -ENOTSUPP;

	if (qdepth < 1)
		qdepth = 1;
	else
		if (qdepth > h->nr_cmds)
			qdepth = h->nr_cmds;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}

static void hpsa_unregister_scsi(struct ctlr_info *h)
{
	/* we are being forcibly unloaded, and may not refuse. */
	scsi_remove_host(h->scsi_host);
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
}

static int hpsa_register_scsi(struct ctlr_info *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL)
		goto fail;

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds;
	if (h->hba_mode_enabled)
		sh->cmd_per_lun = 7;
	else
		sh->cmd_per_lun = h->nr_cmds;
	sh->sg_tablesize = h->maxsgentries;
	h->scsi_host = sh;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[h->intr_mode];
	sh->unique_id = sh->irq;
	error = scsi_add_host(sh, &h->pdev->dev);
	if (error)
		goto fail_host_put;
	scsi_scan_host(sh);
	return 0;

 fail_host_put:
	dev_err(&h->pdev->dev, "%s: scsi_add_host"
		" failed for controller %d\n", __func__, h->ctlr);
	scsi_host_put(sh);
	return error;
 fail:
	dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
		" failed for controller %d\n", __func__, h->ctlr);
	return -ENOMEM;
}

static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[])
{
	int rc = 0;
	int count = 0;
	int waittime = 1; /* seconds */
	struct CommandList *c;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"wait_for_device_to_become_ready.\n");
		return IO_ERROR;
	}

	/* Send test unit ready until device ready, or give up. */
	while (count < HPSA_TUR_RETRY_LIMIT) {

		/* Wait for a bit.  do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);
		count++;
		rc = 0; /* Device ready. */

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime = waittime * 2;

		/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
		(void) fill_cmd(c, TEST_UNIT_READY, h,
				NULL, 0, 0, lunaddr, TYPE_CMD);
		hpsa_scsi_do_simple_cmd_core(h, c);
		/* no unmap needed here because no data xfer. */

		if (c->err_info->CommandStatus == CMD_SUCCESS)
			break;

		if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			c->err_info->SenseInfo[2] == UNIT_ATTENTION))
			break;

		dev_warn(&h->pdev->dev, "waiting %d secs "
			"for device to become ready.\n", waittime);
		rc = 1; /* device not ready. */
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_special_free(h, c);
	return rc;
}

/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining.  Doing a host- or bus-reset can't do anything good here.
 */
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;

	/* find the controller to which the command to be aborted was sent */
	h = sdev_to_hba(scsicmd->device);
	if (h == NULL) /* paranoia */
		return FAILED;
	dev = scsicmd->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
			"device lookup failed.\n");
		return FAILED;
	}
	dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
	/* send a reset to the SCSI LUN which the command was sent to */
	rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
		return SUCCESS;

	dev_warn(&h->pdev->dev, "resetting device failed.\n");
	return FAILED;
}

static void swizzle_abort_tag(u8 *tag)
{
	u8 original_tag[8];

	memcpy(original_tag, tag, 8);
	tag[0] = original_tag[3];
	tag[1] = original_tag[2];
	tag[2] = original_tag[1];
	tag[3] = original_tag[0];
	tag[4] = original_tag[7];
	tag[5] = original_tag[6];
	tag[6] = original_tag[5];
	tag[7] = original_tag[4];
}
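
/* Illustration: a tag stored as the bytes 00 01 02 03 04 05 06 07 comes
 * back from swizzle_abort_tag() as 03 02 01 00 07 06 05 04, i.e. each
 * 4-byte half of the tag has its byte order reversed.
 */
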
static void hpsa_get_tag(struct ctlr_info *h,
	struct CommandList *c, u32 *taglower, u32 *tagupper)
{
	if (c->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
			&h->ioaccel_cmd_pool[c->cmdindex];
		*tagupper = cm1->Tag.upper;
		*taglower = cm1->Tag.lower;
		return;
	}
	if (c->cmd_type == CMD_IOACCEL2) {
		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
			&h->ioaccel2_cmd_pool[c->cmdindex];
		/* upper tag not used in ioaccel2 mode */
		memset(tagupper, 0, sizeof(*tagupper));
		*taglower = cm2->Tag;
		return;
	}
	*tagupper = c->Header.Tag.upper;
	*taglower = c->Header.Tag.lower;
}

static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
	struct CommandList *abort, int swizzle)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;
	u32 tagupper, taglower;

	c = cmd_special_alloc(h);
	if (c == NULL) {	/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	/* fill_cmd can't fail here, no buffer to map */
	(void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
		0, 0, scsi3addr, TYPE_MSG);
	if (swizzle)
		swizzle_abort_tag(&c->Request.CDB[4]);
	hpsa_scsi_do_simple_cmd_core(h, c);
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_SUCCESS:
		break;
	case CMD_UNABORTABLE: /* Very common, don't make noise. */
		rc = -1;
		break;
	default:
		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
			__func__, tagupper, taglower);
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		break;
	}
	cmd_special_free(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
		__func__, tagupper, taglower);
	return rc;
}

/*
 * hpsa_find_cmd_in_queue
 *
 * Used to determine whether a command (find) is still present
 * in queue_head.  Optionally excludes the last element of queue_head.
 *
 * This is used to avoid unnecessary aborts.  Commands in h->reqQ have
 * not yet been submitted, and so can be aborted by the driver without
 * sending an abort to the hardware.
 *
 * Returns pointer to command if found in queue, NULL otherwise.
 */
static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
			struct scsi_cmnd *find, struct list_head *queue_head)
{
	unsigned long flags;
	struct CommandList *c = NULL;	/* ptr into cmpQ */

	if (!find)
		return NULL;
	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(c, queue_head, list) {
		if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
			continue;
		if (c->scsi_cmd == find) {
			spin_unlock_irqrestore(&h->lock, flags);
			return c;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	return NULL;
}

static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
					u8 *tag, struct list_head *queue_head)
{
	unsigned long flags;
	struct CommandList *c;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(c, queue_head, list) {
		if (memcmp(&c->Header.Tag, tag, 8) != 0)
			continue;
		spin_unlock_irqrestore(&h->lock, flags);
		return c;
	}
	spin_unlock_irqrestore(&h->lock, flags);
	return NULL;
}

/* ioaccel2 path firmware cannot handle abort task requests.
 * Change abort requests to physical target reset, and send to the
 * address of the physical disk used for the ioaccel 2 command.
 * Return 0 on success (IO_OK)
 * -1 on failure
 */
static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort)
{
	int rc = IO_OK;
	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
	unsigned char *psa = &phys_scsi3addr[0];

	/* Get a pointer to the hpsa logical device. */
	scmd = (struct scsi_cmnd *) abort->scsi_cmd;
	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
	if (dev == NULL) {
		dev_warn(&h->pdev->dev,
			"Cannot abort: no device pointer for command.\n");
		return -1; /* not abortable */
	}

	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
			scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);

	if (!dev->offload_enabled) {
		dev_warn(&h->pdev->dev,
			"Can't abort: device is not operating in HP SSD Smart Path mode.\n");
		return -1; /* not abortable */
	}

	/* Incoming scsi3addr is logical addr. We need physical disk addr. */
	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
		return -1; /* not abortable */
	}

	/* send the reset */
	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
	rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
	if (rc != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return rc; /* failed to reset */
	}

	/* wait for device to recover */
	if (wait_for_device_to_become_ready(h, psa) != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return -1; /* failed to recover */
	}

	/* device recovered */
	dev_info(&h->pdev->dev,
		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		psa[0], psa[1], psa[2], psa[3],
		psa[4], psa[5], psa[6], psa[7]);

	return rc; /* success */
}

/* Some Smart Arrays need the abort tag swizzled, and some don't.  It's hard to
 * tell which kind we're dealing with, so we send the abort both ways.  There
 * shouldn't be any collisions between swizzled and unswizzled tags due to the
 * way we construct our tags but we check anyway in case the assumptions which
 * make this true someday become false.
 */
static int hpsa_send_abort_both_ways(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort)
{
	u8 swizzled_tag[8];
	struct CommandList *c;
	int rc = 0, rc2 = 0;

	/* io accelerator mode 2 commands should be aborted via the
	 * accelerated path, since RAID path is unaware of these commands,
	 * but underlying firmware can't handle abort TMF.
	 * Change abort to physical device reset.
	 */
	if (abort->cmd_type == CMD_IOACCEL2)
		return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);

	/* we do not expect to find the swizzled tag in our queue, but
	 * check anyway just to be sure the assumptions which make this
	 * the case haven't become wrong.
	 */
	memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
	swizzle_abort_tag(swizzled_tag);
	c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
	if (c != NULL) {
		dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
		return hpsa_send_abort(h, scsi3addr, abort, 0);
	}
	rc = hpsa_send_abort(h, scsi3addr, abort, 0);

	/* if the command is still in our queue, we can't conclude that it was
	 * aborted (it might have just completed normally) but in any case
	 * we don't need to try to abort it another way.
	 */
	c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
	if (c)
		rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
	return rc && rc2;
}

/* Send an abort for the specified command.
 *	If the device and controller support it,
 *		send a task abort request.
 */
static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
{
	int i, rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *abort; /* pointer to command to be aborted */
	struct CommandList *found;
	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
	char msg[256];		/* For debug messaging. */
	int ml = 0;
	u32 tagupper, taglower;

	/* Find the controller of the command to be aborted */
	h = sdev_to_hba(sc->device);
	if (WARN(h == NULL,
			"ABORT REQUEST FAILED, Controller lookup failed.\n"))
		return FAILED;

	/* Check that controller supports some kind of task abort */
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		return FAILED;

	memset(msg, 0, sizeof(msg));
	ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ",
		h->scsi_host->host_no, sc->device->channel,
		sc->device->id, sc->device->lun);

	/* Find the device of the command to be aborted */
	dev = sc->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
				msg);
		return FAILED;
	}

	/* Get SCSI command to be aborted */
	abort = (struct CommandList *) sc->host_scribble;
	if (abort == NULL) {
		dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
				msg);
		return FAILED;
	}
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
	as = (struct scsi_cmnd *) abort->scsi_cmd;
	if (as != NULL)
		ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
			as->cmnd[0], as->serial_number);
	dev_dbg(&h->pdev->dev, "%s\n", msg);
	dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);

	/* Search reqQ to see if command is queued but not submitted;
	 * if so, complete the command with aborted status and remove
	 * it from the reqQ.
	 */
	found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
	if (found) {
		found->err_info->CommandStatus = CMD_ABORTED;
		finish_cmd(found);
		dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
				msg);
		return SUCCESS;
	}

	/* not in reqQ, if also not in cmpQ, must have already completed */
	found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
	if (!found) {
		dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
				msg);
		return SUCCESS;
	}

	/*
	 * Command is in flight, or possibly already completed
	 * by the firmware (but not to the scsi mid layer) but we can't
	 * distinguish which.  Send the abort down.
	 */
	rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
	if (rc != 0) {
		dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
		dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
			h->scsi_host->host_no,
			dev->bus, dev->target, dev->lun);
		return FAILED;
	}
	dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);

	/* If the abort(s) above completed and actually aborted the
	 * command, then the command to be aborted should already be
	 * completed.  If not, wait around a bit more to see if they
	 * manage to complete normally.
	 */
#define ABORT_COMPLETE_WAIT_SECS 30
	for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
		found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
		if (!found)
			return SUCCESS;
		msleep(100);
	}
	dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
		msg, ABORT_COMPLETE_WAIT_SECS);
	return FAILED;
}

/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  Lock must be held when calling this.
 * cmd_free() is the complement.
 */
static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	int i;
	union u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	do {
		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
		if (i == h->nr_cmds) {
			spin_unlock_irqrestore(&h->lock, flags);
			return NULL;
		}
	} while (test_and_set_bit
		 (i & (BITS_PER_LONG - 1),
		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
	spin_unlock_irqrestore(&h->lock, flags);

	c = h->cmd_pool + i;
	memset(c, 0, sizeof(*c));
	cmd_dma_handle = h->cmd_pool_dhandle
	    + i * sizeof(*c);
	c->err_info = h->errinfo_pool + i;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + i * sizeof(*c->err_info);

	c->cmdindex = i;

	INIT_LIST_HEAD(&c->list);
	c->busaddr = (u32) cmd_dma_handle;
	temp64.val = (u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(*c->err_info);

	c->h = h;
	return c;
}

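/* Example of the bitmap bookkeeping above: with BITS_PER_LONG == 64,
 * command index i = 70 lives in word i / 64 = 1 at bit i & 63 = 6 of
 * h->cmd_pool_bits; test_and_set_bit() makes the loop retry the search
 * if another thread claimed the same slot first.
 */
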
/* For operations that can wait for kmalloc to possibly sleep,
 * this routine can be called. Lock need not be held to call
 * cmd_special_alloc. cmd_special_free() is the complement.
 */
static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	union u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
	if (c == NULL)
		return NULL;
	memset(c, 0, sizeof(*c));

	c->cmd_type = CMD_SCSI;
	c->cmdindex = -1;

	c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
		    &err_dma_handle);

	if (c->err_info == NULL) {
		pci_free_consistent(h->pdev,
			sizeof(*c), c, cmd_dma_handle);
		return NULL;
	}
	memset(c->err_info, 0, sizeof(*c->err_info));

	INIT_LIST_HEAD(&c->list);
	c->busaddr = (u32) cmd_dma_handle;
	temp64.val = (u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(*c->err_info);

	c->h = h;
	return c;
}

static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
	int i;
	unsigned long flags;

	i = c - h->cmd_pool;
	spin_lock_irqsave(&h->lock, flags);
	clear_bit(i & (BITS_PER_LONG - 1),
		  h->cmd_pool_bits + (i / BITS_PER_LONG));
	spin_unlock_irqrestore(&h->lock, flags);
}

static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
{
	union u64bit temp64;

	temp64.val32.lower = c->ErrDesc.Addr.lower;
	temp64.val32.upper = c->ErrDesc.Addr.upper;
	pci_free_consistent(h->pdev, sizeof(*c->err_info),
			    c->err_info, (dma_addr_t) temp64.val);
	pci_free_consistent(h->pdev, sizeof(*c),
			    c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
}

#ifdef CONFIG_COMPAT

static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			   sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

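/* Note the compat shim pattern above: each field of the 32-bit user
 * struct is unpacked into a native struct staged back in user space via
 * compat_alloc_user_space(), the regular hpsa_ioctl() handler then runs
 * against that staging copy, and only error_info is copied back out.
 */
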
static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	int cmd, void *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			   sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return hpsa_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}

static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev, "driver version string '%s' "
			"unrecognized.", HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
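	/* e.g. driver version "3.4.4" packs as (3 << 16) | (4 << 8) | 4,
	 * i.e. 0x030404.
	 */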
	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}

static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	struct CommandList *c;
	char *buff = NULL;
	union u64bit temp64;
	int rc = 0;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -EFAULT;
		if (iocommand.Request.Type.Direction == XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out_kfree;
			}
		} else {
			memset(buff, 0, iocommand.buf_size);
		}
	}
	c = cmd_special_alloc(h);
	if (c == NULL) {
		rc = -ENOMEM;
		goto out_kfree;
	}
	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else { /* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
	/* use the kernel address the cmd block for tag */
	c->Header.Tag.lower = c->busaddr;

	/* Fill in Request block */
	memcpy(&c->Request, &iocommand.Request,
		sizeof(c->Request));

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64.val = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
			c->SG[0].Addr.lower = 0;
			c->SG[0].Addr.upper = 0;
			c->SG[0].Len = 0;
			rc = -ENOMEM;
			goto out;
		}
		c->SG[0].Addr.lower = temp64.val32.lower;
		c->SG[0].Addr.upper = temp64.val32.upper;
		c->SG[0].Len = iocommand.buf_size;
		c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/
	}
	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
	if (iocommand.buf_size > 0)
		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);

	/* Copy the error information out */
	memcpy(&iocommand.error_info, c->err_info,
		sizeof(iocommand.error_info));
	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}
	if (iocommand.Request.Type.Direction == XFER_READ &&
		iocommand.buf_size > 0) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			rc = -EFAULT;
			goto out;
		}
	}
out:
	cmd_special_free(h, c);
out_kfree:
	kfree(buff);
	return rc;
}

static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	struct CommandList *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	union u64bit temp64;
	BYTE sg_used = 0;
	int status = 0;
	int i;
	u32 left;
	u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = (BIG_IOCTL_Command_struct *)
	    kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction == XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_special_alloc(h);
	if (c == NULL) {
		status = -ENOMEM;
		goto cleanup1;
	}
	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = c->Header.SGTotal = sg_used;
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	c->Header.Tag.lower = c->busaddr;
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {
		int i;
		for (i = 0; i < sg_used; i++) {
			temp64.val = pci_map_single(h->pdev, buff[i],
				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
			if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
				c->SG[i].Addr.lower = 0;
				c->SG[i].Addr.upper = 0;
				c->SG[i].Len = 0;
				hpsa_pci_unmap(h->pdev, c, i,
					PCI_DMA_BIDIRECTIONAL);
				status = -ENOMEM;
				goto cleanup0;
			}
			c->SG[i].Addr.lower = temp64.val32.lower;
			c->SG[i].Addr.upper = temp64.val32.upper;
			c->SG[i].Len = buff_size[i];
			c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
		}
	}
	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup0;
	}
	if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;
		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				status = -EFAULT;
				goto cleanup0;
			}
			ptr += buff_size[i];
		}
	}
	status = 0;
cleanup0:
	cmd_special_free(h, c);
cleanup1:
	if (buff) {
		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}

static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}

static int increment_passthru_count(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->passthru_count_lock, flags);
	if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) {
		spin_unlock_irqrestore(&h->passthru_count_lock, flags);
		return -1;
	}
	h->passthru_count++;
	spin_unlock_irqrestore(&h->passthru_count_lock, flags);
	return 0;
}

static void decrement_passthru_count(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->passthru_count_lock, flags);
	if (h->passthru_count <= 0) {
		spin_unlock_irqrestore(&h->passthru_count_lock, flags);
		/* not expecting to get here. */
		dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n");
		return;
	}
	h->passthru_count--;
	spin_unlock_irqrestore(&h->passthru_count_lock, flags);
}

/*
 * ioctl
 */
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;
	int rc;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		if (increment_passthru_count(h))
			return -EAGAIN;
		rc = hpsa_passthru_ioctl(h, argp);
		decrement_passthru_count(h);
		return rc;
	case CCISS_BIG_PASSTHRU:
		if (increment_passthru_count(h))
			return -EAGAIN;
		rc = hpsa_big_passthru_ioctl(h, argp);
		decrement_passthru_count(h);
		return rc;
	default:
		return -ENOTTY;
	}
}

static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
				u8 reset_type)
{
	struct CommandList *c;

	c = cmd_alloc(h);
	if (!c)
		return -ENOMEM;
	/* fill_cmd can't fail here, no data buffer to map */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
		RAID_CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
	c->waiting = NULL;
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
	return 0;
}

static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	int pci_dir = XFER_NONE;
	struct CommandList *a; /* for commands to be aborted */

	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	c->Header.Tag.lower = c->busaddr;
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	c->Request.Type.Type = cmd_type;
	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code & VPD_PAGE) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = (page_code & 0xff);
			}
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to controller so it's a physical command
			   mode = 00 target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			c->Request.CDB[7] = (size >> 8) & 0xFF;
			c->Request.CDB[8] = size & 0xFF;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0;
			break;
		case HPSA_GET_RAID_MAP:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_CISS_READ;
			c->Request.CDB[1] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case BMIC_SENSE_CONTROLLER_PARAMETERS:
			c->Request.CDBLen = 10;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
			BUG();
			return -1;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {

		case HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.Type.Type = 1; /* It is a MSG not a CMD */
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
			/* If bytes 4-7 are zero, it means reset the */
			/* LunID device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		case HPSA_ABORT_MSG:
			a = buff;	/* point to command to be aborted */
			dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
				a->Header.Tag.upper, a->Header.Tag.lower,
				c->Header.Tag.upper, c->Header.Tag.lower);
			c->Request.CDBLen = 16;
			c->Request.Type.Type = TYPE_MSG;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0; /* Don't time out */
			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
			c->Request.CDB[2] = 0x00; /* reserved */
			c->Request.CDB[3] = 0x00; /* reserved */
			/* Tag to abort goes in CDB[4]-CDB[11] */
			c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
			c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
			c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
			c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
			c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
			c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
			c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
			c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
			c->Request.CDB[12] = 0x00; /* reserved */
			c->Request.CDB[13] = 0x00; /* reserved */
			c->Request.CDB[14] = 0x00; /* reserved */
			c->Request.CDB[15] = 0x00; /* reserved */
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (c->Request.Type.Direction) {
	case XFER_READ:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case XFER_WRITE:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case XFER_NONE:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
	}
	if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
		return -1;
	return 0;
}

/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap_nocache(page_base,
		page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}

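/* For instance (assuming 4 KiB pages), base 0xfe001234 maps with
 * page_base 0xfe001000 and page_offs 0x234; the caller gets back the
 * remapped page start plus that offset.
 */
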
/* Takes cmds off the submission queue and sends them to the hardware,
 * then puts them on the queue of cmds waiting for completion.
 */
static void start_io(struct ctlr_info *h)
{
	struct CommandList *c;
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	while (!list_empty(&h->reqQ)) {
		c = list_entry(h->reqQ.next, struct CommandList, list);
		/* can't do anything if fifo is full */
		if ((h->access.fifo_full(h))) {
			h->fifo_recently_full = 1;
			dev_warn(&h->pdev->dev, "fifo full\n");
			break;
		}
		h->fifo_recently_full = 0;

		/* Get the first entry from the Request Q */
		removeQ(c);
		h->Qdepth--;

		/* Put job onto the completed Q */
		addQ(&h->cmpQ, c);

		/* Must increment commands_outstanding before unlocking
		 * and submitting to avoid race checking for fifo full
		 * condition.
		 */
		h->commands_outstanding++;
		if (h->commands_outstanding > h->max_outstanding)
			h->max_outstanding = h->commands_outstanding;

		/* Tell the controller execute command */
		spin_unlock_irqrestore(&h->lock, flags);
		h->access.submit_command(h, c);
		spin_lock_irqsave(&h->lock, flags);
	}
	spin_unlock_irqrestore(&h->lock, flags);
}

static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
	return h->access.command_completed(h, q);
}

static inline bool interrupt_pending(struct ctlr_info *h)
{
	return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(struct ctlr_info *h)
{
	return (h->access.intr_pending(h) == 0) ||
		(h->interrupts_enabled == 0);
}

static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
	u32 raw_tag)
{
	if (unlikely(tag_index >= h->nr_cmds)) {
		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
		return 1;
	}
	return 0;
}

static inline void finish_cmd(struct CommandList *c)
{
	unsigned long flags;
	int io_may_be_stalled = 0;
	struct ctlr_info *h = c->h;

	spin_lock_irqsave(&h->lock, flags);
	removeQ(c);

	/*
	 * Check for possibly stalled i/o.
	 *
	 * If a fifo_full condition is encountered, requests will back up
	 * in h->reqQ.  This queue is only emptied out by start_io which is
	 * only called when a new i/o request comes in.  If no i/o's are
	 * forthcoming, the i/o's in h->reqQ can get stuck.  So we call
	 * start_io from here if we detect such a danger.
	 *
	 * Normally, we shouldn't hit this case, but pounding on the
	 * CCISS_PASSTHRU ioctl can provoke it.  Only call start_io if
	 * commands_outstanding is low.  We want to avoid calling
	 * start_io from in here as much as possible, and esp. don't
	 * want to get in a cycle where we call start_io every time
	 * through here.
	 */
	if (unlikely(h->fifo_recently_full) &&
		h->commands_outstanding < 5)
		io_may_be_stalled = 1;

	spin_unlock_irqrestore(&h->lock, flags);

	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
			|| c->cmd_type == CMD_IOACCEL2))
		complete_scsi_command(c);
	else if (c->cmd_type == CMD_IOCTL_PEND)
		complete(c->waiting);
	if (unlikely(io_may_be_stalled))
		start_io(h);
}

static inline u32 hpsa_tag_contains_index(u32 tag)
{
	return tag & DIRECT_LOOKUP_BIT;
}

static inline u32 hpsa_tag_to_index(u32 tag)
{
	return tag >> DIRECT_LOOKUP_SHIFT;
}

static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
{
#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
#define HPSA_SIMPLE_ERROR_BITS 0x03
	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return tag & ~HPSA_SIMPLE_ERROR_BITS;
	return tag & ~HPSA_PERF_ERROR_BITS;
}

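/* The low-order tag bits are reused by the hardware for error/status
 * flags: performant mode reserves the low DIRECT_LOOKUP_SHIFT bits,
 * simple mode only the low two.  Masking them off recovers the tag
 * (and, for direct-lookup tags, the command index).
 */
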
/* process completion of an indexed ("direct lookup") command */
static inline void process_indexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag_index;
	struct CommandList *c;

	tag_index = hpsa_tag_to_index(raw_tag);
	if (!bad_tag(h, tag_index, raw_tag)) {
		c = h->cmd_pool + tag_index;
		finish_cmd(c);
	}
}

/* process completion of a non-indexed command */
static inline void process_nonindexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag;
	struct CommandList *c = NULL;
	unsigned long flags;

	tag = hpsa_tag_discard_error_bits(h, raw_tag);
	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(c, &h->cmpQ, list) {
		if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
			spin_unlock_irqrestore(&h->lock, flags);
			finish_cmd(c);
			return;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	bad_tag(h, h->nr_cmds + 1, raw_tag);
}

/* Some controllers, like p400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * Only need to check for this in the hpsa_xxx_discard_completions
 * functions.
 */
static int ignore_bogus_interrupt(struct ctlr_info *h)
{
	if (likely(!reset_devices))
		return 0;

	if (likely(h->interrupts_enabled))
		return 0;

	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
		"(known firmware bug.)  Ignoring.\n");

	return 1;
}

/*
 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 * Relies on (h->q[x] == x) being true for x such that
 * 0 <= x < MAX_REPLY_QUEUES.
 */
static struct ctlr_info *queue_to_hba(u8 *queue)
{
	return container_of((queue - *queue), struct ctlr_info, q[0]);
}

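/* E.g. if queue points at h->q[2], then *queue == 2, so queue - *queue
 * is &h->q[0], and container_of() recovers the enclosing ctlr_info.
 */
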
static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u8 q = *(u8 *) queue;
	u32 raw_tag;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY)
			raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}

static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY)
		raw_tag = next_command(h, q);
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba((u8 *) queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY) {
			if (likely(hpsa_tag_contains_index(raw_tag)))
				process_indexed_cmd(h, raw_tag);
			else
				process_nonindexed_cmd(h, raw_tag);
			raw_tag = next_command(h, q);
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY) {
		if (likely(hpsa_tag_contains_index(raw_tag)))
			process_indexed_cmd(h, raw_tag);
		else
			process_nonindexed_cmd(h, raw_tag);
		raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}

/* Send a message CDB to the firmware. Careful, this only works
 * in simple mode, not performant mode due to the tag lookup.
 * We only ever use this immediately after a controller reset.
 */
static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
			unsigned char type)
{
	struct Command {
		struct CommandListHeader CommandHeader;
		struct RequestBlock Request;
		struct ErrDescriptor ErrorDescriptor;
	};
	struct Command *cmd;
	static const size_t cmd_sz = sizeof(*cmd) +
					sizeof(cmd->ErrorDescriptor);
	dma_addr_t paddr64;
	uint32_t paddr32, tag;
	void __iomem *vaddr;
	int i, err;

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL)
		return -ENOMEM;

	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
	 * CCISS commands, so they must be allocated from the lower 4GiB of
	 * memory.
	 */
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
	if (cmd == NULL) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
	 * although there's no guarantee, we assume that the address is at
	 * least 4-byte aligned (most likely, it's page-aligned).
	 */
	paddr32 = paddr64;

	cmd->CommandHeader.ReplyQueue = 0;
	cmd->CommandHeader.SGList = 0;
	cmd->CommandHeader.SGTotal = 0;
	cmd->CommandHeader.Tag.lower = paddr32;
	cmd->CommandHeader.Tag.upper = 0;
	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);

	cmd->Request.CDBLen = 16;
	cmd->Request.Type.Type = TYPE_MSG;
	cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
	cmd->Request.Type.Direction = XFER_NONE;
	cmd->Request.Timeout = 0; /* Don't time out */
	cmd->Request.CDB[0] = opcode;
	cmd->Request.CDB[1] = type;
	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
	cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
	cmd->ErrorDescriptor.Addr.upper = 0;
	cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);

	writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);

	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
			break;
		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
	}

	iounmap(vaddr);

	/* we leak the DMA buffer here ... no choice since the controller could
	 * still complete the command.
	 */
	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
			opcode, type);
		return -ETIMEDOUT;
	}

	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);

	if (tag & HPSA_ERROR_BIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
			opcode, type);
		return -EIO;
	}

	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
		opcode, type);
	return 0;
}

#define hpsa_noop(p) hpsa_message(p, 3, 0)
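
/* hpsa_noop(pdev) therefore sends message opcode 3 with type 0 -- a
 * benign message that just exercises the command path, used to check
 * that the controller responds (hpsa_message() is only used right
 * after a reset).
 */
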
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
	void * __iomem vaddr, u32 use_doorbell)
{
	u16 pmcsr;
	int pos;

	if (use_doorbell) {
		/* For everything after the P600, the PCI power state method
		 * of resetting the controller doesn't work, so we have this
		 * other way using the doorbell register.
		 */
		dev_info(&pdev->dev, "using doorbell to reset controller\n");
		writel(use_doorbell, vaddr + SA5_DOORBELL);

		/* PMC hardware guys tell us we need a 5 second delay after
		 * doorbell reset and before any attempt to talk to the board
		 * at all to ensure that this actually works and doesn't fall
		 * over in some weird corner cases.
		 */
		msleep(5000);
	} else { /* Try to do it the PCI power state way */

		/* Quoting from the Open CISS Specification: "The Power
		 * Management Control/Status Register (CSR) controls the power
		 * state of the device.  The normal operating state is D0,
		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
		 * the controller, place the interface device in D3 then to D0,
		 * this causes a secondary PCI reset which will reset the
		 * controller."
		 */

		pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
		if (pos == 0) {
			dev_err(&pdev->dev,
				"hpsa_reset_controller: "
				"PCI PM not supported\n");
			return -ENODEV;
		}
		dev_info(&pdev->dev, "using PCI PM to reset controller\n");
		/* enter the D3hot power management state */
		pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= PCI_D3hot;
		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);

		msleep(500);

		/* enter the D0 power management state */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= PCI_D0;
		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);

		/*
		 * The P600 requires a small delay when changing states.
		 * Otherwise we may think the board did not reset and we bail.
		 * This is for kdump only and is particular to the P600.
		 */
		msleep(500);
	}
	return 0;
}

static void init_driver_version(char *driver_version, int len)
{
	memset(driver_version, 0, len);
	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}

static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
{
	char *driver_version;
	int i, size = sizeof(cfgtable->driver_version);

	driver_version = kmalloc(size, GFP_KERNEL);
	if (!driver_version)
		return -ENOMEM;

	init_driver_version(driver_version, size);
	for (i = 0; i < size; i++)
		writeb(driver_version[i], &cfgtable->driver_version[i]);
	kfree(driver_version);
	return 0;
}

static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
					  unsigned char *driver_ver)
{
	int i;

	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
		driver_ver[i] = readb(&cfgtable->driver_version[i]);
}

static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
{
	char *driver_ver, *old_driver_ver;
	int rc, size = sizeof(cfgtable->driver_version);

	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
	if (!old_driver_ver)
		return -ENOMEM;
	driver_ver = old_driver_ver + size;

	/* After a reset, the 32 bytes of "driver version" in the cfgtable
	 * should have been changed, otherwise we know the reset failed.
	 */
	init_driver_version(old_driver_ver, size);
	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
	rc = !memcmp(driver_ver, old_driver_ver, size);
	kfree(old_driver_ver);
	return rc;
}

/* This does a hard reset of the controller using PCI power management
 * states or the doorbell register.
 */
static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	void __iomem *vaddr;
	unsigned long paddr;
	u32 misc_fw_support;
	int rc;
	struct CfgTable __iomem *cfgtable;
	u32 use_doorbell;
	u32 board_id;
	u16 command_register;

	/* For controllers as old as the P600, this is very nearly
	 * the same thing as
	 *
	 * pci_save_state(pci_dev);
	 * pci_set_power_state(pci_dev, PCI_D3hot);
	 * pci_set_power_state(pci_dev, PCI_D0);
	 * pci_restore_state(pci_dev);
	 *
	 * For controllers newer than the P600, the pci power state
	 * method of resetting doesn't work so we have another way
	 * using the doorbell register.
	 */

	rc = hpsa_lookup_board_id(pdev, &board_id);
	if (rc < 0 || !ctlr_is_resettable(board_id)) {
		dev_warn(&pdev->dev, "Not resetting device.\n");
		return -ENODEV;
	}

	/* if controller is soft- but not hard resettable... */
	if (!ctlr_is_hard_resettable(board_id))
		return -ENOTSUPP; /* try soft reset later. */

	/* Save the PCI command register */
	pci_read_config_word(pdev, 4, &command_register);
	/* Turn the board off.  This is so that later pci_restore_state()
	 * won't turn the board on before the rest of config space is ready.
	 */
	pci_disable_device(pdev);
	pci_save_state(pdev);

	/* find the first memory BAR, so we can find the cfg table */
	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
	if (rc)
		return rc;
	vaddr = remap_pci_mem(paddr, 0x250);
	if (!vaddr)
		return -ENOMEM;

	/* find cfgtable in order to check if reset via doorbell is supported */
	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
					&cfg_base_addr_index, &cfg_offset);
	if (rc)
		goto unmap_vaddr;
	cfgtable = remap_pci_mem(pci_resource_start(pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
	if (!cfgtable) {
		rc = -ENOMEM;
		goto unmap_vaddr;
	}
	rc = write_driver_ver_to_cfgtable(cfgtable);
	if (rc)
		goto unmap_cfgtable;

	/* If reset via doorbell register is supported, use that.
	 * There are two such methods.  Favor the newest method.
	 */
	misc_fw_support = readl(&cfgtable->misc_fw_support);
	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
	if (use_doorbell) {
		use_doorbell = DOORBELL_CTLR_RESET2;
	} else {
		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
		if (use_doorbell) {
			dev_warn(&pdev->dev, "Soft reset not supported. "
				"Firmware update is required.\n");
			rc = -ENOTSUPP; /* try soft reset */
			goto unmap_cfgtable;
		}
	}

	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		goto unmap_cfgtable;

	pci_restore_state(pdev);
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		goto unmap_cfgtable;
	}
	pci_write_config_word(pdev, 4, command_register);

	/* Some devices (notably the HP Smart Array 5i Controller)
	   need a little pause here */
	msleep(HPSA_POST_RESET_PAUSE_MSECS);

	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"failed waiting for board to become ready "
			"after hard reset\n");
		goto unmap_cfgtable;
	}

	rc = controller_reset_failed(vaddr);
	if (rc < 0)
		goto unmap_cfgtable;
	if (rc) {
		dev_warn(&pdev->dev, "Unable to successfully reset "
			"controller. Will try soft reset.\n");
		rc = -ENOTSUPP;
	} else {
		dev_info(&pdev->dev, "board ready after hard reset.\n");
	}

unmap_cfgtable:
	iounmap(cfgtable);

unmap_vaddr:
	iounmap(vaddr);
	return rc;
}
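
/*
 * Reset-method selection above, summarized for illustration:
 *   - MISC_FW_DOORBELL_RESET2 supported: ring DOORBELL_CTLR_RESET2.
 *   - only the older MISC_FW_DOORBELL_RESET supported: refuse with
 *     -ENOTSUPP so the caller falls back to a soft reset.
 *   - neither doorbell method: use_doorbell stays 0 and
 *     hpsa_controller_hard_reset() falls back to the PCI power state
 *     (D3hot then D0) method.
 */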
/*
 *  We cannot read the structure directly, for portability we must use
 *  the io functions.
 *  This is for debug only.
 */
static void print_cfg_table(struct device *dev, struct CfgTable *tb)
{
#ifdef HPSA_DEBUG
	int i;
	char temp_name[17];

	dev_info(dev, "Controller Configuration information\n");
	dev_info(dev, "------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	dev_info(dev, "   Signature = %s\n", temp_name);
	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
	dev_info(dev, "   Transport methods supported = 0x%x\n",
	       readl(&(tb->TransportSupport)));
	dev_info(dev, "   Transport methods active = 0x%x\n",
	       readl(&(tb->TransportActive)));
	dev_info(dev, "   Requested transport Method = 0x%x\n",
	       readl(&(tb->HostWrite.TransportRequest)));
	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntDelay)));
	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntCount)));
	dev_info(dev, "   Max outstanding commands = %d\n",
	       readl(&(tb->CmdsOutMax)));
	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	dev_info(dev, "   Server Name = %s\n", temp_name);
	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
		readl(&(tb->HeartBeat)));
#endif				/* HPSA_DEBUG */
}
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
				       "base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}
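
/*
 * Example of the offset bookkeeping above (illustrative): BAR registers
 * start at config offset 0x10 (PCI_BASE_ADDRESS_0); each 32-bit BAR
 * advances the running offset by 4 and each 64-bit BAR by 8.  If
 * resource 0 is a 64-bit memory BAR, then a pci_bar_addr of 0x18
 * (PCI_BASE_ADDRESS_2) matches offset 8 after the first iteration, and
 * the function returns resource index 1.
 */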
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use IO-APIC mode.
 */

static void hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
	int err, i;
	struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];

	for (i = 0; i < MAX_REPLY_QUEUES; i++) {
		hpsa_msix_entries[i].vector = 0;
		hpsa_msix_entries[i].entry = i;
	}

	/* Some boards advertise MSI but don't really support it */
	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
		goto default_int_mode;
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
		dev_info(&h->pdev->dev, "MSIX\n");
		h->msix_vector = MAX_REPLY_QUEUES;
		err = pci_enable_msix(h->pdev, hpsa_msix_entries,
				      h->msix_vector);
		if (err > 0) {
			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
			       "available\n", err);
			h->msix_vector = err;
			err = pci_enable_msix(h->pdev, hpsa_msix_entries,
					      h->msix_vector);
		}
		if (!err) {
			for (i = 0; i < h->msix_vector; i++)
				h->intr[i] = hpsa_msix_entries[i].vector;
			return;
		} else {
			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
			       err);
			h->msix_vector = 0;
			goto default_int_mode;
		}
	}
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
		dev_info(&h->pdev->dev, "MSI\n");
		if (!pci_enable_msi(h->pdev))
			h->msi_vector = 1;
		else
			dev_warn(&h->pdev->dev, "MSI init failed\n");
	}
default_int_mode:
#endif				/* CONFIG_PCI_MSI */
	/* if we get here we're going to use the default interrupt mode */
	h->intr[h->intr_mode] = h->pdev->irq;
}
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id)
			return i;

	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
		!hpsa_allow_any) {
		dev_warn(&pdev->dev, "unrecognized board ID: "
			"0x%08x, ignoring.\n", *board_id);
		return -ENODEV;
	}
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}
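
/*
 * Example (illustrative): a controller with subsystem vendor 0x103C and
 * subsystem device 0x3241 yields *board_id = 0x3241103C, which is then
 * looked up in the products[] table.
 */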
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}
static int hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable)
		return -ENOMEM;
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
				cfg_base_addr_index)+cfg_offset+trans_offset,
				sizeof(*h->transtable));
	if (!h->transtable)
		return -ENOMEM;
	return 0;
}
static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < 16) {
		dev_warn(&h->pdev->dev, "Controller reports "
			"max supported commands of %d, an obvious lie. "
			"Using 16.  Ensure that firmware is up to date.\n",
			h->max_commands);
		h->max_commands = 16;
	}
}
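
/*
 * Example (illustrative): in a kdump kernel (reset_devices set), a
 * controller reporting 1024 performant-mode commands is clamped to 32 to
 * conserve memory, while one reporting 8 is treated as lying firmware
 * and raised to the 16-command floor.
 */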
/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
	/*
	 * Limit in-command s/g elements to 32 to save dma'able memory.
	 * However, the spec says if 0, use 31.
	 */
	h->max_cmd_sg_entries = 31;
	if (h->maxsgentries > 512) {
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}

	/* Find out what task management functions are supported and cache */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
}
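
/*
 * Example (illustrative): a controller reporting maxsgentries = 1024
 * gets max_cmd_sg_entries = 32 and chainsize = 1024 - 32 + 1 = 993, and
 * maxsgentries drops to 1023 because one slot is reserved for the chain
 * pointer.
 */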
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}
static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
	u32 driver_support;

	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
	driver_support = readl(&(h->cfgtable->driver_support));
#ifdef CONFIG_X86
	driver_support |= ENABLE_SCSI_PREFETCH;
#endif
	driver_support |= ENABLE_UNIT_ATTN;
	writel(driver_support, &(h->cfgtable->driver_support));
}
/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* wait until the clear_event_notify bit 6 is cleared by controller. */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
			break;
		/* delay and try again */
		msleep(20);
	}
}
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take awhile.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			break;
		/* delay and try again */
		usleep_range(10000, 20000);
	}
}
static int hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));

	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
		goto error;
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
error:
	dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
	return -ENODEV;
}
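
/*
 * The doorbell handshake used above (and again for performant mode) is:
 * write the requested method into HostWrite.TransportRequest, ring
 * CFGTBL_ChangeReq in the doorbell, poll until the controller clears
 * that doorbell bit, then confirm the method shows up in TransportActive.
 */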
static int hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return -ENODEV;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
		return err;
	}

	/* Enable bus mastering (pci_disable_device may disable this) */
	pci_set_master(h->pdev);

	err = pci_request_regions(h->pdev, HPSA);
	if (err) {
		dev_err(&h->pdev->dev,
			"cannot obtain PCI resources, aborting\n");
		return err;
	}
	hpsa_interrupt_mode(h);
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto err_out_free_res;
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto err_out_free_res;
	err = hpsa_find_cfgtables(h);
	if (err)
		goto err_out_free_res;
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto err_out_free_res;
	}
	hpsa_set_driver_support_bits(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto err_out_free_res;
	return 0;

err_out_free_res:
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	if (h->vaddr)
		iounmap(h->vaddr);
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	return err;
}
static void hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}
static int hpsa_init_reset_devices(struct pci_dev *pdev)
{
	int rc, i;

	if (!reset_devices)
		return 0;

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc == -ENOTSUPP)
		return rc; /* just try to do the kdump anyhow. */
	if (rc)
		return -ENODEV;

	/* Now try to get the controller to respond to a no-op */
	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
					(i < 11 ? "; re-trying" : ""));
	}
	return 0;
}
static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		return -ENOMEM;
	}
	return 0;
}
static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	if (h->cmd_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct CommandList),
			    h->cmd_pool, h->cmd_pool_dhandle);
	if (h->ioaccel2_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	if (h->errinfo_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct ErrorInfo),
			    h->errinfo_pool,
			    h->errinfo_pool_dhandle);
	if (h->ioaccel_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(struct io_accel1_cmd),
			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
}
static int hpsa_request_irq(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc, i;

	/*
	 * initialize h->q[x] = x so that interrupt handlers know which
	 * queue to process.
	 */
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < h->msix_vector; i++)
			rc = request_irq(h->intr[i], msixhandler,
					0, h->devname,
					&h->q[i]);
	} else {
		/* Use single reply pool */
		if (h->msix_vector > 0 || h->msi_vector) {
			rc = request_irq(h->intr[h->intr_mode],
				msixhandler, 0, h->devname,
				&h->q[h->intr_mode]);
		} else {
			rc = request_irq(h->intr[h->intr_mode],
				intxhandler, IRQF_SHARED, h->devname,
				&h->q[h->intr_mode]);
		}
	}
	if (rc) {
		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
		       h->intr[h->intr_mode], h->devname);
		return -ENODEV;
	}
	return 0;
}
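
/*
 * Example (illustrative): with 4 MSI-X vectors in performant mode, the
 * loop above registers msixhandler on h->intr[0..3] with &h->q[i] as
 * dev_id; because h->q[i] == i, each handler recovers its reply queue
 * number simply by dereferencing the pointer it was registered with.
 */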
static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
		HPSA_RESET_TYPE_CONTROLLER)) {
		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
		return -EIO;
	}

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return -1;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
		dev_warn(&h->pdev->dev, "Board failed to become ready "
			"after soft reset.\n");
		return -1;
	}

	return 0;
}
static void free_irqs(struct ctlr_info *h)
{
	int i;

	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
		/* Single reply queue, only one irq to free */
		i = h->intr_mode;
		free_irq(h->intr[i], &h->q[i]);
		return;
	}

	for (i = 0; i < h->msix_vector; i++)
		free_irq(h->intr[i], &h->q[i]);
}
static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
{
	free_irqs(h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector) {
		if (h->pdev->msix_enabled)
			pci_disable_msix(h->pdev);
	} else if (h->msi_vector) {
		if (h->pdev->msi_enabled)
			pci_disable_msi(h->pdev);
	}
#endif /* CONFIG_PCI_MSI */
}
static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_irqs_and_disable_msix(h);
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	kfree(h->ioaccel1_blockFetchTable);
	kfree(h->blockFetchTable);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	if (h->vaddr)
		iounmap(h->vaddr);
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	pci_release_regions(h->pdev);
	kfree(h);
}
/* Called when controller lockup detected. */
static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
{
	struct CommandList *c = NULL;

	assert_spin_locked(&h->lock);
	/* Mark all outstanding commands as failed and complete them. */
	while (!list_empty(list)) {
		c = list_entry(list->next, struct CommandList, list);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		finish_cmd(c);
	}
}
static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;

	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
			h->lockup_detected);
	pci_disable_device(h->pdev);
	spin_lock_irqsave(&h->lock, flags);
	fail_all_cmds_on_list(h, &h->cmpQ);
	fail_all_cmds_on_list(h, &h->reqQ);
	spin_unlock_irqrestore(&h->lock, flags);
}
static void detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
}
static void hpsa_ack_ctlr_events(struct ctlr_info *h)
{
	int i;
	char *event_type;

	/* Clear the driver-requested rescan flag */
	h->drv_req_rescan = 0;

	/* Ask the controller to clear the events we're handling. */
	if ((h->transMethod & (CFGTBL_Trans_io_accel1
			| CFGTBL_Trans_io_accel2)) &&
		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {

		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
			event_type = "state change";
		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
			event_type = "configuration change";
		/* Stop sending new RAID offload reqs via the IO accelerator */
		scsi_block_requests(h->scsi_host);
		for (i = 0; i < h->ndevices; i++)
			h->dev[i]->offload_enabled = 0;
		hpsa_drain_accel_commands(h);
		/* Set 'accelerator path config change' bit */
		dev_warn(&h->pdev->dev,
			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
			h->events, event_type);
		writel(h->events, &(h->cfgtable->clear_event_notify));
		/* Set the "clear event notify field update" bit 6 */
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		/* Wait until ctlr clears 'clear event notify field', bit 6 */
		hpsa_wait_for_clear_event_notify_ack(h);
		scsi_unblock_requests(h->scsi_host);
	} else {
		/* Acknowledge controller notification events. */
		writel(h->events, &(h->cfgtable->clear_event_notify));
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_clear_event_notify_ack(h);
#if 0
		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_mode_change_ack(h);
#endif
	}
	return;
}
/* Check a register on the controller to see if there are configuration
 * changes (added/changed/removed logical drives, etc.) which mean that
 * we should rescan the controller for devices.
 * Also check flag for driver-initiated rescan.
 */
static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
{
	if (h->drv_req_rescan)
		return 1;

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return 0;

	h->events = readl(&(h->cfgtable->event_notify));
	return h->events & RESCAN_REQUIRED_EVENT_BITS;
}
/*
 * Check if any of the offline devices have become ready
 */
static int hpsa_offline_devices_ready(struct ctlr_info *h)
{
	unsigned long flags;
	struct offline_device_entry *d;
	struct list_head *this, *tmp;

	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_safe(this, tmp, &h->offline_device_list) {
		d = list_entry(this, struct offline_device_entry,
				offline_list);
		spin_unlock_irqrestore(&h->offline_device_lock, flags);
		if (!hpsa_volume_offline(h, d->scsi3addr))
			return 1;
		spin_lock_irqsave(&h->offline_device_lock, flags);
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
	return 0;
}
static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);
	detect_controller_lockup(h);
	if (h->lockup_detected)
		return;

	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
		scsi_host_get(h->scsi_host);
		h->drv_req_rescan = 0;
		hpsa_ack_ctlr_events(h);
		hpsa_scan_start(h->scsi_host);
		scsi_host_put(h->scsi_host);
	}

	spin_lock_irqsave(&h->lock, flags);
	if (h->remove_in_progress) {
		spin_unlock_irqrestore(&h->lock, flags);
		return;
	}
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_init_reset_devices(pdev);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
#define COMMANDLIST_ALIGNMENT 128
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->pdev = pdev;
	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->cmpQ);
	INIT_LIST_HEAD(&h->reqQ);
	INIT_LIST_HEAD(&h->offline_device_list);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->offline_device_lock);
	spin_lock_init(&h->scan_lock);
	spin_lock_init(&h->passthru_count_lock);
	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, HPSA "%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean1;
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
		goto clean2;
	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
	       h->devname, pdev->device,
	       h->intr[h->intr_mode], dac ? "" : " not");
	if (hpsa_allocate_cmd_pool(h))
		goto clean4;
	if (hpsa_allocate_sg_chain_blocks(h))
		goto clean4;
	init_waitqueue_head(&h->scan_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;
	h->hba_mode_enabled = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
	hpsa_put_ctlr_into_performant_mode(h);

	/* At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		free_irqs(h);
		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev, "Failed to request_irq after "
				"soft reset.\n");
			goto clean4;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean4;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do it
		 * all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't go to clean4, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Enable Accelerated IO path at driver layer */
	h->acciopath_status = 1;

	h->drv_req_rescan = 0;

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);
	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */

	/* Monitor the controller for firmware lockups */
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	return 0;

clean4:
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	free_irqs(h);
clean2:
clean1:
	kfree(h);
	return rc;
}
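
/*
 * Probe flow recap (illustrative): when the kdump hard reset reports
 * -ENOTSUPP, the controller is brought up just far enough to accept
 * commands, a soft reset is issued, everything allocated so far is torn
 * down via hpsa_undo_allocations_after_kdump_soft_reset(), and control
 * jumps back to reinit_after_soft_reset to initialize from scratch.
 */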
static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;
	unsigned long flags;

	/* Don't bother trying to flush the cache if locked up */
	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->lock, flags);

	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		goto out_of_memory;
	}
	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD)) {
		goto out;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
	if (c->err_info->CommandStatus != 0)
out:
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_special_free(h, c);
out_of_memory:
	kfree(flush_buf);
}
static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and send the flush cache command.
	 * sendcmd will turn off interrupt, and send the flush...
	 * To write all data in the battery backed cache to disks.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs_and_disable_msix(h);
}
static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++)
		kfree(h->dev[i]);
}
static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	cancel_delayed_work(&h->monitor_ctlr_work);
	spin_unlock_irqrestore(&h->lock, flags);

	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	hpsa_free_device_info(h);
	hpsa_free_sg_chain_blocks(h);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->ioaccel1_blockFetchTable);
	kfree(h->ioaccel2_blockFetchTable);
	kfree(h->hba_inquiry_data);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(h);
}
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}
static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};
/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, int *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
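
/*
 * Worked example (illustrative): with the bft[] table used below and
 * min_blocks = 4, a command with i = 3 SG entries needs
 * size = 3 + 4 = 7 sixteen-byte blocks; the first bucket >= 7 is
 * bft[2] = 8, so bucket_map[3] = 2 and the controller fetches that
 * command using the third transfer size.
 */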
static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
			16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	/* Controller spec: zero out this buffer. */
	memset(h->reply_pool, 0, h->reply_pool_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_pool_dhandle +
			(h->max_commands * sizeof(u64) * i),
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/*
	 * enable outbound interrupt coalescing in accelerator mode;
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			access = SA5_ioaccel_mode2_access;
			writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
			writel(4, &h->cfgtable->HostWrite.CoalIntCount);
		}
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev, "unable to get board into"
					" performant mode\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_pool_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
						DIRECT_LOOKUP_BIT;
			cp->Tag.upper = 0;
			cp->host_addr.lower =
				(u32) (h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
			cp->host_addr.upper = 0;
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
			&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
}
static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
	kfree(h->ioaccel1_blockFetchTable);
	return 1;
}
static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
{
	/* Allocate ioaccel2 mode command blocks and block fetch table */

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel2_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	kfree(h->ioaccel2_blockFetchTable);
	return 1;
}
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		if (hpsa_alloc_ioaccel_cmd_and_bft(h))
			goto clean_up;
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			transMethod |= CFGTBL_Trans_io_accel2 |
					CFGTBL_Trans_enable_directed_msix;
			if (ioaccel2_alloc_cmds_and_bft(h))
				goto clean_up;
		}
	}

	/* TODO, check that this next line h->nreply_queues is correct */
	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
				&(h->reply_pool_dhandle));

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL)
		|| (h->blockFetchTable == NULL))
		goto clean_up;

	hpsa_enter_performant_mode(h, trans_support);
	return;

clean_up:
	if (h->reply_pool)
		pci_free_consistent(h->pdev, h->reply_pool_size,
			h->reply_pool, h->reply_pool_dhandle);
	kfree(h->blockFetchTable);
}
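
/*
 * Example (illustrative): with nreply_queues = 4 and max_commands = 32,
 * the reply pool allocated above holds 4 * 32 u64 slots, and queue i's
 * ring starts at h->reply_pool[32 * i]; these are the same per-queue
 * base addresses that hpsa_enter_performant_mode() writes to RepQAddr[i].
 */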
static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	unsigned long flags;
	int accel_cmds_out;

	do { /* wait for all outstanding commands to drain out */
		accel_cmds_out = 0;
		spin_lock_irqsave(&h->lock, flags);
		list_for_each_entry(c, &h->cmpQ, list)
			accel_cmds_out += is_accelerated_cmd(c);
		list_for_each_entry(c, &h->reqQ, list)
			accel_cmds_out += is_accelerated_cmd(c);
		spin_unlock_irqrestore(&h->lock, flags);
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}
/*
 * This is it.  Register the PCI driver information for the cards we control;
 * the OS will call our registered routines when it finds one of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}
static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48); */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3); */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4); */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(Tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);