/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2016 Microsemi Corporation
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/*
 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
 * with an optional trailing '-' followed by a byte value (0-255).
 */
#define HPSA_DRIVER_VERSION "3.4.20-170"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000
/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* How long to wait before giving up on a command */
#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ)
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("cciss");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5A_access},
	{0x40800E11, "Smart Array 5i", &SA5B_access},
	{0x40820E11, "Smart Array 532", &SA5B_access},
	{0x40830E11, "Smart Array 5312", &SA5B_access},
	{0x409A0E11, "Smart Array 641", &SA5A_access},
	{0x409B0E11, "Smart Array 642", &SA5A_access},
	{0x409C0E11, "Smart Array 6400", &SA5A_access},
	{0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
	{0x40910E11, "Smart Array 6i", &SA5A_access},
	{0x3225103C, "Smart Array P600", &SA5A_access},
	{0x3223103C, "Smart Array P800", &SA5A_access},
	{0x3234103C, "Smart Array P400", &SA5A_access},
	{0x3235103C, "Smart Array P400i", &SA5A_access},
	{0x3211103C, "Smart Array E200i", &SA5A_access},
	{0x3212103C, "Smart Array E200", &SA5A_access},
	{0x3213103C, "Smart Array E200i", &SA5A_access},
	{0x3214103C, "Smart Array E200i", &SA5A_access},
	{0x3215103C, "Smart Array E200i", &SA5A_access},
	{0x3237103C, "Smart Array E500", &SA5A_access},
	{0x323D103C, "Smart Array P700m", &SA5A_access},
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1920103C, "Smart Array P430i", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1925103C, "Smart Array P831", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);
#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
		      void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
			       u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
				    unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
				bool *legacy_board);
static int wait_for_device_to_become_ready(struct ctlr_info *h,
					   unsigned char lunaddr[],
					   int reply_queue);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
				     int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page);
static int hpsa_luns_changed(struct ctlr_info *h);
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}
/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}
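/*
 * Worked example (hypothetical buffer): fixed-format sense data starting
 * 0x70 0x00 0x06 ... carries sense key 0x06 (UNIT ATTENTION) in byte 2 and
 * the ASC/ASCQ pair in bytes 12/13; scsi_normalize_sense() extracts all
 * three, and check_for_unit_attention() below switches on the ASC.
 */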
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}
static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}
static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}
static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}
static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
{
	device->offload_enabled = 0;
	device->offload_to_be_enabled = 0;
}
static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}
static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}
static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};
static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}
static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
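/*
 * Per the CISS addressing convention, the two high-order bits of byte 3
 * select the address mode: 01b (0x40 after masking with 0xC0) means the
 * 8-byte address names a logical volume rather than a physical device,
 * which is exactly what the test above checks.
 */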
static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
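/*
 * Example of the index math above: raid_label[] has 9 entries, so
 * RAID_UNKNOWN evaluates to index 7 ("UNKNOWN") and PHYSICAL_DRIVE to
 * index 8 ("PHYS DRV"); raid_level_show() below clamps any out-of-range
 * level to RAID_UNKNOWN before indexing.
 */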
static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}
static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%8phN\n", lunid);
}
static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
static ssize_t sas_address_show(struct device *dev,
	      struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	sas_address = hdev->sas_address;
	spin_unlock_irqrestore(&h->lock, flags);

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
}
static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);

	if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
		return snprintf(buf, 20, "%d\n", offload_enabled);
	else
		return snprintf(buf, 40, "%s\n",
				"Not applicable for a controller");
}
#define MAX_PATHS 8
static ssize_t path_info_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1 << i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"%s\n", active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"PORT: %.2s ", phys_connector);
		if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
			hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return output_len;
}
static ssize_t host_show_ctlr_num(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->ctlr);
}
static ssize_t host_show_legacy_board(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
}
static DEVICE_ATTR_RO(raid_level);
static DEVICE_ATTR_RO(lunid);
static DEVICE_ATTR_RO(unique_id);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR_RO(sas_address);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR_RO(path_info);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
		host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);
static DEVICE_ATTR(ctlr_num, S_IRUGO,
	host_show_ctlr_num, NULL);
static DEVICE_ATTR(legacy_board, S_IRUGO,
	host_show_legacy_board, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_path_info,
	&dev_attr_sas_address,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	&dev_attr_lockup_detected,
	&dev_attr_ctlr_num,
	&dev_attr_legacy_board,
	NULL,
};
#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_DRIVER +\
				 HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
	.max_sectors		= 2048,
	.no_write_same		= 1,
};
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
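/*
 * Note on the wraparound test above: the low bit of a posted reply-queue
 * entry toggles on every pass through the ring, while the host flips
 * rq->wraparound each time current_entry wraps to 0.  Comparing the two
 * distinguishes freshly posted entries from stale ones left over from the
 * previous pass, without ever re-zeroing the ring.
 */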
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vectors))
			return;
		c->Header.ReplyQueue = reply_queue;
	}
}
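/*
 * Worked example of the encoding above (values hypothetical): if
 * c->Header.SGList selects blockFetchTable entry 2, then
 * busaddr |= 1 | (2 << 1) OR's in 0x5 - the performant-mode bit in bit 0
 * and fetch-table index 2 in bits 1-3, leaving command type 0 in bits 4-6.
 */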
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = reply_queue;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}
static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = reply_queue;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}
static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = reply_queue;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}
/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should, so we dial down lockup detection during firmware flash and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	if (c->device)
		atomic_inc(&c->device->commands_outstanding);

	reply_queue = h->reply_map[raw_smp_processor_id()];
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}
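/*
 * Note: __enqueue_cmd_and_start_io() overwrites reply_queue with
 * h->reply_map[raw_smp_processor_id()], so completions are steered to the
 * interrupt vector mapped to the submitting CPU regardless of the
 * DEFAULT_REPLY_QUEUE value passed here.
 */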
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
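/*
 * Example (hypothetical layout): with devices already at targets 0, 1 and 3
 * on this bus, lun_taken has bits 0, 1 and 3 set, find_first_zero_bit()
 * returns 2, and the new physical device is assigned target 2, lun 0.
 */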
static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
#define LABEL_SIZE 25
	char label[LABEL_SIZE];

	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	switch (dev->devtype) {
	case TYPE_RAID:
		snprintf(label, LABEL_SIZE, "controller");
		break;
	case TYPE_ENCLOSURE:
		snprintf(label, LABEL_SIZE, "enclosure");
		break;
	case TYPE_DISK:
	case TYPE_ZBC:
		if (dev->external)
			snprintf(label, LABEL_SIZE, "external");
		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
			snprintf(label, LABEL_SIZE, "%s",
				raid_label[PHYSICAL_DRIVE]);
		else
			snprintf(label, LABEL_SIZE, "RAID-%s",
				dev->raid_level > RAID_UNKNOWN ? "?" :
				raid_label[dev->raid_level]);
		break;
	case TYPE_ROM:
		snprintf(label, LABEL_SIZE, "rom");
		break;
	case TYPE_TAPE:
		snprintf(label, LABEL_SIZE, "tape");
		break;
	case TYPE_MEDIUM_CHANGER:
		snprintf(label, LABEL_SIZE, "changer");
		break;
	default:
		snprintf(label, LABEL_SIZE, "UNKNOWN");
		break;
	}

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			label,
			dev->offload_config ? '+' : '-',
			dev->offload_to_be_enabled ? '+' : '-',
			dev->expose_device);
}
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in byte 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	return 0;
}
/*
 * Called during a scan operation.
 *
 * Update an entry in h->dev[] array.
 */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/*
	 * ioaccel_handle may have changed for a dual domain disk
	 */
	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before. If raid map data has changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->offload_to_be_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * ioaccel on until we can update h->dev[entry]->phys_disk[], but we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;

	/*
	 * turn ioaccel off immediately if told to do so.
	 */
	if (!new_entry->offload_to_be_enabled)
		h->dev[entry]->offload_enabled = 0;

	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;

	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}
static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
		return 1;
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
		if (dev1->queue_depth != dev2->queue_depth)
			return 1;
	/*
	 * This can happen for dual domain devices. An active
	 * path change causes the ioaccel handle to change
	 *
	 * for example note the handle differences between p0 and p1
	 * Device                    WWN               ,WWN hash,Handle
	 * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
	 *	p1                   0x5000C5005FC4DAC9,0x6798C0,0x00040004
	 */
	if (dev1->ioaccel_handle != dev2->ioaccel_handle)
		return 1;
	return 0;
}
/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	if (needle == NULL)
		return DEVICE_NOT_FOUND;

	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return;

	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}
/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_NOT_AVAILABLE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}
/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	logical_drive->nphysical_disks = nraid_map_entries;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j] == NULL)
				continue;
			if (dev[j]->devtype != TYPE_DISK &&
			    dev[j]->devtype != TYPE_ZBC)
				continue;
			if (is_logical_device(dev[j]))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded. In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present. And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case
		 */
		if (!logical_drive->phys_disk[i]) {
			dev_warn(&h->pdev->dev,
				"%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
				__func__,
				h->scsi_host->host_no, logical_drive->bus,
				logical_drive->target, logical_drive->lun);
			hpsa_turn_off_ioaccel_for_device(logical_drive);
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else {
		if (logical_drive->external)
			logical_drive->queue_depth = EXTERNAL_QD;
		else
			logical_drive->queue_depth = h->nr_cmds;
	}
}
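/*
 * Example of the sizing math above (hypothetical raid map): with
 * data_disks_per_row = 2, metadata_disks_per_row = 1, row_cnt = 1 and
 * layout_map_count = 3, total_disks_per_row is 3 and the loop scans
 * nraid_map_entries = 1 * 3 * 3 = 9 raid-map slots, matching each slot's
 * ioaccel_handle against the physical drives in dev[].
 */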
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i] == NULL)
			continue;
		if (dev[i]->devtype != TYPE_DISK &&
		    dev[i]->devtype != TYPE_ZBC)
			continue;
		if (!is_logical_device(dev[i]))
			continue;

		/*
		 * If offload is currently enabled, the RAID map and
		 * phys_disk[] assignment *better* not be changing
		 * because we would be changing ioaccel phys_disk[] pointers
		 * on a ioaccel volume processing I/O requests.
		 *
		 * If an ioaccel volume status changed, initially because it was
		 * re-configured and thus underwent a transformation, or
		 * a drive failed, we would have received a state change
		 * request and ioaccel should have been turned off. When the
		 * transformation completes, we get another state change
		 * request to turn ioaccel back on. In this case, we need
		 * to update the ioaccel information.
		 *
		 * Thus: If it is not currently enabled, but will be after
		 * the scan completes, make sure the ioaccel pointers
		 * are up to date.
		 */

		if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
			hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}
static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int rc = 0;

	if (!h->scsi_host)
		return 1;

	if (is_logical_device(device)) /* RAID */
		rc = scsi_add_device(h->scsi_host, device->bus,
					device->target, device->lun);
	else /* HBA */
		rc = hpsa_add_sas_device(h->sas_host, device);

	return rc;
}
static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
						struct hpsa_scsi_dev_t *dev)
{
	int i;
	int count = 0;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
				dev->scsi3addr)) {
			unsigned long flags;

			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				++count;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	return count;
}
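/*
 * The refcount protocol above: every live command in h->cmd_pool holds
 * a reference, so atomic_inc_return() yielding > 1 means the slot is
 * in use by someone else and is safe to inspect; the matching
 * cmd_free() drops the temporary reference either way.
 */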
#define NUM_WAIT 20
static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
						struct hpsa_scsi_dev_t *device)
{
	int cmds = 0;
	int waits = 0;
	int num_wait = NUM_WAIT;

	if (device->external)
		num_wait = HPSA_EH_PTRAID_TIMEOUT;

	while (1) {
		cmds = hpsa_find_outstanding_commands_for_dev(h, device);
		if (cmds == 0)
			break;
		if (++waits > num_wait)
			break;
		msleep(1000);
	}

	if (waits > num_wait) {
		dev_warn(&h->pdev->dev,
			"%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n",
			__func__,
			h->scsi_host->host_no,
			device->bus, device->target, device->lun, cmds);
	}
}
static void hpsa_remove_device(struct ctlr_info *h,
			struct hpsa_scsi_dev_t *device)
{
	struct scsi_device *sdev = NULL;

	if (!h->scsi_host)
		return;

	/*
	 * Allow for commands to drain
	 */
	device->removed = 1;
	hpsa_wait_for_outstanding_commands_for_dev(h, device);

	if (is_logical_device(device)) { /* RAID */
		sdev = scsi_device_lookup(h->scsi_host, device->bus,
						device->target, device->lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/*
			 * We don't expect to get here.  Future commands
			 * to this device will get a selection timeout as
			 * if the device were gone.
			 */
			hpsa_show_dev_msg(KERN_WARNING, h, device,
					"didn't find device for removal.");
		}
	} else { /* HBA */

		hpsa_remove_sas_device(device);
	}
}
static void adjust_hpsa_scsi_table(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;

	/*
	 * A reset can cause a device status to change
	 * re-schedule the scan to see what happened.
	 */
	spin_lock_irqsave(&h->reset_lock, flags);
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		spin_unlock_irqrestore(&h->reset_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->reset_lock, flags);

	added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
	removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, i, removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);

	/*
	 * Now that h->dev[]->phys_disk[] is coherent, we can enable
	 * any logical drives that need it enabled.
	 *
	 * The raid map should be current by now.
	 *
	 * We are updating the device list used for I/O requests.
	 */
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == NULL)
			continue;
		h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
	}

	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (!changes)
		goto free_and_out;

	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		if (removed[i] == NULL)
			continue;
		if (removed[i]->expose_device)
			hpsa_remove_device(h, removed[i]);
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		int rc = 0;

		if (added[i] == NULL)
			continue;
		if (!(added[i]->expose_device))
			continue;
		rc = hpsa_add_device(h, added[i]);
		if (!rc)
			continue;
		dev_warn(&h->pdev->dev,
			"addition failed %d, device not added.", rc);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
		h->drv_req_rescan = 1;
	}

free_and_out:
	kfree(added);
	kfree(removed);
}
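/*
 * adjust_hpsa_scsi_table() is a three-phase diff, all under h->devlock:
 * 1) walk h->dev[] and remove/replace/update anything that moved or
 *    vanished relative to sd[], 2) walk sd[] and add anything new, and
 * 3) fold offload_to_be_enabled into offload_enabled once phys_disk[]
 *    is coherent.  SCSI midlayer notifications happen afterwards,
 *    outside the lock.
 */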
/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd = NULL;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
		struct scsi_target *starget;
		struct sas_rphy *rphy;

		starget = scsi_target(sdev);
		rphy = target_to_rphy(starget);
		sd = hpsa_find_device_by_sas_rphy(h, rphy);
		if (sd) {
			sd->target = sdev_id(sdev);
			sd->lun = sdev->lun;
		}
	}
	if (!sd)
		sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
					sdev_id(sdev), sdev->lun);

	if (sd && sd->expose_device) {
		atomic_set(&sd->ioaccel_cmds_out, 0);
		sdev->hostdata = sd;
	} else
		sdev->hostdata = NULL;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}
/* configure scsi device based on internal per-device structure */
static int hpsa_slave_configure(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	int queue_depth;

	sd = sdev->hostdata;
	sdev->no_uld_attach = !sd || !sd->expose_device;

	if (sd) {
		sd->was_removed = 0;
		if (sd->external) {
			queue_depth = EXTERNAL_QD;
			sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT;
			blk_queue_rq_timeout(sdev->request_queue,
						HPSA_EH_PTRAID_TIMEOUT);
		} else {
			queue_depth = sd->queue_depth != 0 ?
					sd->queue_depth : sdev->host->can_queue;
		}
	} else
		queue_depth = sdev->host->can_queue;

	scsi_change_queue_depth(sdev, queue_depth);

	return 0;
}
static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *hdev = NULL;

	hdev = sdev->hostdata;

	if (hdev)
		hdev->was_removed = 1;
}
static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->ioaccel2_cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->ioaccel2_cmd_sg_list[i]);
		h->ioaccel2_cmd_sg_list[i] = NULL;
	}
	kfree(h->ioaccel2_cmd_sg_list);
	h->ioaccel2_cmd_sg_list = NULL;
}
static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->ioaccel2_cmd_sg_list =
		kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
					GFP_KERNEL);
	if (!h->ioaccel2_cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->ioaccel2_cmd_sg_list[i] =
			kmalloc_array(h->maxsgentries,
				      sizeof(*h->ioaccel2_cmd_sg_list[i]),
				      GFP_KERNEL);
		if (!h->ioaccel2_cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	return -ENOMEM;
}
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}
static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
				 GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;

	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
						  sizeof(*h->cmd_sg_list[i]),
						  GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}
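/*
 * Two separate chain pools are kept: the ioaccel2 chains above are
 * sized by h->maxsgentries (the controller limit), while the main-path
 * chains are sized by h->chainsize (the number of SG entries that can
 * spill past the descriptors embedded in the command itself).
 */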
static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp, struct CommandList *c)
{
	struct ioaccel2_sg_element *chain_block;
	u64 temp64;
	u32 chain_size;

	chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
	chain_size = le32_to_cpu(cp->sg[0].length);
	temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
				DMA_TO_DEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		cp->sg->address = 0;
		return -1;
	}
	cp->sg->address = cpu_to_le64(temp64);
	return 0;
}
static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp)
{
	struct ioaccel2_sg_element *chain_sg;
	u64 temp64;
	u32 chain_size;

	chain_sg = cp->sg;
	temp64 = le64_to_cpu(chain_sg->address);
	chain_size = le32_to_cpu(cp->sg[0].length);
	dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
}
static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;
	u32 chain_len;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
	chain_len = sizeof(*chain_sg) *
		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
	chain_sg->Len = cpu_to_le32(chain_len);
	temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
				DMA_TO_DEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr = cpu_to_le64(0);
		return -1;
	}
	chain_sg->Addr = cpu_to_le64(temp64);
	return 0;
}
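/*
 * SG chaining above works by sacrificing the last embedded descriptor:
 * its Ext field is set to HPSA_SG_CHAIN and its Addr/Len point at the
 * external block holding the remaining (SGTotal - max_cmd_sg_entries)
 * descriptors, which is what hpsa_unmap_sg_chain_block() undoes.
 */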
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;

	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
			le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
}
/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
					struct CommandList *c,
					struct scsi_cmnd *cmd,
					struct io_accel2_cmd *c2,
					struct hpsa_scsi_dev_t *dev)
{
	int data_len;
	int retry = 0;
	u32 ioaccel2_resid = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			if (cmd)
				cmd->result = 0;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT) {
				memset(cmd->sense_buffer, 0,
					SCSI_SENSE_BUFFERSIZE);
				break;
			}
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			retry = 1;
			break;
		default:
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_IO_ERROR:
		case IOACCEL2_STATUS_SR_IO_ABORTED:
		case IOACCEL2_STATUS_SR_OVERRUN:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_UNDERRUN:
			cmd->result = (DID_OK << 16);		/* host byte */
			cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
			ioaccel2_resid = get_unaligned_le32(
						&c2->error_data.resid_cnt[0]);
			scsi_set_resid(cmd, ioaccel2_resid);
			break;
		case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
		case IOACCEL2_STATUS_SR_INVALID_DEVICE:
		case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
			/*
			 * Did an HBA disk disappear? We will eventually
			 * get a state change event from the controller but
			 * in the meantime, we need to tell the OS that the
			 * HBA disk is no longer there and stop I/O
			 * from going down. This allows the potential re-insert
			 * of the disk to get the same device node.
			 */
			if (dev->physical_device && dev->expose_device) {
				cmd->result = DID_NO_CONNECT << 16;
				dev->removed = 1;
				h->drv_req_rescan = 1;
				dev_warn(&h->pdev->dev,
					"%s: device is gone!\n", __func__);
			} else
				/*
				 * Retry by sending down the RAID path.
				 * We will get an event from ctlr to
				 * trigger rescan regardless.
				 */
				retry = 1;
			break;
		default:
			retry = 1;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		break;
	default:
		retry = 1;
		break;
	}

	if (dev->in_reset)
		retry = 0;

	return retry;	/* retry on raid path? */
}
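/*
 * A non-zero return from handle_ioaccel_mode2_error() means the command
 * is worth one more trip through the normal RAID path (via
 * hpsa_retry_cmd()); a zero return means the result recorded in
 * cmd->result is final.
 */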
static void hpsa_cmd_resolve_events(struct ctlr_info *h,
		struct CommandList *c)
{
	struct hpsa_scsi_dev_t *dev = c->device;

	/*
	 * Reset c->scsi_cmd here so that the reset handler will know
	 * this command has completed.  Then, check to see if the handler is
	 * waiting for this command, and, if so, wake it.
	 */
	c->scsi_cmd = SCSI_CMD_IDLE;
	mb();	/* Declare command idle before checking for pending events. */
	if (dev) {
		atomic_dec(&dev->commands_outstanding);
		if (dev->in_reset &&
			atomic_read(&dev->commands_outstanding) <= 0)
			wake_up_all(&h->event_sync_wait_queue);
	}
}
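/*
 * The mb() above pairs with the reset path: marking the command
 * SCSI_CMD_IDLE must be visible before commands_outstanding is
 * re-checked, so a waiter in hpsa_do_reset() cannot miss the wakeup.
 */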
static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
				      struct CommandList *c)
{
	hpsa_cmd_resolve_events(h, c);
	cmd_tagged_free(h, c);
}

static void hpsa_cmd_free_and_done(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd)
{
	hpsa_cmd_resolve_and_free(h, c);
	if (cmd && cmd->scsi_done)
		cmd->scsi_done(cmd);
}

static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
{
	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}
static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0)) {
		cmd->result = 0;
		return hpsa_cmd_free_and_done(h, c, cmd);
	}

	/*
	 * Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever is
	 * wrong.
	 */
	if (is_logical_device(dev) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
			hpsa_turn_off_ioaccel_for_device(dev);
		}

		if (dev->in_reset) {
			cmd->result = DID_RESET << 16;
			return hpsa_cmd_free_and_done(h, c, cmd);
		}

		return hpsa_retry_cmd(h, c);
	}

	if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
		return hpsa_retry_cmd(h, c);

	return hpsa_cmd_free_and_done(h, c, cmd);
}
/* Returns 0 on success, < 0 otherwise. */
static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
					struct CommandList *cp)
{
	u8 tmf_status = cp->err_info->ScsiStatus;

	switch (tmf_status) {
	case CISS_TMF_COMPLETE:
		/*
		 * CISS_TMF_COMPLETE never happens, instead,
		 * ei->CommandStatus == 0 for this case.
		 */
	case CISS_TMF_SUCCESS:
		return 0;
	case CISS_TMF_INVALID_FRAME:
	case CISS_TMF_NOT_SUPPORTED:
	case CISS_TMF_FAILED:
	case CISS_TMF_WRONG_LUN:
	case CISS_TMF_OVERLAPPED_TAG:
		break;
	default:
		dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
				tmf_status);
		break;
	}
	return -tmf_status;
}
static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;
	struct io_accel2_cmd *c2;

	u8 sense_key;
	u8 asc;      /* additional sense code */
	u8 ascq;     /* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = cp->scsi_cmd;
	h = cp->h;

	if (!cmd->device) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}

	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}
	c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	if ((cp->cmd_type == CMD_IOACCEL2) &&
		(c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
		hpsa_unmap_ioaccel2_sg_chain_block(h, c2);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */

	/* SCSI command has already been cleaned up in SML */
	if (dev->was_removed) {
		hpsa_cmd_resolve_and_free(h, cp);
		return;
	}

	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
		if (dev->physical_device && dev->expose_device &&
			dev->removed) {
			cmd->result = DID_NO_CONNECT << 16;
			return hpsa_cmd_free_and_done(h, cp, cmd);
		}
		if (likely(cp->phys_disk != NULL))
			atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
	}

	/*
	 * We check for lockup status here as it may be set for
	 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
	 * fail_all_outstanding_cmds()
	 */
	if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
		/* DID_NO_CONNECT will prevent a retry */
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}

	if (cp->cmd_type == CMD_IOACCEL2)
		return process_ioaccel2_completion(h, cp, cmd, dev);

	scsi_set_resid(cmd, ei->ResidualCnt);
	if (ei->CommandStatus == 0)
		return hpsa_cmd_free_and_done(h, cp, cmd);

	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
	 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];

		cp->Header.SGList = scsi_sg_count(cmd);
		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
			IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.tag = c->tag;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
		if (is_logical_device(dev)) {
			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
				dev->offload_enabled = 0;
			return hpsa_retry_cmd(h, cp);
		}
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		cmd->result |= ei->ScsiStatus;
		/* copy the sense data */
		if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
			sense_data_size = SCSI_SENSE_BUFFERSIZE;
		else
			sense_data_size = sizeof(ei->SenseInfo);
		if (ei->SenseLen < sense_data_size)
			sense_data_size = ei->SenseLen;
		memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
		if (ei->ScsiStatus)
			decode_sense_data(ei->SenseInfo, sense_data_size,
				&sense_key, &asc, &ascq);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			switch (sense_key) {
			case ABORTED_COMMAND:
				cmd->result |= DID_SOFT_ERROR << 16;
				break;
			case UNIT_ATTENTION:
				if (asc == 0x3F && ascq == 0x0E)
					h->drv_req_rescan = 1;
				break;
			case ILLEGAL_REQUEST:
				if (asc == 0x25 && ascq == 0x00) {
					dev->removed = 1;
					cmd->result = DID_NO_CONNECT << 16;
				}
				break;
			}
			break;
		}
		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it. We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't. We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */

			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev,
			"CDB %16phN data overrun\n", cp->Request.CDB);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
				cp->Request.CDB);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
			cp->Request.CDB);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
			cp->Request.CDB);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
			cp->Request.CDB);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
			cp->Request.CDB);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
			cp->Request.CDB);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	case CMD_TMF_STATUS:
		if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
			cmd->result = DID_ERROR << 16;
		break;
	case CMD_IOACCEL_DISABLED:
		/* This only handles the direct pass-through case since RAID
		 * offload is handled above.  Just attempt a retry.
		 */
		cmd->result = DID_SOFT_ERROR << 16;
		dev_warn(&h->pdev->dev,
				"cp %p had HP SSD Smart Path error\n", cp);
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}

	return hpsa_cmd_free_and_done(h, cp, cmd);
}
static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
		int sg_used, enum dma_data_direction data_direction)
{
	int i;

	for (i = 0; i < sg_used; i++)
		dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
				le32_to_cpu(c->SG[i].Len),
				data_direction);
}
static int hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		enum dma_data_direction data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return 0;
	}

	addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
	if (dma_mapping_error(&pdev->dev, addr64)) {
		/* Prevent subsequent unmap of something never mapped */
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return -1;
	}
	cp->SG[0].Addr = cpu_to_le64(addr64);
	cp->SG[0].Len = cpu_to_le32(buflen);
	cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
	cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
	return 0;
}
#define NO_TIMEOUT ((unsigned long) -1)
#define DEFAULT_TIMEOUT 30000 /* milliseconds */
static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	__enqueue_cmd_and_start_io(h, c, reply_queue);
	if (timeout_msecs == NO_TIMEOUT) {
		/* TODO: get rid of this no-timeout thing */
		wait_for_completion_io(&wait);
		return IO_OK;
	}
	if (!wait_for_completion_io_timeout(&wait,
					msecs_to_jiffies(timeout_msecs))) {
		dev_warn(&h->pdev->dev, "Command timed out.\n");
		return -ETIMEDOUT;
	}
	return IO_OK;
}
static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
				   int reply_queue, unsigned long timeout_msecs)
{
	if (unlikely(lockup_detected(h))) {
		c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
		return IO_OK;
	}
	return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
}
static u32 lockup_detected(struct ctlr_info *h)
{
	int cpu;
	u32 rc, *lockup_detected;

	cpu = get_cpu();
	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
	rc = *lockup_detected;
	put_cpu();
	return rc;
}
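/*
 * lockup_detected is kept per-cpu so this hot-path read stays local;
 * the detector sets every cpu's copy when it declares the controller
 * dead, and fail_all_outstanding_cmds() marks the commands themselves
 * with CMD_CTLR_LOCKUP.
 */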
#define MAX_DRIVER_CMD_RETRIES 25
static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
		struct CommandList *c, enum dma_data_direction data_direction,
		unsigned long timeout_msecs)
{
	int backoff_time = 10, retry_count = 0;
	int rc;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
					     timeout_msecs);
		if (rc)
			break;
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	} while ((check_for_unit_attention(h, c) ||
			check_for_busy(h, c)) &&
			retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
	if (retry_count > MAX_DRIVER_CMD_RETRIES)
		rc = -EIO;
	return rc;
}
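/*
 * Retry policy above: the first three retries are immediate, after
 * which each attempt sleeps backoff_time ms, doubling from 10 ms up to
 * a 1 second cap, for at most MAX_DRIVER_CMD_RETRIES attempts -- and
 * only while the failure is a unit attention or busy status.
 */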
static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
				struct CommandList *c)
{
	const u8 *cdb = c->Request.CDB;
	const u8 *lun = c->Header.LUN.LunAddrBytes;

	dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
		 txt, lun, cdb);
}
static void hpsa_scsi_interpret_error(struct ctlr_info *h,
			struct CommandList *cp)
{
	const struct ErrorInfo *ei = cp->err_info;
	struct device *d = &cp->h->pdev->dev;
	u8 sense_key, asc, ascq;
	int sense_len;

	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		if (ei->SenseLen > sizeof(ei->SenseInfo))
			sense_len = sizeof(ei->SenseInfo);
		else
			sense_len = ei->SenseLen;
		decode_sense_data(ei->SenseInfo, sense_len,
					&sense_key, &asc, &ascq);
		hpsa_print_cmd(h, "SCSI status", cp);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
			dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
				sense_key, asc, ascq);
		else
			dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero.  "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		hpsa_print_cmd(h, "overrun condition", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		hpsa_print_cmd(h, "invalid command", cp);
		dev_warn(d, "probably means device no longer present\n");
		}
		break;
	case CMD_PROTOCOL_ERR:
		hpsa_print_cmd(h, "protocol error", cp);
		break;
	case CMD_HARDWARE_ERR:
		hpsa_print_cmd(h, "hardware error", cp);
		break;
	case CMD_CONNECTION_LOST:
		hpsa_print_cmd(h, "connection lost", cp);
		break;
	case CMD_ABORTED:
		hpsa_print_cmd(h, "aborted", cp);
		break;
	case CMD_ABORT_FAILED:
		hpsa_print_cmd(h, "abort failed", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		hpsa_print_cmd(h, "unsolicited abort", cp);
		break;
	case CMD_TIMEOUT:
		hpsa_print_cmd(h, "timed out", cp);
		break;
	case CMD_UNABORTABLE:
		hpsa_print_cmd(h, "unabortable", cp);
		break;
	case CMD_CTLR_LOCKUP:
		hpsa_print_cmd(h, "controller lockup detected", cp);
		break;
	default:
		hpsa_print_cmd(h, "unknown status", cp);
		dev_warn(d, "Unknown command status %x\n",
				ei->CommandStatus);
	}
}
static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
					u8 page, u8 *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
					unsigned char *scsi3addr)
{
	u8 *buf;
	u64 sa = 0;
	int rc = 0;

	buf = kzalloc(1024, GFP_KERNEL);
	if (!buf)
		return 0;

	rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
					buf, 1024);
	if (rc)
		goto out;

	sa = get_unaligned_be64(buf+12);

out:
	kfree(buf);
	return sa;
}
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			u16 page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
	u8 reset_type, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	c->device = dev;

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc) {
		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
		goto out;
	}
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr)
{
	int i;
	bool match = false;
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;

	if (hpsa_is_cmd_idle(c))
		return false;

	switch (c->cmd_type) {
	case CMD_SCSI:
	case CMD_IOCTL_PEND:
		match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
				sizeof(c->Header.LUN.LunAddrBytes));
		break;

	case CMD_IOACCEL1:
	case CMD_IOACCEL2:
		if (c->phys_disk == dev) {
			/* HBA mode match */
			match = true;
		} else {
			/* Possible RAID mode -- check each phys dev. */
			/* FIXME:  Do we need to take out a lock here?  If
			 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
			 * instead. */
			for (i = 0; i < dev->nphysical_disks && !match; i++) {
				/* FIXME: an alternate test might be
				 *
				 * match = dev->phys_disk[i]->ioaccel_handle
				 *              == c2->scsi_nexus;      */
				match = dev->phys_disk[i] == c->phys_disk;
			}
		}
		break;

	case IOACCEL2_TMF:
		for (i = 0; i < dev->nphysical_disks && !match; i++) {
			match = dev->phys_disk[i]->ioaccel_handle ==
					le32_to_cpu(ac->it_nexus);
		}
		break;

	case 0:		/* The command is in the middle of being initialized. */
		match = false;
		break;

	default:
		dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
			c->cmd_type);
		BUG();
	}

	return match;
}
static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
	u8 reset_type, int reply_queue)
{
	int rc = 0;

	/* We can really only handle one reset at a time */
	if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
		dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
		return -EINTR;
	}

	rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
	if (!rc) {
		/* incremented by sending the reset request */
		atomic_dec(&dev->commands_outstanding);
		wait_event(h->event_sync_wait_queue,
			atomic_read(&dev->commands_outstanding) <= 0 ||
			lockup_detected(h));
	}

	if (unlikely(lockup_detected(h))) {
		dev_warn(&h->pdev->dev,
			 "Controller lockup detected during reset wait\n");
		rc = -ENODEV;
	}

	if (!rc)
		rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);

	mutex_unlock(&h->reset_mutex);
	return rc;
}
static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;

	if (!hpsa_vpd_page_supported(h, scsi3addr,
		HPSA_VPD_LV_DEVICE_GEOMETRY))
		goto exit;

	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
		HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);

	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
exit:
	kfree(buf);
}
#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
				struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;

	dev_info(&h->pdev->dev, "structure_size = %u\n",
				le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
			le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
			map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
			map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
			le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
			le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
			le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
			le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
			le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
			le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = 0x%x\n",
			le16_to_cpu(map_buff->flags));
	dev_info(&h->pdev->dev, "encryption = %s\n",
			le16_to_cpu(map_buff->flags) &
			RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
	dev_info(&h->pdev->dev, "dekindex = %u\n",
			le16_to_cpu(map_buff->dekindex));
	map_cnt = le16_to_cpu(map_buff->layout_map_count);
	for (map = 0; map < map_cnt; map++) {
		dev_info(&h->pdev->dev, "Map%u:\n", map);
		row_cnt = le16_to_cpu(map_buff->row_cnt);
		for (row = 0; row < row_cnt; row++) {
			dev_info(&h->pdev->dev, "  Row%u:\n", row);
			disks_per_row =
				le16_to_cpu(map_buff->data_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    D%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
			disks_per_row =
				le16_to_cpu(map_buff->metadata_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    M%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
		}
	}
}
#else
static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
			__attribute__((unused)) int rc,
			__attribute__((unused)) struct raid_map_data *map_buff)
{
}
#endif
static int hpsa_get_raid_map(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc = 0;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
			sizeof(this_device->raid_map), 0,
			scsi3addr, TYPE_CMD)) {
		dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
		cmd_free(h, c);
		return -1;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		goto out;
	}
	cmd_free(h, c);

	/* @todo in the future, dynamically allocate RAID map memory */
	if (le32_to_cpu(this_device->raid_map.structure_size) >
				sizeof(this_device->raid_map)) {
		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
		rc = -1;
	}
	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
	return rc;
out:
	cmd_free(h, c);
	return rc;
}
static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
		unsigned char scsi3addr[], u16 bmic_device_index,
		struct bmic_sense_subsystem_info *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static int hpsa_bmic_id_controller(struct ctlr_info *h,
	struct bmic_identify_controller *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}
static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
		unsigned char scsi3addr[], u16 bmic_device_index,
		struct bmic_identify_physical_device *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);

	return rc;
}
/*
 * get enclosure information
 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
 * Uses id_physical_device to determine the box_index.
 */
static void hpsa_get_enclosure_info(struct ctlr_info *h,
		unsigned char *scsi3addr,
		struct ReportExtendedLUNdata *rlep, int rle_index,
		struct hpsa_scsi_dev_t *encl_dev)
{
	int rc = -1;
	struct CommandList *c = NULL;
	struct ErrorInfo *ei = NULL;
	struct bmic_sense_storage_box_params *bssbp = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
	u16 bmic_device_index = 0;

	encl_dev->eli =
		hpsa_get_enclosure_logical_identifier(h, scsi3addr);

	bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);

	if (encl_dev->target == -1 || encl_dev->lun == -1) {
		rc = IO_OK;
		goto out;
	}

	if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
		rc = IO_OK;
		goto out;
	}

	bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
	if (!bssbp)
		goto out;

	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!id_phys)
		goto out;

	rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
						id_phys, sizeof(*id_phys));
	if (rc) {
		dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
			__func__, encl_dev->external, bmic_device_index);
		goto out;
	}

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
			sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	if (id_phys->phys_connector[1] == 'E')
		c->Request.CDB[5] = id_phys->box_index;
	else
		c->Request.CDB[5] = 0;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if (rc)
		goto out;

	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		rc = -1;
		goto out;
	}

	encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
	memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
		bssbp->phys_connector, sizeof(bssbp->phys_connector));

	rc = IO_OK;
out:
	kfree(bssbp);
	kfree(id_phys);

	if (c)
		cmd_free(h, c);

	if (rc != IO_OK)
		hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
			"Error, could not get enclosure information");
}
static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
						unsigned char *scsi3addr)
{
	struct ReportExtendedLUNdata *physdev;
	u32 nphysicals;
	u64 sa = 0;
	int i;

	physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
	if (!physdev)
		return 0;

	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		kfree(physdev);
		return 0;
	}
	nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;

	for (i = 0; i < nphysicals; i++)
		if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
			sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
			break;
		}

	kfree(physdev);

	return sa;
}
static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
					struct hpsa_scsi_dev_t *dev)
{
	int rc;
	u64 sa = 0;

	if (is_hba_lunid(scsi3addr)) {
		struct bmic_sense_subsystem_info *ssi;

		ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
		if (!ssi)
			return;

		rc = hpsa_bmic_sense_subsystem_information(h,
					scsi3addr, 0, ssi, sizeof(*ssi));
		if (rc == 0) {
			sa = get_unaligned_be64(ssi->primary_world_wide_id);
			h->sas_address = sa;
		}

		kfree(ssi);
	} else
		sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);

	dev->sas_address = sa;
}
static void hpsa_ext_ctrl_present(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev)
{
	u32 nphysicals;
	int i;

	if (h->discovery_polling)
		return;

	nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;

	for (i = 0; i < nphysicals; i++) {
		if (physdev->LUN[i].device_type ==
			BMIC_DEVICE_TYPE_CONTROLLER
			&& !is_hba_lunid(physdev->LUN[i].lunid)) {
			dev_info(&h->pdev->dev,
				"External controller present, activate discovery polling and disable rld caching\n");
			hpsa_disable_rld_caching(h);
			h->discovery_polling = 1;
			break;
		}
	}
}
/* Determine whether a VPD page is supported by checking page 0x00 */
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return false;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return false;
exit_supported:
	kfree(buf);
	return true;
}
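/*
 * VPD page 0x00 layout, as parsed above: byte 3 holds the number of
 * supported page codes and the codes themselves start at byte 4, hence
 * the buf[3 + i] probe with i running from 1 to pages.
 */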
/*
 * Called during a scan operation.
 * Sets ioaccel status on the new device list, not the existing device list
 *
 * The device list used during I/O will be updated later in
 * adjust_hpsa_scsi_table.
 */
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;
	this_device->offload_to_be_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
	this_device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
	if (this_device->offload_config) {
		bool offload_enabled =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		/*
		 * Check to see if offload can be enabled.
		 */
		if (offload_enabled) {
			rc = hpsa_get_raid_map(h, scsi3addr, this_device);
			if (rc) /* could not load raid_map */
				goto out;
			this_device->offload_to_be_enabled = 1;
		}
	}

out:
	kfree(buf);
}
/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int index, int buflen)
{
	int rc;
	unsigned char *buf;

	/* Does controller have VPD for device id? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
		return 1; /* not supported */

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
					HPSA_VPD_LV_DEVICE_ID, buf, 64);
	if (rc == 0) {
		if (buflen > 16)
			buflen = 16;
		memcpy(device_id, &buf[8], buflen);
	}

	kfree(buf);

	return rc; /*0 - got id, otherwise, didn't */
}
static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		void *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -EAGAIN;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -EIO;
	} else {
		struct ReportLUNdata *rld = buf;

		if (rld->extended_response_flag != extended_response) {
			if (!h->legacy_board) {
				dev_err(&h->pdev->dev,
					"report luns requested format %u, got %u\n",
					extended_response,
					rld->extended_response_flag);
				rc = -EINVAL;
			} else
				rc = -EOPNOTSUPP;
		}
	}
out:
	cmd_free(h, c);
	return rc;
}
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportExtendedLUNdata *buf, int bufsize)
{
	int rc;
	unsigned int i;
	unsigned int nphys;
	struct ReportLUNdata *lbuf;

	rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
				      HPSA_REPORT_PHYS_EXTENDED);
	if (!rc || rc != -EOPNOTSUPP)
		return rc;

	/* REPORT PHYS EXTENDED is not supported */
	lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
	if (!lbuf)
		return -ENOMEM;

	rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
	if (!rc) {
		/* Copy ReportLUNdata header */
		memcpy(buf, lbuf, 8);
		nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
		for (i = 0; i < nphys; i++)
			memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
	}
	kfree(lbuf);
	return rc;
}
static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}
static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
/* Use VPD inquiry to get details of volume status */
static int hpsa_get_volume_status(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	int rc;
	int status;
	int size;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

	/* Does controller have VPD for logical volume status? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
		goto exit_failed;

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}
/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 *  0xff (offline for unknown reasons)
 *  # (integer code indicating one of several NOT READY states
 *     describing why a volume is to be kept offline)
 */
static unsigned char hpsa_volume_offline(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense;
	u8 sense_key, asc, ascq;
	int sense_len;
	int rc, ldstat = 0;
	u16 cmd_status;
	u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);

	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
					NO_TIMEOUT);
	if (rc) {
		cmd_free(h, c);
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
	}
	sense = c->err_info->SenseInfo;
	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;
	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
	cmd_status = c->err_info->CommandStatus;
	scsi_status = c->err_info->ScsiStatus;
	cmd_free(h, c);

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_FAILED:
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_NOT_AVAILABLE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return HPSA_LV_OK;
}
static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;
	int rc = 0;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff) {
		rc = -ENOMEM;
		goto bail_out;
	}

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		dev_err(&h->pdev->dev,
			"%s: inquiry failed, device will be skipped.\n",
			__func__);
		rc = HPSA_INQUIRY_FAILED;
		goto bail_out;
	}

	scsi_sanitize_inquiry_string(&inq_buff[8], 8);
	scsi_sanitize_inquiry_string(&inq_buff[16], 16);

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	this_device->rev = inq_buff[2];
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
		sizeof(this_device->device_id)) < 0) {
		dev_err(&h->pdev->dev,
			"hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n",
			h->ctlr, __func__,
			h->scsi_host->host_no,
			this_device->bus, this_device->target,
			this_device->lun,
			scsi_device_type(this_device->devtype),
			this_device->model);
		rc = HPSA_LV_FAILED;
		goto bail_out;
	}

	if ((this_device->devtype == TYPE_DISK ||
		this_device->devtype == TYPE_ZBC) &&
		is_logical_dev_addr_mode(scsi3addr)) {
		unsigned char volume_offline;

		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
		volume_offline = hpsa_volume_offline(h, scsi3addr);
		if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
		    h->legacy_board) {
			/*
			 * Legacy boards might not support volume status
			 */
			dev_info(&h->pdev->dev,
				 "C0:T%d:L%d Volume status not available, assuming online.\n",
				 this_device->target, this_device->lun);
			volume_offline = 0;
		}
		this_device->volume_offline = volume_offline;
		if (volume_offline == HPSA_LV_FAILED) {
			rc = HPSA_LV_FAILED;
			dev_err(&h->pdev->dev,
				"%s: LV failed, device will be skipped.\n",
				__func__);
			goto bail_out;
		}
	} else {
		this_device->raid_level = RAID_UNKNOWN;
		this_device->offload_config = 0;
		hpsa_turn_off_ioaccel_for_device(this_device);
		this_device->hba_ioaccel_enabled = 0;
		this_device->volume_offline = 0;
		this_device->queue_depth = h->nr_cmds;
	}

	if (this_device->external)
		this_device->queue_depth = EXTERNAL_QD;

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}
	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return rc;
}
/*
 * Helper function to assign bus, target, lun mapping of devices.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = get_unaligned_le32(lunaddrbytes);

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes)) {
			int bus = HPSA_HBA_BUS;

			if (!device->rev)
				bus = HPSA_LEGACY_HBA_BUS;
			hpsa_set_bus_target_lun(device,
					bus, 0, lunid & 0x3fff);
		} else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device,
					HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
		return;
	}
	/* It's a logical device */
	if (device->external) {
		hpsa_set_bus_target_lun(device,
			HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
			lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
				0, lunid & 0x3fff);
}
static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlocal_logicals)
{
	/* In report logicals, local logicals are listed first,
	 * then any externals.
	 */
	int logicals_start = nphysicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return 0;

	if (i < logicals_start)
		return 0;

	/* i is in logicals range, but still within local logicals */
	if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
		return 0;

	return 1; /* it's an external lun */
}
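/*
 * Index layout assumed by figure_external_status() (and by
 * figure_lunaddrbytes() below): the controller LUN occupies slot 0
 * when raid_ctlr_position == 0, then come the nphysicals physical
 * entries, then local logical volumes, then any external logicals.
 */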
/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}
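
/*
 * Note: each CISS_REPORT_PHYS entry is 24 bytes (extended format) and
 * each CISS_REPORT_LOG entry is 8 bytes, which is why LUNListLength is
 * divided by 24 and by 8 above to turn byte counts into LUN counts.
 * For instance, a physical LUNListLength of 240 describes 10 drives.
 */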
static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */
	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i -
				(raid_ctlr_position == 0)].lunid[0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}
/* get physical drive ioaccel handle and queue depth */
static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev,
		struct ReportExtendedLUNdata *rlep, int rle_index,
		struct bmic_identify_physical_device *id_phys)
{
	int rc;
	struct ext_report_lun_entry *rle;

	rle = &rlep->LUN[rle_index];

	dev->ioaccel_handle = rle->ioaccel_handle;
	if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
		dev->hba_ioaccel_enabled = 1;
	memset(id_phys, 0, sizeof(*id_phys));
	rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
			GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
			sizeof(*id_phys));
	if (!rc)
		/* Reserve space for FW operations */
#define DRIVE_CMDS_RESERVED_FOR_FW 2
#define DRIVE_QUEUE_DEPTH 7
		dev->queue_depth =
			le16_to_cpu(id_phys->current_queue_depth_limit) -
				DRIVE_CMDS_RESERVED_FOR_FW;
	else
		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
}
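
/*
 * Example of the queue depth derivation above: if the firmware reports
 * current_queue_depth_limit == 32, the drive gets a queue depth of
 * 32 - DRIVE_CMDS_RESERVED_FOR_FW = 30; when the BMIC identify fails,
 * the conservative fallback of DRIVE_QUEUE_DEPTH (7) is used instead.
 */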
static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
	struct ReportExtendedLUNdata *rlep, int rle_index,
	struct bmic_identify_physical_device *id_phys)
{
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];

	if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
		this_device->hba_ioaccel_enabled = 1;

	memcpy(&this_device->active_path_index,
		&id_phys->active_path_number,
		sizeof(this_device->active_path_index));
	memcpy(&this_device->path_map,
		&id_phys->redundant_path_present_map,
		sizeof(this_device->path_map));
	memcpy(&this_device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(this_device->box));
	memcpy(&this_device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(this_device->phys_connector));
	memcpy(&this_device->bay,
		&id_phys->phys_bay_in_box,
		sizeof(this_device->bay));
}
/* get number of local logical disks. */
static int hpsa_set_local_logical_count(struct ctlr_info *h,
	struct bmic_identify_controller *id_ctlr,
	u32 *nlocals)
{
	int rc;

	if (!id_ctlr) {
		dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
			__func__);
		return -ENOMEM;
	}
	memset(id_ctlr, 0, sizeof(*id_ctlr));
	rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
	if (!rc)
		if (id_ctlr->configured_logical_drive_count < 255)
			*nlocals = id_ctlr->configured_logical_drive_count;
		else
			*nlocals = le16_to_cpu(
					id_ctlr->extended_logical_unit_count);
	else
		*nlocals = -1;
	return rc;
}
static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
{
	struct bmic_identify_physical_device *id_phys;
	bool is_spare = false;
	int rc;

	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!id_phys)
		return false;

	rc = hpsa_bmic_id_physical_device(h,
					lunaddrbytes,
					GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
					id_phys, sizeof(*id_phys));
	if (rc == 0)
		is_spare = (id_phys->more_flags >> 6) & 0x01;

	kfree(id_phys);
	return is_spare;
}
#define RPL_DEV_FLAG_NON_DISK                           0x1
#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED  0x2
#define RPL_DEV_FLAG_UNCONFIG_DISK                      0x4

#define BMIC_DEVICE_TYPE_ENCLOSURE  6

static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
	struct ext_report_lun_entry *rle)
{
	u8 device_flags;
	u8 device_type;

	if (!MASKED_DEVICE(lunaddrbytes))
		return false;

	device_flags = rle->device_flags;
	device_type = rle->device_type;

	if (device_flags & RPL_DEV_FLAG_NON_DISK) {
		if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
			return false;
		return true;
	}

	if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
		return false;

	if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
		return false;

	/*
	 * Spares may be spun down, we do not want to
	 * do an Inquiry to a RAID set spare drive as
	 * that would have them spun up, that is a
	 * performance hit because I/O to the RAID device
	 * stops while the spin up occurs which can take
	 * over 50 seconds.
	 */
	if (hpsa_is_disk_spare(h, lunaddrbytes))
		return true;

	return false;
}
static void hpsa_update_scsi_devices(struct ctlr_info *h)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	struct bmic_identify_controller *id_ctlr = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 nlocal_logicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
	bool physical_device;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list ||
		!tmpdevice || !id_phys || !id_ctlr) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */

	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
			logdev_list, &nlogicals)) {
		h->drv_req_rescan = 1;
		goto out;
	}

	/* Set number of local logicals (non PTRAID) */
	if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
		dev_warn(&h->pdev->dev,
			"%s: Can't determine number of local logical devices.\n",
			__func__);
	}

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	hpsa_ext_ctrl_present(h, physdev_list);

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				" %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			h->drv_req_rescan = 1;
			goto out;
		}
		ndev_allocated++;
	}

	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;
		int rc = 0;
		int phys_dev_index = i - (raid_ctlr_position == 0);
		bool skip_device = false;

		memset(tmpdevice, 0, sizeof(*tmpdevice));

		physical_device = i < nphysicals + (raid_ctlr_position == 0);

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);

		/* Determine if this is a lun from an external target array */
		tmpdevice->external =
			figure_external_status(h, raid_ctlr_position, i,
						nphysicals, nlocal_logicals);

		/*
		 * Skip over some devices such as a spare.
		 */
		if (!tmpdevice->external && physical_device) {
			skip_device = hpsa_skip_device(h, lunaddrbytes,
					&physdev_list->LUN[phys_dev_index]);
			if (skip_device)
				continue;
		}

		/* Get device type, vendor, model, device id, raid_map */
		rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR);
		if (rc == -ENOMEM) {
			dev_warn(&h->pdev->dev,
				"Out of memory, rescan deferred.\n");
			h->drv_req_rescan = 1;
			goto out;
		}
		if (rc) {
			h->drv_req_rescan = 1;
			continue;
		}

		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		this_device = currentsd[ncurrent];

		*this_device = *tmpdevice;
		this_device->physical_device = physical_device;

		/*
		 * Expose all devices except for physical devices that
		 * are masked.
		 */
		if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
			this_device->expose_device = 0;
		else
			this_device->expose_device = 1;

		/*
		 * Get the SAS address for physical devices that are exposed.
		 */
		if (this_device->physical_device && this_device->expose_device)
			hpsa_get_sas_address(h, lunaddrbytes, this_device);

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
		case TYPE_ZBC:
			if (this_device->physical_device) {
				/* The disk is in HBA mode. */
				/* Never use RAID mapper in HBA mode. */
				this_device->offload_enabled = 0;
				hpsa_get_ioaccel_drive_info(h, this_device,
					physdev_list, phys_dev_index, id_phys);
				hpsa_get_path_info(this_device,
					physdev_list, phys_dev_index, id_phys);
			}
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_ENCLOSURE:
			if (!this_device->external)
				hpsa_get_enclosure_info(h, lunaddrbytes,
						physdev_list, phys_dev_index,
						this_device);
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}

	if (h->sas_host == NULL) {
		int rc = 0;

		rc = hpsa_add_sas_host(h);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Could not add sas host %d\n", rc);
			goto out;
		}
	}

	adjust_hpsa_scsi_table(h, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_ctlr);
	kfree(id_phys);
}
static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
				   struct scatterlist *sg)
{
	u64 addr64 = (u64) sg_dma_address(sg);
	unsigned int len = sg_dma_len(sg);

	desc->Addr = cpu_to_le64(addr64);
	desc->Len = cpu_to_le32(len);
	desc->Ext = 0;
}
/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int use_sg, i, sg_limit, chained, last_sg;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	/*
	 * If the number of entries is greater than the max for a single list,
	 * then we have a chained list; we will set up all but one entry in the
	 * first list (the last entry is saved for link information);
	 * otherwise, we don't have a chained list and we'll set up at each of
	 * the entries in the one list.
	 */
	curr_sg = cp->SG;
	chained = use_sg > h->max_cmd_sg_entries;
	sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
	last_sg = scsi_sg_count(cmd) - 1;
	scsi_for_each_sg(cmd, sg, sg_limit, i) {
		hpsa_set_sg_descriptor(curr_sg, sg);
		curr_sg++;
	}

	if (chained) {
		/*
		 * Continue with the chained list.  Set curr_sg to the chained
		 * list.  Modify the limit to the total count less the entries
		 * we've already set up.  Resume the scan at the list entry
		 * where the previous loop left off.
		 */
		curr_sg = h->cmd_sg_list[cp->cmdindex];
		sg_limit = use_sg - sg_limit;
		for_each_sg(sg, sg, sg_limit, i) {
			hpsa_set_sg_descriptor(curr_sg, sg);
			curr_sg++;
		}
	}

	/* Back the pointer up to the last entry and mark it as "last". */
	(curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
	return 0;
}
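
/*
 * Worked example of the chaining math above: with
 * h->max_cmd_sg_entries == 32 and use_sg == 40, the command is chained;
 * the first list holds 31 descriptors (the last slot links to the chain
 * block) and the chain block holds the remaining 40 - 31 = 9, with
 * SGTotal set to use_sg + 1 = 41 to account for the chain pointer entry.
 */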
static inline void warn_zero_length_transfer(struct ctlr_info *h,
						u8 *cdb, int cdb_len,
						const char *func)
{
	dev_warn(&h->pdev->dev,
		 "%s: Blocking zero-length request: CDB:%*phN\n",
		 func, cdb_len, cdb);
}

#define IO_ACCEL_INELIGIBLE 1
/* zero-length transfers trigger hardware errors. */
static bool is_zero_length_transfer(u8 *cdb)
{
	u32 block_cnt;

	/* Block zero-length transfer sizes on certain commands. */
	switch (cdb[0]) {
	case READ_10:
	case WRITE_10:
	case VERIFY:		/* 0x2F */
	case WRITE_VERIFY:	/* 0x2E */
		block_cnt = get_unaligned_be16(&cdb[7]);
		break;
	case READ_12:
	case WRITE_12:
	case VERIFY_12:		/* 0xAF */
	case WRITE_VERIFY_12:	/* 0xAE */
		block_cnt = get_unaligned_be32(&cdb[6]);
		break;
	case READ_16:
	case WRITE_16:
	case VERIFY_16:		/* 0x8F */
		block_cnt = get_unaligned_be32(&cdb[10]);
		break;
	default:
		return false;
	}

	return block_cnt == 0;
}
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
	int is_write = 0;
	u32 block;
	u32 block_cnt;

	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_12:
		is_write = 1;
		fallthrough;
	case READ_6:
	case READ_12:
		if (*cdb_len == 6) {
			block = (((cdb[1] & 0x1F) << 16) |
				(cdb[2] << 8) |
				cdb[3]);
			block_cnt = cdb[4];
			if (block_cnt == 0)
				block_cnt = 256;
		} else {
			BUG_ON(*cdb_len != 12);
			block = get_unaligned_be32(&cdb[2]);
			block_cnt = get_unaligned_be32(&cdb[6]);
		}
		if (block_cnt > 0xffff)
			return IO_ACCEL_INELIGIBLE;

		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (block >> 24);
		cdb[3] = (u8) (block >> 16);
		cdb[4] = (u8) (block >> 8);
		cdb[5] = (u8) (block);
		cdb[6] = 0;
		cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}
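
/*
 * Illustrative example of the fixup above: a 6-byte READ of LBA 0x12345
 * for 16 blocks (CDB 08 01 23 45 10 00) is rewritten in place to the
 * 10-byte form READ_10 (28 00 00 01 23 45 00 00 10 00) and *cdb_len
 * becomes 10. Counts above 0xffff cannot be encoded in READ_10/WRITE_10,
 * so those requests return IO_ACCEL_INELIGIBLE and take the normal path.
 */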
static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int len;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;

	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (is_zero_length_transfer(cdb)) {
		warn_zero_length_transfer(h, cdb, cdb_len, __func__);
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr = cpu_to_le64(addr64);
			curr_sg->Len = cpu_to_le32(len);
			curr_sg->Ext = cpu_to_le32(0);
			curr_sg++;
		}
		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}

	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
	cp->transfer_len = cpu_to_le32(total_len);
	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
	cp->control = cpu_to_le32(control);
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);
	/* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}
/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	if (!dev)
		return -1;

	if (dev->in_reset)
		return -1;

	c->phys_disk = dev;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}
/*
 * Set encryption parameters for the ioaccel2 request
 */
static void set_encrypt_ioaccel2(struct ctlr_info *h,
	struct CommandList *c, struct io_accel2_cmd *cp)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	u64 first_block;

	/* Are we doing encryption on this device */
	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
		return;
	/* Set the data encryption key index. */
	cp->dekindex = map->dekindex;

	/* Set the encryption enable flag, encoded into direction field. */
	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;

	/* Set encryption tweak values based on logical block address
	 * If block size is 512, tweak value is LBA.
	 * For other block sizes, tweak is (LBA * block size)/ 512)
	 */
	switch (cmd->cmnd[0]) {
	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
	case READ_6:
	case WRITE_6:
		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
				(cmd->cmnd[2] << 8) |
				cmd->cmnd[3]);
		break;
	case WRITE_10:
	case READ_10:
	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_12:
	case READ_12:
		first_block = get_unaligned_be32(&cmd->cmnd[2]);
		break;
	case WRITE_16:
	case READ_16:
		first_block = get_unaligned_be64(&cmd->cmnd[2]);
		break;
	default:
		dev_err(&h->pdev->dev,
			"ERROR: %s: size (0x%x) not supported for encryption\n",
			__func__, cmd->cmnd[0]);
		BUG();
		break;
	}

	if (le32_to_cpu(map->volume_blk_size) != 512)
		first_block = first_block *
				le32_to_cpu(map->volume_blk_size)/512;

	cp->tweak_lower = cpu_to_le32(first_block);
	cp->tweak_upper = cpu_to_le32(first_block >> 32);
}
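
/*
 * Tweak example, following the rule in the comment above: for a volume
 * with volume_blk_size == 4096 and an I/O starting at volume LBA 100,
 * the tweak becomes 100 * 4096 / 512 = 800, i.e. the equivalent
 * 512-byte-sector offset; the 64-bit result is then split across
 * tweak_lower and tweak_upper.
 */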
static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct ioaccel2_sg_element *curr_sg;
	int use_sg, i;
	struct scatterlist *sg;
	u64 addr64;
	u32 len;
	u32 total_len = 0;

	if (!cmd->device)
		return -1;

	if (!cmd->device->hostdata)
		return -1;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	if (is_zero_length_transfer(cdb)) {
		warn_zero_length_transfer(h, cdb, cdb_len, __func__);
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL2;
	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(cp, 0, sizeof(*cp));
	cp->IU_type = IOACCEL2_IU_TYPE;

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->sg;
		if (use_sg > h->ioaccel_maxsg) {
			addr64 = le64_to_cpu(
				h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = 0;
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = IOACCEL2_CHAIN;

			curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
		}
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}

		/*
		 * Set the last s/g element bit
		 */
		(curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
		cp->direction |= IOACCEL2_DIR_NO_DATA;
	}

	/* Set encryption parameters, if necessary */
	set_encrypt_ioaccel2(h, c, cp);

	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	memcpy(cp->cdb, cdb, sizeof(cp->cdb));

	cp->data_len = cpu_to_le32(total_len);
	cp->err_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	cp->err_len = cpu_to_le32(sizeof(cp->error_data));

	/* fill in sg elements */
	if (use_sg > h->ioaccel_maxsg) {
		cp->sg_count = 1;
		cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
		if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
			atomic_dec(&phys_disk->ioaccel_cmds_out);
			scsi_dma_unmap(cmd);
			return -1;
		}
	} else
		cp->sg_count = (u8) use_sg;

	if (phys_disk->in_reset) {
		cmd->result = DID_RESET << 16;
		return -1;
	}

	enqueue_cmd_and_start_io(h, c);
	return 0;
}
/*
 * Queue a command to the correct I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	if (!c->scsi_cmd->device)
		return -1;

	if (!c->scsi_cmd->device->hostdata)
		return -1;

	if (phys_disk->in_reset)
		return -1;

	/* Try to honor the device's queue depth */
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
					phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
}
static void raid_map_helper(struct raid_map_data *map,
		int offload_to_mirror, u32 *map_index, u32 *current_group)
{
	if (offload_to_mirror == 0)  {
		/* use physical disk in the first mirrored group. */
		*map_index %= le16_to_cpu(map->data_disks_per_row);
		return;
	}
	do {
		/* determine mirror group that *map_index indicates */
		*current_group = *map_index /
			le16_to_cpu(map->data_disks_per_row);
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
			/* select map index from next group */
			*map_index += le16_to_cpu(map->data_disks_per_row);
			(*current_group)++;
		} else {
			/* select map index from first group */
			*map_index %= le16_to_cpu(map->data_disks_per_row);
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}
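
/*
 * Example: with data_disks_per_row == 3 and layout_map_count == 3
 * (a 3-way mirror), a map_index of 1 in group 0 and
 * offload_to_mirror == 2 walks the groups 0 -> 1 -> 2, leaving
 * map_index at 1 + 3 + 3 = 7, i.e. the same column in the third
 * mirror group.
 */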
/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */
static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int is_write = 0;
	u32 map_index;
	u64 first_block, last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row, last_row;
	u32 first_row_offset, last_row_offset;
	u32 first_column, last_column;
	u64 r0_first_row, r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row, r5or6_last_row;
	u32 r5or6_first_row_offset, r5or6_last_row_offset;
	u32 r5or6_first_column, r5or6_last_column;
	u32 total_disks_per_row;
	u32 stripesize;
	u32 first_group, last_group, current_group;
	u32 map_row;
	u32 disk_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_len;
	u16 strip_size;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif
	int offload_to_mirror;

	if (!dev)
		return -1;

	if (dev->in_reset)
		return -1;

	/* check for valid opcode, get LBA and block count */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
		is_write = 1;
		fallthrough;
	case READ_6:
		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
				(cmd->cmnd[2] << 8) |
				cmd->cmnd[3]);
		block_cnt = cmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256;
		break;
	case WRITE_10:
		is_write = 1;
		fallthrough;
	case READ_10:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[7]) << 8) |
			cmd->cmnd[8];
		break;
	case WRITE_12:
		is_write = 1;
		fallthrough;
	case READ_12:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[6]) << 24) |
			(((u32) cmd->cmnd[7]) << 16) |
			(((u32) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		break;
	case WRITE_16:
		is_write = 1;
		fallthrough;
	case READ_16:
		first_block =
			(((u64) cmd->cmnd[2]) << 56) |
			(((u64) cmd->cmnd[3]) << 48) |
			(((u64) cmd->cmnd[4]) << 40) |
			(((u64) cmd->cmnd[5]) << 32) |
			(((u64) cmd->cmnd[6]) << 24) |
			(((u64) cmd->cmnd[7]) << 16) |
			(((u64) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		block_cnt =
			(((u32) cmd->cmnd[10]) << 24) |
			(((u32) cmd->cmnd[11]) << 16) |
			(((u32) cmd->cmnd[12]) << 8) |
			cmd->cmnd[13];
		break;
	default:
		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
	}
	last_block = first_block + block_cnt - 1;

	/* check for write to non-RAID-0 */
	if (is_write && dev->raid_level != 0)
		return IO_ACCEL_INELIGIBLE;

	/* check for invalid block or wraparound */
	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
		last_block < first_block)
		return IO_ACCEL_INELIGIBLE;

	/* calculate stripe information for the request */
	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
				le16_to_cpu(map->strip_size);
	strip_size = le16_to_cpu(map->strip_size);
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	(void) do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	(void) do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	(void) do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	(void) do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif

	/* if this isn't a single row/column then give to the controller */
	if ((first_row != last_row) || (first_column != last_column))
		return IO_ACCEL_INELIGIBLE;

	/* proceeding with driver mapping */
	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
				le16_to_cpu(map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;

	switch (dev->raid_level) {
	case HPSA_RAID_0:
		break; /* nothing special to do */
	case HPSA_RAID_1:
		/* Handles load balance across RAID 1 members.
		 * (2-drive R1 and R10 with even # of drives.)
		 * Appropriate for SSDs, not optimal for HDDs
		 * Ensure we have the correct raid_map.
		 */
		if (le16_to_cpu(map->layout_map_count) != 2) {
			hpsa_turn_off_ioaccel_for_device(dev);
			return IO_ACCEL_INELIGIBLE;
		}
		if (dev->offload_to_mirror)
			map_index += le16_to_cpu(map->data_disks_per_row);
		dev->offload_to_mirror = !dev->offload_to_mirror;
		break;
	case HPSA_RAID_ADM:
		/* Handles N-way mirrors  (R1-ADM)
		 * and R10 with # of drives divisible by 3.)
		 * Ensure we have the correct raid_map.
		 */
		if (le16_to_cpu(map->layout_map_count) != 3) {
			hpsa_turn_off_ioaccel_for_device(dev);
			return IO_ACCEL_INELIGIBLE;
		}

		offload_to_mirror = dev->offload_to_mirror;
		raid_map_helper(map, offload_to_mirror,
				&map_index, &current_group);
		/* set mirror group to use next time */
		offload_to_mirror =
			(offload_to_mirror >=
			le16_to_cpu(map->layout_map_count) - 1)
			? 0 : offload_to_mirror + 1;
		dev->offload_to_mirror = offload_to_mirror;
		/* Avoid direct use of dev->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of dev->layout_map_count -1.
		 */
		break;
	case HPSA_RAID_5:
	case HPSA_RAID_6:
		if (le16_to_cpu(map->layout_map_count) <= 1)
			break;

		/* Verify first and last block are in same RAID group */
		r5or6_blocks_per_row =
			le16_to_cpu(map->strip_size) *
			le16_to_cpu(map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			hpsa_turn_off_ioaccel_for_device(dev);
			return IO_ACCEL_INELIGIBLE;
		}
		stripesize = r5or6_blocks_per_row *
			le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_group = do_div(tmpdiv, stripesize);
		tmpdiv = first_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		first_group = tmpdiv;
		tmpdiv = last_block;
		last_group = do_div(tmpdiv, stripesize);
		tmpdiv = last_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		last_group = tmpdiv;
#else
		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
#endif
		if (first_group != last_group)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single row of RAID 5/6 */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		(void) do_div(tmpdiv, stripesize);
		first_row = r5or6_first_row = r0_first_row = tmpdiv;
		tmpdiv = last_block;
		(void) do_div(tmpdiv, stripesize);
		r5or6_last_row = r0_last_row = tmpdiv;
#else
		first_row = r5or6_first_row = r0_first_row =
						first_block / stripesize;
		r5or6_last_row = r0_last_row = last_block / stripesize;
#endif
		if (r5or6_first_row != r5or6_last_row)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = r5or6_last_row_offset;
		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
		tmpdiv = r5or6_first_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		first_column = r5or6_first_column = tmpdiv;
		tmpdiv = r5or6_last_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		r5or6_last_column = tmpdiv;
#else
		first_row_offset = r5or6_first_row_offset =
			(u32)((first_block % stripesize) %
						r5or6_blocks_per_row);

		r5or6_last_row_offset =
			(u32)((last_block % stripesize) %
						r5or6_blocks_per_row);

		first_column = r5or6_first_column =
			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
		r5or6_last_column =
			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
#endif
		if (r5or6_first_column != r5or6_last_column)
			return IO_ACCEL_INELIGIBLE;

		/* Request is eligible */
		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
			le16_to_cpu(map->row_cnt);

		map_index = (first_group *
			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
			(map_row * total_disks_per_row) + first_column;
		break;
	default:
		return IO_ACCEL_INELIGIBLE;
	}

	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
		return IO_ACCEL_INELIGIBLE;

	c->phys_disk = dev->phys_disk[map_index];
	if (!c->phys_disk)
		return IO_ACCEL_INELIGIBLE;

	disk_handle = dd[map_index].ioaccel_handle;
	disk_block = le64_to_cpu(map->disk_starting_blk) +
			first_row * le16_to_cpu(map->strip_size) +
			(first_row_offset - first_column *
			le16_to_cpu(map->strip_size));
	disk_block_cnt = block_cnt;

	/* handle differing logical/physical block sizes */
	if (map->phys_blk_shift) {
		disk_block <<= map->phys_blk_shift;
		disk_block_cnt <<= map->phys_blk_shift;
	}
	BUG_ON(disk_block_cnt > 0xffff);

	/* build the new CDB for the physical disk I/O */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 56);
		cdb[3] = (u8) (disk_block >> 48);
		cdb[4] = (u8) (disk_block >> 40);
		cdb[5] = (u8) (disk_block >> 32);
		cdb[6] = (u8) (disk_block >> 24);
		cdb[7] = (u8) (disk_block >> 16);
		cdb[8] = (u8) (disk_block >> 8);
		cdb[9] = (u8) (disk_block);
		cdb[10] = (u8) (disk_block_cnt >> 24);
		cdb[11] = (u8) (disk_block_cnt >> 16);
		cdb[12] = (u8) (disk_block_cnt >> 8);
		cdb[13] = (u8) (disk_block_cnt);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_len = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 24);
		cdb[3] = (u8) (disk_block >> 16);
		cdb[4] = (u8) (disk_block >> 8);
		cdb[5] = (u8) (disk_block);
		cdb[6] = 0;
		cdb[7] = (u8) (disk_block_cnt >> 8);
		cdb[8] = (u8) (disk_block_cnt);
		cdb[9] = 0;
		cdb_len = 10;
	}
	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
						dev->scsi3addr,
						dev->phys_disk[map_index]);
}
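
/*
 * Mapping example (RAID 0): with strip_size == 128 blocks and
 * data_disks_per_row == 4, blocks_per_row == 512. An 8-block request
 * at LBA 700 falls in row 1 (700 / 512) at row offset 188, column 1
 * (188 / 128), so map_index selects the second disk of the second row
 * and the physical CDB starts at disk_starting_blk + 1 * 128 +
 * (188 - 1 * 128) = disk_starting_blk + 188.
 */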
/*
 * Submit commands down the "normal" RAID stack path
 * All callers to hpsa_ciss_submit must check lockup_detected
 * beforehand, before (opt.) and after calling cmd_alloc
 */
static int hpsa_ciss_submit(struct ctlr_info *h,
	struct CommandList *c, struct scsi_cmnd *cmd,
	struct hpsa_scsi_dev_t *dev)
{
	cmd->host_scribble = (unsigned char *) c;
	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8);
	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
		break;
	case DMA_FROM_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
		break;
	case DMA_NONE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */

		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */

		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		hpsa_cmd_resolve_and_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	if (dev->in_reset) {
		hpsa_cmd_resolve_and_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	c->device = dev;

	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}
static void hpsa_cmd_init(struct ctlr_info *h, int index,
				struct CommandList *c)
{
	dma_addr_t cmd_dma_handle, err_dma_handle;

	/* Zero out all of commandlist except the last field, refcount */
	memset(c, 0, offsetof(struct CommandList, refcount));
	c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
	cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
	c->err_info = h->errinfo_pool + index;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + index * sizeof(*c->err_info);
	c->cmdindex = index;
	c->busaddr = (u32) cmd_dma_handle;
	c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
	c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
	c->h = h;
	c->scsi_cmd = SCSI_CMD_IDLE;
}
static void hpsa_preinitialize_commands(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;

		hpsa_cmd_init(h, i, c);
		atomic_set(&c->refcount, 0);
	}
}
static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
				struct CommandList *c)
{
	dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);

	BUG_ON(c->cmdindex != index);

	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	memset(c->err_info, 0, sizeof(*c->err_info));
	c->busaddr = (u32) cmd_dma_handle;
}
static int hpsa_ioaccel_submit(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd)
{
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	int rc = IO_ACCEL_INELIGIBLE;

	if (!dev)
		return SCSI_MLQUEUE_HOST_BUSY;

	if (dev->in_reset)
		return SCSI_MLQUEUE_HOST_BUSY;

	if (hpsa_simple_mode)
		return IO_ACCEL_INELIGIBLE;

	cmd->host_scribble = (unsigned char *) c;

	if (dev->offload_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		c->device = dev;
		rc = hpsa_scsi_ioaccel_raid_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	} else if (dev->hba_ioaccel_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		c->device = dev;
		rc = hpsa_scsi_ioaccel_direct_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	return rc;
}
static void hpsa_command_resubmit_worker(struct work_struct *work)
{
	struct scsi_cmnd *cmd;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *c = container_of(work, struct CommandList, work);

	cmd = c->scsi_cmd;
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(c->h, c, cmd);
	}

	if (dev->in_reset) {
		cmd->result = DID_RESET << 16;
		return hpsa_cmd_free_and_done(c->h, c, cmd);
	}

	if (c->cmd_type == CMD_IOACCEL2) {
		struct ctlr_info *h = c->h;
		struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
		int rc;

		if (c2->error_data.serv_response ==
				IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
			rc = hpsa_ioaccel_submit(h, c, cmd);
			if (rc == 0)
				return;
			if (rc == SCSI_MLQUEUE_HOST_BUSY) {
				/*
				 * If we get here, it means dma mapping failed.
				 * Try again via scsi mid layer, which will
				 * then get SCSI_MLQUEUE_HOST_BUSY.
				 */
				cmd->result = DID_IMM_RETRY << 16;
				return hpsa_cmd_free_and_done(h, c, cmd);
			}
			/* else, fall thru and resubmit down CISS path */
		}
	}
	hpsa_cmd_partial_init(c->h, c->cmdindex, c);
	if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
		/*
		 * If we get here, it means dma mapping failed. Try
		 * again via scsi mid layer, which will then get
		 * SCSI_MLQUEUE_HOST_BUSY.
		 *
		 * hpsa_ciss_submit will have already freed c
		 * if it encountered a dma mapping failure.
		 */
		cmd->result = DID_IMM_RETRY << 16;
		cmd->scsi_done(cmd);
	}
}
/* Running in struct Scsi_Host->host_lock less mode */
static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *c;
	int rc = 0;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);

	BUG_ON(cmd->request->tag < 0);

	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	if (dev->removed) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	if (unlikely(lockup_detected(h))) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	if (dev->in_reset)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	c = cmd_tagged_alloc(h, cmd);
	if (c == NULL)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	/*
	 * This is necessary because the SML doesn't zero out this field during
	 * error recovery.
	 */
	cmd->result = 0;

	/*
	 * Call alternate submit routine for I/O accelerated commands.
	 * Retries always go down the normal I/O path.
	 */
	if (likely(cmd->retries == 0 &&
			!blk_rq_is_passthrough(cmd->request) &&
			h->acciopath_status)) {
		rc = hpsa_ioaccel_submit(h, c, cmd);
		if (rc == 0)
			return 0;
		if (rc == SCSI_MLQUEUE_HOST_BUSY) {
			hpsa_cmd_resolve_and_free(h, c);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
	}
	return hpsa_ciss_submit(h, c, cmd, dev);
}
static void hpsa_scan_complete(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1;
	wake_up(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}
static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	/*
	 * Don't let rescans be initiated on a controller known to be locked
	 * up.  If the controller locks up *during* a rescan, that thread is
	 * probably hosed, but at least we can prevent new rescan threads from
	 * piling up on a locked up controller.
	 */
	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	/*
	 * If a scan is already waiting to run, no need to add another
	 */
	spin_lock_irqsave(&h->scan_lock, flags);
	if (h->scan_waiting) {
		spin_unlock_irqrestore(&h->scan_lock, flags);
		return;
	}

	spin_unlock_irqrestore(&h->scan_lock, flags);

	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		h->scan_waiting = 1;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	h->scan_waiting = 0;
	spin_unlock_irqrestore(&h->scan_lock, flags);

	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	/*
	 * Do the scan after a reset completion
	 */
	spin_lock_irqsave(&h->reset_lock, flags);
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		spin_unlock_irqrestore(&h->reset_lock, flags);
		hpsa_scan_complete(h);
		return;
	}
	spin_unlock_irqrestore(&h->reset_lock, flags);

	hpsa_update_scsi_devices(h);

	hpsa_scan_complete(h);
}
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;

	if (!logical_drive)
		return -ENODEV;

	if (qdepth < 1)
		qdepth = 1;
	else if (qdepth > logical_drive->queue_depth)
		qdepth = logical_drive->queue_depth;

	return scsi_change_queue_depth(sdev, qdepth);
}
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}
static int hpsa_scsi_host_alloc(struct ctlr_info *h)
{
	struct Scsi_Host *sh;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL) {
		dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
		return -ENOMEM;
	}

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
	sh->cmd_per_lun = sh->can_queue;
	sh->sg_tablesize = h->maxsgentries;
	sh->transportt = hpsa_sas_transport_template;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = pci_irq_vector(h->pdev, 0);
	sh->unique_id = sh->irq;

	h->scsi_host = sh;
	return 0;
}
static int hpsa_scsi_add_host(struct ctlr_info *h)
{
	int rv;

	rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
	if (rv) {
		dev_err(&h->pdev->dev, "scsi_add_host failed\n");
		return rv;
	}
	scsi_scan_host(h->scsi_host);
	return 0;
}
/*
 * The block layer has already gone to the trouble of picking out a unique,
 * small-integer tag for this request.  We use an offset from that value as
 * an index to select our command block.  (The offset allows us to reserve the
 * low-numbered entries for our own uses.)
 */
static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
{
	int idx = scmd->request->tag;

	if (idx < 0)
		return idx;

	/* Offset to leave space for internal cmds. */
	return idx += HPSA_NRESERVED_CMDS;
}
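
/*
 * Example of the tag offset above: if, say, HPSA_NRESERVED_CMDS were 16,
 * block-layer tag 0 would map to command pool index 16, leaving pool
 * entries 0..15 reserved for driver-internal commands allocated via
 * cmd_alloc().
 */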
/*
 * Send a TEST_UNIT_READY command to the specified LUN using the specified
 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
 */
static int hpsa_send_test_unit_ready(struct ctlr_info *h,
				struct CommandList *c, unsigned char lunaddr[],
				int reply_queue)
{
	int rc;

	/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
	(void) fill_cmd(c, TEST_UNIT_READY, h,
			NULL, 0, 0, lunaddr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc)
		return rc;
	/* no unmap needed here because no data xfer. */

	/* Check if the unit is already ready. */
	if (c->err_info->CommandStatus == CMD_SUCCESS)
		return 0;

	/*
	 * The first command sent after reset will receive "unit attention" to
	 * indicate that the LUN has been reset...this is actually what we're
	 * looking for (but, success is good too).
	 */
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
		c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
		return 0;

	return 1;
}
/*
 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
 * returns zero when the unit is ready, and non-zero when giving up.
 */
static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
				struct CommandList *c,
				unsigned char lunaddr[], int reply_queue)
{
	int rc;
	int count = 0;
	int waittime = 1; /* seconds */

	/* Send test unit ready until device ready, or give up. */
	for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {

		/*
		 * Wait for a bit.  do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);

		rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
		if (!rc)
			break;

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime *= 2;

		dev_warn(&h->pdev->dev,
			 "waiting %d secs for device to become ready.\n",
			 waittime);
	}

	return rc;
}
static int wait_for_device_to_become_ready(struct ctlr_info *h,
					   unsigned char lunaddr[],
					   int reply_queue)
{
	int rc = 0;
	int first_queue;
	int last_queue;
	int rq;
	struct CommandList *c;

	c = cmd_alloc(h);

	/*
	 * If no specific reply queue was requested, then send the TUR
	 * repeatedly, requesting a reply on each reply queue; otherwise execute
	 * the loop exactly once using only the specified queue.
	 */
	if (reply_queue == DEFAULT_REPLY_QUEUE) {
		first_queue = 0;
		last_queue = h->nreply_queues - 1;
	} else {
		first_queue = reply_queue;
		last_queue = reply_queue;
	}

	for (rq = first_queue; rq <= last_queue; rq++) {
		rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
		if (rc)
			break;
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_free(h, c);
	return rc;
}
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining.  Doing a host- or bus-reset can't do anything good here.
 */
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	int rc = SUCCESS;
	int i;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev = NULL;
	u8 reset_type;
	char msg[48];
	unsigned long flags;

	/* find the controller to which the command to be aborted was sent */
	h = sdev_to_hba(scsicmd->device);
	if (h == NULL) /* paranoia */
		return FAILED;

	spin_lock_irqsave(&h->reset_lock, flags);
	h->reset_in_progress = 1;
	spin_unlock_irqrestore(&h->reset_lock, flags);

	if (lockup_detected(h)) {
		rc = FAILED;
		goto return_reset_status;
	}

	dev = scsicmd->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
		rc = FAILED;
		goto return_reset_status;
	}

	if (dev->devtype == TYPE_ENCLOSURE) {
		rc = SUCCESS;
		goto return_reset_status;
	}

	/* if controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		snprintf(msg, sizeof(msg),
			 "cmd %d RESET FAILED, lockup detected",
			 hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		rc = FAILED;
		goto return_reset_status;
	}

	/* this reset request might be the result of a lockup; check */
	if (detect_controller_lockup(h)) {
		snprintf(msg, sizeof(msg),
			 "cmd %d RESET FAILED, new lockup detected",
			 hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		rc = FAILED;
		goto return_reset_status;
	}

	/* Do not attempt on controller */
	if (is_hba_lunid(dev->scsi3addr)) {
		rc = SUCCESS;
		goto return_reset_status;
	}

	if (is_logical_dev_addr_mode(dev->scsi3addr))
		reset_type = HPSA_DEVICE_RESET_MSG;
	else
		reset_type = HPSA_PHYS_TARGET_RESET;

	sprintf(msg, "resetting %s",
		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);

	/*
	 * wait to see if any commands will complete before sending reset
	 */
	dev->in_reset = true; /* block any new cmds from OS for this device */
	for (i = 0; i < 10; i++) {
		if (atomic_read(&dev->commands_outstanding) > 0)
			msleep(1000);
		else
			break;
	}

	/* send a reset to the SCSI LUN which the command was sent to */
	rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE);
	if (rc == 0)
		rc = SUCCESS;
	else
		rc = FAILED;

	sprintf(msg, "reset %s %s",
		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
		rc == SUCCESS ? "completed successfully" : "failed");
	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);

return_reset_status:
	spin_lock_irqsave(&h->reset_lock, flags);
	h->reset_in_progress = 0;
	if (dev)
		dev->in_reset = false;
	spin_unlock_irqrestore(&h->reset_lock, flags);
	return rc;
}
/*
 * For operations with an associated SCSI command, a command block is allocated
 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
 * block request tag as an index into a table of entries.  cmd_tagged_free() is
 * the complement, although cmd_free() may be called instead.
 */
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd)
{
	int idx = hpsa_get_cmd_index(scmd);
	struct CommandList *c = h->cmd_pool + idx;

	if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
		dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
			idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
		/* The index value comes from the block layer, so if it's out of
		 * bounds, it's probably not our bug.
		 */
		BUG();
	}

	if (unlikely(!hpsa_is_cmd_idle(c))) {
		/*
		 * We expect that the SCSI layer will hand us a unique tag
		 * value.  Thus, there should never be a collision here between
		 * two requests...because if the selected command isn't idle
		 * then someone is going to be very disappointed.
		 */
		if (idx != h->last_collision_tag) { /* Print once per tag */
			dev_warn(&h->pdev->dev,
				"%s: tag collision (tag=%d)\n", __func__, idx);
			if (scmd)
				scsi_print_command(scmd);
			h->last_collision_tag = idx;
		}
		return NULL;
	}

	atomic_inc(&c->refcount);

	hpsa_cmd_partial_init(h, idx, c);
	return c;
}

static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
{
	/*
	 * Release our reference to the block.  We don't need to do anything
	 * else to free it, because it is accessed by index.
	 */
	(void)atomic_dec(&c->refcount);
}
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  Lock must be held when calling this.
 * cmd_free() is the complement.
 * This function never gives up and returns NULL.  If it hangs,
 * another thread must call cmd_free() to free some tags.
 */

static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	int refcount, i;
	int offset = 0;

	/*
	 * There is some *extremely* small but non-zero chance that that
	 * multiple threads could get in here, and one thread could
	 * be scanning through the list of bits looking for a free
	 * one, but the free ones are always behind him, and other
	 * threads sneak in behind him and eat them before he can
	 * get to them, so that while there is always a free one, a
	 * very unlucky thread might be starved anyway, never able to
	 * beat the other threads.  In reality, this happens so
	 * infrequently as to be indistinguishable from never.
	 *
	 * Note that we start allocating commands before the SCSI host structure
	 * is initialized.  Since the search starts at bit zero, this
	 * all works, since we have at least one command structure available;
	 * however, it means that the structures with the low indexes have to be
	 * reserved for driver-initiated requests, while requests from the block
	 * layer will use the higher indexes.
	 */

	for (;;) {
		i = find_next_zero_bit(h->cmd_pool_bits,
					HPSA_NRESERVED_CMDS,
					offset);
		if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
			offset = 0;
			continue;
		}
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (unlikely(refcount > 1)) {
			cmd_free(h, c); /* already in use */
			offset = (i + 1) % HPSA_NRESERVED_CMDS;
			continue;
		}
		set_bit(i & (BITS_PER_LONG - 1),
			h->cmd_pool_bits + (i / BITS_PER_LONG));
		break; /* it's ours now. */
	}
	hpsa_cmd_partial_init(h, i, c);
	return c;
}

/*
 * This is the complementary operation to cmd_alloc().  Note, however, in some
 * corner cases it may also be used to free blocks allocated by
 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
 * the clear-bit is harmless.
 */
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
	if (atomic_dec_and_test(&c->refcount)) {
		int i;

		i = c - h->cmd_pool;
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
	}
}
#ifdef CONFIG_COMPAT

static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd,
	void __user *arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	unsigned int cmd, void __user *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
	void __user *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return hpsa_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
#endif
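
/*
 * Illustrative sketch (not from the driver): the 32-bit ioctl structs
 * handled above differ from the native 64-bit ones only in the width of the
 * user buffer pointer, which is why each field is copied individually and
 * 'buf' is widened with compat_ptr().  Hypothetical structs showing the
 * layout mismatch:
 */
#if 0
struct demo_cmd32 {
	unsigned int buf_size;
	unsigned int buf;	/* 32-bit user pointer; cannot be cast directly */
};

struct demo_cmd64 {
	unsigned int buf_size;
	unsigned char *buf;	/* native-width user pointer */
};
#endif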
static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}
static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev,
			"driver version string '%s' unrecognized.",
			HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}
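
/*
 * Worked example (illustrative only): with version string "3.4.20-170",
 * sscanf() above yields vmaj=3, vmin=4, vsubmin=20, so the packed value is
 * (3 << 16) | (4 << 8) | 20 = 0x00030414.  A user-space caller unpacks the
 * CCISS_GETDRIVVER result the same way:
 */
#if 0
unsigned int v = 0x00030414;		/* value returned by the ioctl */
unsigned char vmaj = (v >> 16) & 0xFF;	/* 3 */
unsigned char vmin = (v >> 8) & 0xFF;	/* 4 */
unsigned char vsubmin = v & 0xFF;	/* 20 (0x14) */
#endif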
static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	struct CommandList *c;
	char *buff = NULL;
	u64 temp64;
	int rc = 0;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out_kfree;
			}
		} else {
			memset(buff, 0, iocommand.buf_size);
		}
	}
	c = cmd_alloc(h);

	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else { /* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));

	/* Fill in Request block */
	memcpy(&c->Request, &iocommand.Request,
		sizeof(c->Request));

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64 = dma_map_single(&h->pdev->dev, buff,
			iocommand.buf_size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
			c->SG[0].Addr = cpu_to_le64(0);
			c->SG[0].Len = cpu_to_le32(0);
			rc = -ENOMEM;
			goto out;
		}
		c->SG[0].Addr = cpu_to_le64(temp64);
		c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
	}
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
					NO_TIMEOUT);
	if (iocommand.buf_size > 0)
		hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (rc) {
		rc = -EIO;
		goto out;
	}

	/* Copy the error information out */
	memcpy(&iocommand.error_info, c->err_info,
		sizeof(iocommand.error_info));
	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}
	if ((iocommand.Request.Type.Direction & XFER_READ) &&
		iocommand.buf_size > 0) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			rc = -EFAULT;
			goto out;
		}
	}
out:
	cmd_free(h, c);
out_kfree:
	kfree(buff);
	return rc;
}
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	struct CommandList *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	u64 temp64;
	BYTE sg_used = 0;
	int status = 0;
	u32 left;
	u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = vmemdup_user(argp, sizeof(*ioc));
	if (IS_ERR(ioc)) {
		status = PTR_ERR(ioc);
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_alloc(h);

	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = (u8) sg_used;
	c->Header.SGTotal = cpu_to_le16(sg_used);
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {
		int i;

		for (i = 0; i < sg_used; i++) {
			temp64 = dma_map_single(&h->pdev->dev, buff[i],
				    buff_size[i], DMA_BIDIRECTIONAL);
			if (dma_mapping_error(&h->pdev->dev,
							(dma_addr_t) temp64)) {
				c->SG[i].Addr = cpu_to_le64(0);
				c->SG[i].Len = cpu_to_le32(0);
				hpsa_pci_unmap(h->pdev, c, i,
					DMA_BIDIRECTIONAL);
				status = -ENOMEM;
				goto cleanup0;
			}
			c->SG[i].Addr = cpu_to_le64(temp64);
			c->SG[i].Len = cpu_to_le32(buff_size[i]);
			c->SG[i].Ext = cpu_to_le32(0);
		}
		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
	}
	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
						NO_TIMEOUT);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (status) {
		status = -EIO;
		goto cleanup0;
	}

	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup0;
	}
	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
		int i;

		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;

		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				status = -EFAULT;
				goto cleanup0;
			}
			ptr += buff_size[i];
		}
	}
	status = 0;
cleanup0:
	cmd_free(h, c);
cleanup1:
	if (buff) {
		int i;

		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kvfree(ioc);
	return status;
}
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}
/*
 * ioctl
 */
static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
	void __user *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;
	int rc;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	case CCISS_BIG_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_big_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	default:
		return -ENOTTY;
	}
}
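
/*
 * Illustrative sketch (not part of the driver): the two passthru cases above
 * use atomic_dec_if_positive() as a counting-semaphore style throttle so
 * only a bounded number of passthru commands run at once.  The generic
 * pattern, with a hypothetical 'avail' counter:
 */
#if 0
static atomic_t avail = ATOMIC_INIT(4);	/* allow up to 4 concurrent callers */

static int throttled_op(void)
{
	int rc;

	if (atomic_dec_if_positive(&avail) < 0)
		return -EAGAIN;		/* all slots busy; caller retries */
	rc = 0;				/* ... do the rate-limited work ... */
	atomic_inc(&avail);		/* release the slot */
	return rc;
}
#endif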
static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type)
{
	struct CommandList *c;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
		RAID_CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
}
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	enum dma_data_direction dir = DMA_NONE;

	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code & VPD_PAGE) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = (page_code & 0xff);
			}
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case RECEIVE_DIAGNOSTIC:
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = 1;
			c->Request.CDB[2] = 1;
			c->Request.CDB[3] = (size >> 8) & 0xFF;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to controller so it's a physical command
			   mode = 00 target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case BMIC_SENSE_DIAG_OPTIONS:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			/* Spec says this should be BMIC_WRITE */
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
			break;
		case BMIC_SET_DIAG_OPTIONS:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			c->Request.CDB[7] = (size >> 8) & 0xFF;
			c->Request.CDB[8] = size & 0xFF;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0;
			break;
		case HPSA_GET_RAID_MAP:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_CISS_READ;
			c->Request.CDB[1] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case BMIC_SENSE_CONTROLLER_PARAMETERS:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_IDENTIFY_PHYSICAL_DEVICE:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_SENSE_SUBSYSTEM_INFORMATION:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_SENSE_STORAGE_BOX_PARAMS:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_IDENTIFY_CONTROLLER:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[1] = 0;
			c->Request.CDB[2] = 0;
			c->Request.CDB[3] = 0;
			c->Request.CDB[4] = 0;
			c->Request.CDB[5] = 0;
			c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = 0;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
			BUG();
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {
		case  HPSA_PHYS_TARGET_RESET:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = HPSA_RESET;
			c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
			/* Physical target reset needs no control bytes 4-7*/
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		case  HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] =  cmd;
			c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
			/* If bytes 4-7 are zero, it means reset the */
			/* LunID device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (GET_DIR(c->Request.type_attr_dir)) {
	case XFER_READ:
		dir = DMA_FROM_DEVICE;
		break;
	case XFER_WRITE:
		dir = DMA_TO_DEVICE;
		break;
	case XFER_NONE:
		dir = DMA_NONE;
		break;
	default:
		dir = DMA_BIDIRECTIONAL;
	}
	if (hpsa_map_one(h->pdev, c, buff, size, dir))
		return -1;
	return 0;
}
/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap(page_base,
		page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}
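
/*
 * Worked example (illustrative only, assuming 4 KiB pages): calling
 * remap_pci_mem(0xfe001250, 0x100) maps from the page boundary 0xfe001000
 * and hands back the mapping plus the 0x250 offset, so the caller sees the
 * exact BAR address it asked for.
 */
#if 0
ulong base = 0xfe001250, size = 0x100;
ulong page_base = base & ~0xfffUL;	/* 0xfe001000 */
ulong page_offs = base - page_base;	/* 0x250 */
/* ioremap(page_base, page_offs + size), then add page_offs back */
#endif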
static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
	return h->access.command_completed(h, q);
}

static inline bool interrupt_pending(struct ctlr_info *h)
{
	return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(struct ctlr_info *h)
{
	return (h->access.intr_pending(h) == 0) ||
		(h->interrupts_enabled == 0);
}

static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
	u32 raw_tag)
{
	if (unlikely(tag_index >= h->nr_cmds)) {
		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
		return 1;
	}
	return 0;
}

static inline void finish_cmd(struct CommandList *c)
{
	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
			|| c->cmd_type == CMD_IOACCEL2))
		complete_scsi_command(c);
	else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
		complete(c->waiting);
}
/* process completion of an indexed ("direct lookup") command */
static inline void process_indexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag_index;
	struct CommandList *c;

	tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
	if (!bad_tag(h, tag_index, raw_tag)) {
		c = h->cmd_pool + tag_index;
		finish_cmd(c);
	}
}
/* Some controllers, like p400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * Only need to check for this in the hpsa_xxx_discard_completions
 * functions.
 */
static int ignore_bogus_interrupt(struct ctlr_info *h)
{
	if (likely(!reset_devices))
		return 0;

	if (likely(h->interrupts_enabled))
		return 0;

	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled (known firmware bug.)  Ignoring.\n");

	return 1;
}
/*
 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 * Relies on (h-q[x] == x) being true for x such that
 * 0 <= x < MAX_REPLY_QUEUES.
 */
static struct ctlr_info *queue_to_hba(u8 *queue)
{
	return container_of((queue - *queue), struct ctlr_info, q[0]);
}
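
/*
 * Illustrative sketch: queue_to_hba() works because hpsa_request_irqs()
 * initializes h->q[x] = x, so 'queue - *queue' always lands on q[0] and
 * container_of() can then recover the enclosing structure.  The same trick
 * in miniature, with hypothetical names (demo_hba/demo_queue_to_hba):
 */
#if 0
struct demo_hba {
	int id;
	u8 q[4];	/* invariant: q[x] == x, set up at init time */
};

static struct demo_hba *demo_queue_to_hba(u8 *queue)
{
	/* step back *queue entries to reach q[0], then back to the struct */
	return container_of(queue - *queue, struct demo_hba, q[0]);
}
#endif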
static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u8 q = *(u8 *) queue;
	u32 raw_tag;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY)
			raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}

static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY)
		raw_tag = next_command(h, q);
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba((u8 *) queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY) {
			process_indexed_cmd(h, raw_tag);
			raw_tag = next_command(h, q);
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY) {
		process_indexed_cmd(h, raw_tag);
		raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}
/* Send a message CDB to the firmware. Careful, this only works
 * in simple mode, not performant mode due to the tag lookup.
 * We only ever use this immediately after a controller reset.
 */
static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
			unsigned char type)
{
	struct Command {
		struct CommandListHeader CommandHeader;
		struct RequestBlock Request;
		struct ErrDescriptor ErrorDescriptor;
	};
	struct Command *cmd;
	static const size_t cmd_sz = sizeof(*cmd) +
					sizeof(cmd->ErrorDescriptor);
	dma_addr_t paddr64;
	__le32 paddr32;
	u32 tag;
	void __iomem *vaddr;
	int i, err;

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL)
		return -ENOMEM;

	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
	 * CCISS commands, so they must be allocated from the lower 4GiB of
	 * memory.
	 */
	err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		iounmap(vaddr);
		return err;
	}

	cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
	if (cmd == NULL) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
	 * although there's no guarantee, we assume that the address is at
	 * least 4-byte aligned (most likely, it's page-aligned).
	 */
	paddr32 = cpu_to_le32(paddr64);

	cmd->CommandHeader.ReplyQueue = 0;
	cmd->CommandHeader.SGList = 0;
	cmd->CommandHeader.SGTotal = cpu_to_le16(0);
	cmd->CommandHeader.tag = cpu_to_le64(paddr64);
	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);

	cmd->Request.CDBLen = 16;
	cmd->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
	cmd->Request.Timeout = 0; /* Don't time out */
	cmd->Request.CDB[0] = opcode;
	cmd->Request.CDB[1] = type;
	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
	cmd->ErrorDescriptor.Addr =
			cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
	cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));

	writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);

	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
			break;
		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
	}

	iounmap(vaddr);

	/* we leak the DMA buffer here ... no choice since the controller could
	 * still complete the command.
	 */
	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
			opcode, type);
		return -ETIMEDOUT;
	}

	dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);

	if (tag & HPSA_ERROR_BIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
			opcode, type);
		return -EIO;
	}

	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
		opcode, type);
	return 0;
}

#define hpsa_noop(p) hpsa_message(p, 3, 0)
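
/*
 * Illustrative sketch (not part of the driver): hpsa_message() above is a
 * bounded poll loop -- read a status register, compare against the expected
 * tag, sleep, retry, and give up after a fixed number of attempts.  The same
 * skeleton with hypothetical names and limits:
 */
#if 0
static int poll_for_completion(void __iomem *reg, u32 expected)
{
	int i;

	for (i = 0; i < 10; i++) {		/* bounded: never spin forever */
		if ((readl(reg) & ~0xfU) == expected)
			return 0;		/* controller answered */
		msleep(10);			/* give the firmware time */
	}
	return -ETIMEDOUT;			/* caller decides what to leak */
}
#endif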
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
	void __iomem *vaddr, u32 use_doorbell)
{

	if (use_doorbell) {
		/* For everything after the P600, the PCI power state method
		 * of resetting the controller doesn't work, so we have this
		 * other way using the doorbell register.
		 */
		dev_info(&pdev->dev, "using doorbell to reset controller\n");
		writel(use_doorbell, vaddr + SA5_DOORBELL);

		/* PMC hardware guys tell us we need a 10 second delay after
		 * doorbell reset and before any attempt to talk to the board
		 * at all to ensure that this actually works and doesn't fall
		 * over in some weird corner cases.
		 */
		msleep(10000);
	} else { /* Try to do it the PCI power state way */

		/* Quoting from the Open CISS Specification: "The Power
		 * Management Control/Status Register (CSR) controls the power
		 * state of the device.  The normal operating state is D0,
		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
		 * the controller, place the interface device in D3 then to D0,
		 * this causes a secondary PCI reset which will reset the
		 * controller."
		 */

		int rc = 0;

		dev_info(&pdev->dev, "using PCI PM to reset controller\n");

		/* enter the D3hot power management state */
		rc = pci_set_power_state(pdev, PCI_D3hot);
		if (rc)
			return rc;

		msleep(500);

		/* enter the D0 power management state */
		rc = pci_set_power_state(pdev, PCI_D0);
		if (rc)
			return rc;

		/*
		 * The P600 requires a small delay when changing states.
		 * Otherwise we may think the board did not reset and we bail.
		 * This for kdump only and is particular to the P600.
		 */
		msleep(500);
	}
	return 0;
}
static void init_driver_version(char *driver_version, int len)
{
	memset(driver_version, 0, len);
	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}

static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
{
	char *driver_version;
	int i, size = sizeof(cfgtable->driver_version);

	driver_version = kmalloc(size, GFP_KERNEL);
	if (!driver_version)
		return -ENOMEM;

	init_driver_version(driver_version, size);
	for (i = 0; i < size; i++)
		writeb(driver_version[i], &cfgtable->driver_version[i]);
	kfree(driver_version);
	return 0;
}

static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
					  unsigned char *driver_ver)
{
	int i;

	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
		driver_ver[i] = readb(&cfgtable->driver_version[i]);
}

static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
{

	char *driver_ver, *old_driver_ver;
	int rc, size = sizeof(cfgtable->driver_version);

	old_driver_ver = kmalloc_array(2, size, GFP_KERNEL);
	if (!old_driver_ver)
		return -ENOMEM;
	driver_ver = old_driver_ver + size;

	/* After a reset, the 32 bytes of "driver version" in the cfgtable
	 * should have been changed, otherwise we know the reset failed.
	 */
	init_driver_version(old_driver_ver, size);
	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
	rc = !memcmp(driver_ver, old_driver_ver, size);
	kfree(old_driver_ver);
	return rc;
}
/* This does a hard reset of the controller using PCI power management
 * states or the doorbell register.
 */
static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	void __iomem *vaddr;
	unsigned long paddr;
	u32 misc_fw_support;
	int rc;
	struct CfgTable __iomem *cfgtable;
	u32 use_doorbell;
	u16 command_register;

	/* For controllers as old as the P600, this is very nearly
	 * the same thing as
	 *
	 * pci_save_state(pci_dev);
	 * pci_set_power_state(pci_dev, PCI_D3hot);
	 * pci_set_power_state(pci_dev, PCI_D0);
	 * pci_restore_state(pci_dev);
	 *
	 * For controllers newer than the P600, the pci power state
	 * method of resetting doesn't work so we have another way
	 * using the doorbell register.
	 */

	if (!ctlr_is_resettable(board_id)) {
		dev_warn(&pdev->dev, "Controller not resettable\n");
		return -ENODEV;
	}

	/* if controller is soft- but not hard resettable... */
	if (!ctlr_is_hard_resettable(board_id))
		return -ENOTSUPP; /* try soft reset later. */

	/* Save the PCI command register */
	pci_read_config_word(pdev, 4, &command_register);
	pci_save_state(pdev);

	/* find the first memory BAR, so we can find the cfg table */
	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
	if (rc)
		return rc;
	vaddr = remap_pci_mem(paddr, 0x250);
	if (!vaddr)
		return -ENOMEM;

	/* find cfgtable in order to check if reset via doorbell is supported */
	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
					&cfg_base_addr_index, &cfg_offset);
	if (rc)
		goto unmap_vaddr;
	cfgtable = remap_pci_mem(pci_resource_start(pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
	if (!cfgtable) {
		rc = -ENOMEM;
		goto unmap_vaddr;
	}
	rc = write_driver_ver_to_cfgtable(cfgtable);
	if (rc)
		goto unmap_cfgtable;

	/* If reset via doorbell register is supported, use that.
	 * There are two such methods.  Favor the newest method.
	 */
	misc_fw_support = readl(&cfgtable->misc_fw_support);
	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
	if (use_doorbell) {
		use_doorbell = DOORBELL_CTLR_RESET2;
	} else {
		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
		if (use_doorbell) {
			dev_warn(&pdev->dev,
				"Soft reset not supported. Firmware update is required.\n");
			rc = -ENOTSUPP; /* try soft reset */
			goto unmap_cfgtable;
		}
	}

	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		goto unmap_cfgtable;

	pci_restore_state(pdev);
	pci_write_config_word(pdev, 4, command_register);

	/* Some devices (notably the HP Smart Array 5i Controller)
	   need a little pause here */
	msleep(HPSA_POST_RESET_PAUSE_MSECS);

	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"Failed waiting for board to become ready after hard reset\n");
		goto unmap_cfgtable;
	}

	rc = controller_reset_failed(vaddr);
	if (rc < 0)
		goto unmap_cfgtable;
	if (rc) {
		dev_warn(&pdev->dev, "Unable to successfully reset controller. Will try soft reset.\n");
		rc = -ENOTSUPP;
	} else {
		dev_info(&pdev->dev, "board ready after hard reset.\n");
	}

unmap_cfgtable:
	iounmap(cfgtable);

unmap_vaddr:
	iounmap(vaddr);
	return rc;
}
#ifdef HPSA_DEBUG
/*
 * We cannot read the structure directly, for portability we must use
 * the io functions.
 * This is for debug only.
 */
static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
{
	int i;
	char temp_name[17];

	dev_info(dev, "Controller Configuration information\n");
	dev_info(dev, "------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	dev_info(dev, "   Signature = %s\n", temp_name);
	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
	dev_info(dev, "   Transport methods supported = 0x%x\n",
	       readl(&(tb->TransportSupport)));
	dev_info(dev, "   Transport methods active = 0x%x\n",
	       readl(&(tb->TransportActive)));
	dev_info(dev, "   Requested transport Method = 0x%x\n",
	       readl(&(tb->HostWrite.TransportRequest)));
	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntDelay)));
	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntCount)));
	dev_info(dev, "   Max outstanding commands = %d\n",
	       readl(&(tb->CmdsOutMax)));
	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	dev_info(dev, "   Server Name = %s\n", temp_name);
	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
		readl(&(tb->HeartBeat)));
}
#endif				/* HPSA_DEBUG */
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
				       "base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}
static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
{
	pci_free_irq_vectors(h->pdev);
	h->msix_vectors = 0;
}

static void hpsa_setup_reply_map(struct ctlr_info *h)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	for (queue = 0; queue < h->msix_vectors; queue++) {
		mask = pci_irq_get_affinity(h->pdev, queue);
		if (!mask)
			goto fallback;

		for_each_cpu(cpu, mask)
			h->reply_map[cpu] = queue;
	}
	return;

fallback:
	for_each_possible_cpu(cpu)
		h->reply_map[cpu] = 0;
}
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use legacy INTx mode.
 */
static int hpsa_interrupt_mode(struct ctlr_info *h)
{
	unsigned int flags = PCI_IRQ_LEGACY;
	int ret;

	/* Some boards advertise MSI but don't really support it */
	switch (h->board_id) {
	case 0x40700E11:
	case 0x40800E11:
	case 0x40820E11:
	case 0x40830E11:
		break;
	default:
		ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
				PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
		if (ret > 0) {
			h->msix_vectors = ret;
			return 0;
		}

		flags |= PCI_IRQ_MSI;
		break;
	}

	ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
	if (ret < 0)
		return ret;
	return 0;
}
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
				bool *legacy_board)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	if (legacy_board)
		*legacy_board = false;
	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id) {
			if (products[i].access != &SA5A_access &&
			    products[i].access != &SA5B_access)
				return i;
			dev_warn(&pdev->dev,
				 "legacy board ID: 0x%08x\n",
				 *board_id);
			if (legacy_board)
				*legacy_board = true;
			return i;
		}

	dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id);
	if (legacy_board)
		*legacy_board = true;
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}
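
/*
 * Worked example (illustrative only): hpsa_lookup_board_id() packs the PCI
 * subsystem IDs as (device << 16) | vendor.  For a hypothetical subsystem
 * vendor 0x103C and subsystem device 0x3241, the computed board_id is
 * 0x3241103C, which is the value the products[] table is keyed on.
 */
#if 0
u32 subsystem_vendor_id = 0x103C;
u32 subsystem_device_id = 0x3241;
u32 board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		subsystem_vendor_id;	/* 0x3241103C */
#endif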
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
				    unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
				     int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
			       u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}
static void hpsa_free_cfgtables(struct ctlr_info *h)
{
	if (h->transtable) {
		iounmap(h->transtable);
		h->transtable = NULL;
	}
	if (h->cfgtable) {
		iounmap(h->cfgtable);
		h->cfgtable = NULL;
	}
}
/* Find and map CISS config table and transfer table
 * several items must be unmapped (freed) later
 */
static int hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable) {
		dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
		return -ENOMEM;
	}
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
				cfg_base_addr_index)+cfg_offset+trans_offset,
				sizeof(*h->transtable));
	if (!h->transtable) {
		dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
		hpsa_free_cfgtables(h);
		return -ENOMEM;
	}
	return 0;
}
static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
#define MIN_MAX_COMMANDS 16
	BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);

	h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < MIN_MAX_COMMANDS) {
		dev_warn(&h->pdev->dev,
			"Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
			h->max_commands,
			MIN_MAX_COMMANDS);
		h->max_commands = MIN_MAX_COMMANDS;
	}
}
/* If the controller reports that the total max sg entries is greater than 512,
 * then we know that chained SG blocks work.  (Original smart arrays did not
 * support chained SG blocks and would return zero for max sg entries.)
 */
static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
{
	return h->maxsgentries > 512;
}
/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands;
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
	if (hpsa_supports_chained_sg_blocks(h)) {
		/* Limit in-command s/g elements to 32 save dma'able memory. */
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		/*
		 * Original smart arrays supported at most 31 s/g entries
		 * embedded inline in the command (trying to use more
		 * would lock up the controller)
		 */
		h->max_cmd_sg_entries = 31;
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}

	/* Find out what task management functions are supported and cache */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
	if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
}
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_err(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}
static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
	u32 driver_support;

	driver_support = readl(&(h->cfgtable->driver_support));
	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
#ifdef CONFIG_X86
	driver_support |= ENABLE_SCSI_PREFETCH;
#endif
	driver_support |= ENABLE_UNIT_ATTN;
	writel(driver_support, &(h->cfgtable->driver_support));
}
/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* wait until the clear_event_notify bit 6 is cleared by controller. */
	for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
			return 0;
		/* delay and try again */
		msleep(CLEAR_EVENT_WAIT_INTERVAL);
	}
	return -ENODEV;
}
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take awhile.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
		if (h->remove_in_progress)
			goto done;
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			goto done;
		/* delay and try again */
		msleep(MODE_CHANGE_WAIT_INTERVAL);
	}
	return -ENODEV;
done:
	return 0;
}
/* return -ENODEV or other reason on error, 0 on success */
static int hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));

	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h))
		goto error;
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
		goto error;
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
error:
	dev_err(&h->pdev->dev, "failed to enter simple mode\n");
	return -ENODEV;
}
/* free items allocated or mapped by hpsa_pci_init */
static void hpsa_free_pci_init(struct ctlr_info *h)
{
	hpsa_free_cfgtables(h);			/* pci_init 4 */
	iounmap(h->vaddr);			/* pci_init 3 */
	h->vaddr = NULL;
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
	/*
	 * call pci_disable_device before pci_release_regions per
	 * Documentation/driver-api/pci/pci.rst
	 */
	pci_disable_device(h->pdev);		/* pci_init 1 */
	pci_release_regions(h->pdev);		/* pci_init 2 */
}
/* several items must be freed later */
static int hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;
	bool legacy_board;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
	if (prod_index < 0)
		return prod_index;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);
	h->legacy_board = legacy_board;
	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_err(&h->pdev->dev, "failed to enable PCI device\n");
		pci_disable_device(h->pdev);
		return err;
	}

	err = pci_request_regions(h->pdev, HPSA);
	if (err) {
		dev_err(&h->pdev->dev,
			"failed to obtain PCI resources\n");
		pci_disable_device(h->pdev);
		return err;
	}

	pci_set_master(h->pdev);

	err = hpsa_interrupt_mode(h);
	if (err)
		goto clean1;

	/* setup mapping between CPU and reply queue */
	hpsa_setup_reply_map(h);

	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto clean2;	/* intmode+region, pci */
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
		err = -ENOMEM;
		goto clean2;	/* intmode+region, pci */
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto clean3;	/* vaddr, intmode+region, pci */
	err = hpsa_find_cfgtables(h);
	if (err)
		goto clean3;	/* vaddr, intmode+region, pci */
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
	}
	hpsa_set_driver_support_bits(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
	return 0;

clean4:	/* cfgtables, vaddr, intmode+region, pci */
	hpsa_free_cfgtables(h);
clean3:	/* vaddr, intmode+region, pci */
	iounmap(h->vaddr);
	h->vaddr = NULL;
clean2:	/* intmode+region, pci */
	hpsa_disable_interrupt_mode(h);
clean1:
	/*
	 * call pci_disable_device before pci_release_regions per
	 * Documentation/driver-api/pci/pci.rst
	 */
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	return err;
}
static void hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}
static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
{
	int rc, i;
	void __iomem *vaddr;

	if (!reset_devices)
		return 0;

	/* kdump kernel is loading, we don't know in which state is
	 * the pci interface. The dev->enable_cnt is equal zero
	 * so we call enable+disable, wait a while and switch it on.
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
		return -ENODEV;
	}
	pci_disable_device(pdev);
	msleep(260);			/* a randomly chosen number */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		return -ENODEV;
	}

	pci_set_master(pdev);

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL) {
		rc = -ENOMEM;
		goto out_disable;
	}
	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	iounmap(vaddr);

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev, board_id);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc)
		goto out_disable;

	/* Now try to get the controller to respond to a no-op */
	dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
					(i < 11 ? "; re-trying" : ""));
	}

out_disable:

	pci_disable_device(pdev);
	return rc;
}
static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	h->cmd_pool_bits = NULL;
	if (h->cmd_pool) {
		dma_free_coherent(&h->pdev->dev,
				h->nr_cmds * sizeof(struct CommandList),
				h->cmd_pool,
				h->cmd_pool_dhandle);
		h->cmd_pool = NULL;
		h->cmd_pool_dhandle = 0;
	}
	if (h->errinfo_pool) {
		dma_free_coherent(&h->pdev->dev,
				h->nr_cmds * sizeof(struct ErrorInfo),
				h->errinfo_pool,
				h->errinfo_pool_dhandle);
		h->errinfo_pool = NULL;
		h->errinfo_pool_dhandle = 0;
	}
}
static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
				   sizeof(unsigned long),
				   GFP_KERNEL);
	h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &h->cmd_pool_dhandle, GFP_KERNEL);
	h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &h->errinfo_pool_dhandle, GFP_KERNEL);
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		goto clean_up;
	}
	hpsa_preinitialize_commands(h);
	return 0;
clean_up:
	hpsa_free_cmd_pool(h);
	return -ENOMEM;
}
/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
static void hpsa_free_irqs(struct ctlr_info *h)
{
	int i, irq_vector = 0;

	if (hpsa_simple_mode)
		irq_vector = h->intr_mode;

	if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
		/* Single reply queue, only one irq to free */
		free_irq(pci_irq_vector(h->pdev, irq_vector),
				&h->q[h->intr_mode]);
		h->q[h->intr_mode] = 0;
		return;
	}

	for (i = 0; i < h->msix_vectors; i++) {
		free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
		h->q[i] = 0;
	}
	for (; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = 0;
}
/* returns 0 on success; cleans up and returns -Enn on error */
static int hpsa_request_irqs(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc, i, irq_vector = 0;

	if (hpsa_simple_mode)
		irq_vector = h->intr_mode;

	/*
	 * initialize h->q[x] = x so that interrupt handlers know which
	 * queue to process.
	 */
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

	if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < h->msix_vectors; i++) {
			sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
			rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
					0, h->intrname[i],
					&h->q[i]);
			if (rc) {
				int j;

				dev_err(&h->pdev->dev,
					"failed to get irq %d for %s\n",
				       pci_irq_vector(h->pdev, i), h->devname);
				for (j = 0; j < i; j++) {
					free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
					h->q[j] = 0;
				}
				for (; j < MAX_REPLY_QUEUES; j++)
					h->q[j] = 0;
				return rc;
			}
		}
	} else {
		/* Use single reply pool */
		if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
			sprintf(h->intrname[0], "%s-msi%s", h->devname,
				h->msix_vectors ? "x" : "");
			rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
				msixhandler, 0,
				h->intrname[0],
				&h->q[h->intr_mode]);
		} else {
			sprintf(h->intrname[h->intr_mode],
				"%s-intx", h->devname);
			rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
				intxhandler, IRQF_SHARED,
				h->intrname[0],
				&h->q[h->intr_mode]);
		}
	}
	if (rc) {
		dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
		       pci_irq_vector(h->pdev, irq_vector), h->devname);
		hpsa_free_irqs(h);
		return -ENODEV;
	}
	return 0;
}
static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	int rc;

	hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER);

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
	if (rc) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return rc;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&h->pdev->dev,
			"Board failed to become ready after soft reset.\n");
		return rc;
	}

	return 0;
}
static void hpsa_free_reply_queues(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nreply_queues; i++) {
		if (!h->reply_queue[i].head)
			continue;
		dma_free_coherent(&h->pdev->dev,
					h->reply_queue_size,
					h->reply_queue[i].head,
					h->reply_queue[i].busaddr);
		h->reply_queue[i].head = NULL;
		h->reply_queue[i].busaddr = 0;
	}
	h->reply_queue_size = 0;
}
static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_performant_mode(h);		/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
	hpsa_free_cmd_pool(h);			/* init_one 5 */
	hpsa_free_irqs(h);			/* init_one 4 */
	scsi_host_put(h->scsi_host);		/* init_one 3 */
	h->scsi_host = NULL;			/* init_one 3 */
	hpsa_free_pci_init(h);			/* init_one 2_5 */
	free_percpu(h->lockup_detected);	/* init_one 2 */
	h->lockup_detected = NULL;		/* init_one 2 */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);	/* init_one 1 */
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	if (h->monitor_ctlr_wq) {
		destroy_workqueue(h->monitor_ctlr_wq);
		h->monitor_ctlr_wq = NULL;
	}

	kfree(h);				/* init_one 1 */
}
/* Called when controller lockup detected. */
static void fail_all_outstanding_cmds(struct ctlr_info *h)
{
	int i, refcount;
	struct CommandList *c;
	int failcount = 0;

	flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
	for (i = 0; i < h->nr_cmds; i++) {
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (refcount > 1) {
			c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
			finish_cmd(c);
			atomic_dec(&h->commands_outstanding);
			failcount++;
		}
		cmd_free(h, c);
	}
	dev_warn(&h->pdev->dev,
		"failed %d commands in fail_all\n", failcount);
}
static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
{
	int cpu;

	for_each_online_cpu(cpu) {
		u32 *lockup_detected;

		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
		*lockup_detected = value;
	}
	wmb(); /* be sure the per-cpu variables are out to memory */
}
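
/*
 * Illustrative sketch (not part of the driver): the lockup flag is kept
 * per-CPU so the hot I/O paths can test it without cache-line contention;
 * only the rare writer above walks every CPU.  A minimal model of the read
 * side, with hypothetical names (demo_lockup/demo_lockup_detected):
 */
#if 0
static DEFINE_PER_CPU(u32, demo_lockup);

static bool demo_lockup_detected(void)
{
	/* reads only this CPU's copy; no shared cache line is touched */
	return this_cpu_read(demo_lockup) != 0;
}
#endif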
static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;
	u32 lockup_detected;

	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	if (!lockup_detected) {
		/* no heartbeat, but controller gave us a zero. */
		dev_warn(&h->pdev->dev,
			"lockup detected after %d but scratchpad register is zero\n",
			h->heartbeat_sample_interval / HZ);
		lockup_detected = 0xffffffff;
	}
	set_lockup_detected_for_all_cpus(h, lockup_detected);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
			lockup_detected, h->heartbeat_sample_interval / HZ);
	if (lockup_detected == 0xffff0000) {
		dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
		writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
	}
	pci_disable_device(h->pdev);
	fail_all_outstanding_cmds(h);
}
static int detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return true;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
	return false;
}
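
/*
 * Illustrative sketch (not part of the driver): detect_controller_lockup()
 * uses time_after64() rather than a plain '<' so that jiffies wraparound
 * cannot produce a false lockup verdict.  The generic idiom, with
 * hypothetical names:
 */
#if 0
u64 deadline = get_jiffies_64() + 30 * HZ;	/* 30 seconds from now */

if (time_after64(get_jiffies_64(), deadline))
	;	/* expired -- correct even if the counter wrapped in between */
#endif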
/*
 * Set ioaccel status for all ioaccel volumes.
 *
 * Called from monitor controller worker (hpsa_event_monitor_worker)
 *
 * A Volume (or Volumes that comprise an Array set) may be undergoing a
 * transformation, so we will be turning off ioaccel for all volumes that
 * make up the Array.
 */
static void hpsa_set_ioaccel_status(struct ctlr_info *h)
{
	int rc;
	int i;
	u8 ioaccel_status;
	unsigned char *buf;
	struct hpsa_scsi_dev_t *device;

	if (!h)
		return;

	buf = kmalloc(64, GFP_KERNEL);
	if (!buf)
		return;

	/*
	 * Run through current device list used during I/O requests.
	 */
	for (i = 0; i < h->ndevices; i++) {
		int offload_to_be_enabled = 0;
		int offload_config = 0;

		device = h->dev[i];

		if (!device)
			continue;
		if (!hpsa_vpd_page_supported(h, device->scsi3addr,
						HPSA_VPD_LV_IOACCEL_STATUS))
			continue;

		memset(buf, 0, 64);

		rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
					VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
					buf, 64);
		if (rc != 0)
			continue;

		ioaccel_status = buf[IOACCEL_STATUS_BYTE];

		/*
		 * Check if offload is still configured on
		 */
		offload_config =
				!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
		/*
		 * If offload is configured on, check to see if ioaccel
		 * needs to be enabled.
		 */
		if (offload_config)
			offload_to_be_enabled =
				!!(ioaccel_status & OFFLOAD_ENABLED_BIT);

		/*
		 * If ioaccel is to be re-enabled, re-enable later during the
		 * scan operation so the driver can get a fresh raidmap
		 * before turning ioaccel back on.
		 */
		if (offload_to_be_enabled)
			continue;

		/*
		 * Immediately turn off ioaccel for any volume the
		 * controller tells us to. Some of the reasons could be:
		 *    transformation - change to the LVs of an Array.
		 *    degraded volume - component failure
		 */
		hpsa_turn_off_ioaccel_for_device(device);
	}

	kfree(buf);
}
static void hpsa_ack_ctlr_events(struct ctlr_info *h)
{
	char *event_type;

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return;

	/* Ask the controller to clear the events we're handling. */
	if ((h->transMethod & (CFGTBL_Trans_io_accel1
			| CFGTBL_Trans_io_accel2)) &&
		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {

		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
			event_type = "state change";
		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
			event_type = "configuration change";
		/* Stop sending new RAID offload reqs via the IO accelerator */
		scsi_block_requests(h->scsi_host);
		hpsa_set_ioaccel_status(h);
		hpsa_drain_accel_commands(h);
		/* Set 'accelerator path config change' bit */
		dev_warn(&h->pdev->dev,
			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
			h->events, event_type);
		writel(h->events, &(h->cfgtable->clear_event_notify));
		/* Set the "clear event notify field update" bit 6 */
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		/* Wait until ctlr clears 'clear event notify field', bit 6 */
		hpsa_wait_for_clear_event_notify_ack(h);
		scsi_unblock_requests(h->scsi_host);
	} else {
		/* Acknowledge controller notification events. */
		writel(h->events, &(h->cfgtable->clear_event_notify));
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_clear_event_notify_ack(h);
	}
}
/* Check a register on the controller to see if there are configuration
 * changes (added/changed/removed logical drives, etc.) which mean that
 * we should rescan the controller for devices.
 * Also check flag for driver-initiated rescan.
 */
static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
{
	if (h->drv_req_rescan) {
		h->drv_req_rescan = 0;
		return 1;
	}

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return 0;

	h->events = readl(&(h->cfgtable->event_notify));
	return h->events & RESCAN_REQUIRED_EVENT_BITS;
}
/*
 * Check if any of the offline devices have become ready
 */
static int hpsa_offline_devices_ready(struct ctlr_info *h)
{
	unsigned long flags;
	struct offline_device_entry *d;
	struct list_head *this, *tmp;

	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_safe(this, tmp, &h->offline_device_list) {
		d = list_entry(this, struct offline_device_entry,
				offline_list);
		spin_unlock_irqrestore(&h->offline_device_lock, flags);
		if (!hpsa_volume_offline(h, d->scsi3addr)) {
			spin_lock_irqsave(&h->offline_device_lock, flags);
			list_del(&d->offline_list);
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return 1;
		}
		spin_lock_irqsave(&h->offline_device_lock, flags);
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
	return 0;
}
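/*
 * Note the lock juggling above: offline_device_lock is dropped around
 * hpsa_volume_offline(), which issues real commands and may sleep, and
 * list_for_each_safe() keeps the traversal valid across the list_del().
 */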
static int hpsa_luns_changed(struct ctlr_info *h)
{
	int rc = 1; /* assume there are changes */
	struct ReportLUNdata *logdev = NULL;

	/* if we can't find out if lun data has changed,
	 * assume that it has.
	 */

	if (!h->lastlogicals)
		return rc;

	logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
	if (!logdev)
		return rc;

	if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
		dev_warn(&h->pdev->dev,
			"report luns failed, can't track lun changes.\n");
		goto out;
	}
	if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
		dev_info(&h->pdev->dev,
			"Lun changes detected.\n");
		memcpy(h->lastlogicals, logdev, sizeof(*logdev));
		goto out;
	} else
		rc = 0; /* no changes detected. */
out:
	kfree(logdev);
	return rc;
}
static void hpsa_perform_rescan(struct ctlr_info *h)
{
	struct Scsi_Host *sh = NULL;
	unsigned long flags;

	/*
	 * Do the scan after the reset
	 */
	spin_lock_irqsave(&h->reset_lock, flags);
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		spin_unlock_irqrestore(&h->reset_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->reset_lock, flags);

	sh = scsi_host_get(h->scsi_host);
	if (sh != NULL) {
		hpsa_scan_start(sh);
		scsi_host_put(sh);
		h->drv_req_rescan = 0;
	}
}
/*
 * watch for controller events
 */
static void hpsa_event_monitor_worker(struct work_struct *work)
{
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, event_monitor_work);
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	if (h->remove_in_progress) {
		spin_unlock_irqrestore(&h->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->lock, flags);

	if (hpsa_ctlr_needs_rescan(h)) {
		hpsa_ack_ctlr_events(h);
		hpsa_perform_rescan(h);
	}

	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work,
				HPSA_EVENT_MONITOR_INTERVAL);
	spin_unlock_irqrestore(&h->lock, flags);
}
static void hpsa_rescan_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, rescan_ctlr_work);

	spin_lock_irqsave(&h->lock, flags);
	if (h->remove_in_progress) {
		spin_unlock_irqrestore(&h->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->lock, flags);

	if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
		hpsa_perform_rescan(h);
	} else if (h->discovery_polling) {
		if (hpsa_luns_changed(h)) {
			dev_info(&h->pdev->dev,
				"driver discovery polling rescan.\n");
			hpsa_perform_rescan(h);
		}
	}
	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}
static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);

	detect_controller_lockup(h);
	if (lockup_detected(h))
		return;

	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}
static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
						char *name)
{
	struct workqueue_struct *wq = NULL;

	wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
	if (!wq)
		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);

	return wq;
}
static void hpda_free_ctlr_info(struct ctlr_info *h)
{
	kfree(h->reply_map);
	kfree(h);
}

static struct ctlr_info *hpda_alloc_ctlr_info(void)
{
	struct ctlr_info *h;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return NULL;

	h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
	if (!h->reply_map) {
		kfree(h);
		return NULL;
	}
	return h;
}
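/*
 * Note on hpda_alloc_ctlr_info() above: reply_map has one entry per
 * possible CPU; hpsa_setup_reply_map() later fills it in so completions
 * can be steered to the reply queue tied to each CPU's MSI-X vector.
 */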
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;
	u32 board_id;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
	if (rc < 0) {
		dev_warn(&pdev->dev, "Board ID not found\n");
		return rc;
	}

	rc = hpsa_init_reset_devices(pdev, board_id);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware. and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = hpda_alloc_ctlr_info();
	if (!h) {
		dev_err(&pdev->dev, "Failed to allocate controller head\n");
		return -ENOMEM;
	}

	h->pdev = pdev;

	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->offline_device_list);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->offline_device_lock);
	spin_lock_init(&h->scan_lock);
	spin_lock_init(&h->reset_lock);
	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);

	/* Allocate and clear per-cpu variable lockup_detected */
	h->lockup_detected = alloc_percpu(u32);
	if (!h->lockup_detected) {
		dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
		rc = -ENOMEM;
		goto clean1;	/* aer/h */
	}
	set_lockup_detected_for_all_cpus(h, 0);

	rc = hpsa_pci_init(h);
	if (rc)
		goto clean2;	/* lu, aer/h */

	/* relies on h-> settings made by hpsa_pci_init, including
	 * interrupt_mode h->intr */
	rc = hpsa_scsi_host_alloc(h);
	if (rc)
		goto clean2_5;	/* pci, lu, aer/h */

	sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (rc != 0) {
		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (rc != 0) {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean3;	/* shost, pci, lu, aer/h */
		}
	}
	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
	if (rc)
		goto clean3;	/* shost, pci, lu, aer/h */
	rc = hpsa_alloc_cmd_pool(h);
	if (rc)
		goto clean4;	/* irq, shost, pci, lu, aer/h */
	rc = hpsa_alloc_sg_chain_blocks(h);
	if (rc)
		goto clean5;	/* cmd, irq, shost, pci, lu, aer/h */
	init_waitqueue_head(&h->scan_wait_queue);
	init_waitqueue_head(&h->event_sync_wait_queue);
	mutex_init(&h->reset_mutex);
	h->scan_finished = 1; /* no scan currently in progress */
	h->scan_waiting = 0;

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;

	spin_lock_init(&h->devlock);
	rc = hpsa_put_ctlr_into_performant_mode(h);
	if (rc)
		goto clean6;	/* sg, cmd, irq, shost, pci, lu, aer/h */

	/* create the resubmit workqueue */
	h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
	if (!h->rescan_ctlr_wq) {
		rc = -ENOMEM;
		goto clean7;
	}

	h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
	if (!h->resubmit_wq) {
		rc = -ENOMEM;
		goto clean7;	/* aer/h */
	}

	h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
	if (!h->monitor_ctlr_wq) {
		rc = -ENOMEM;
		goto clean7;
	}
	/*
	 * At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_free_irqs(h);
		rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Failed to request_irq after soft reset.\n");
			/*
			 * cannot goto clean7 or free_irqs will be called
			 * again. Instead, do its work
			 */
			hpsa_free_performant_mode(h);	/* clean7 */
			hpsa_free_sg_chain_blocks(h);	/* clean6 */
			hpsa_free_cmd_pool(h);		/* clean5 */
			/*
			 * skip hpsa_free_irqs(h) clean4 since that
			 * was just called before request_irqs failed
			 */
			goto clean3;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean7;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do it
		 * all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't goto clean, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}
	/* Enable Accelerated IO path at driver layer */
	h->acciopath_status = 1;
	/* Disable discovery polling.*/
	h->discovery_polling = 0;

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);

	h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
	if (!h->lastlogicals)
		dev_info(&h->pdev->dev,
			"Can't track change to report lun data\n");

	/* hook into SCSI subsystem */
	rc = hpsa_scsi_add_host(h);
	if (rc)
		goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */

	/* Monitor the controller for firmware lockups */
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
	queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
	schedule_delayed_work(&h->event_monitor_work,
				HPSA_EVENT_MONITOR_INTERVAL);
	return 0;
clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_performant_mode(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */
	hpsa_free_sg_chain_blocks(h);
clean5: /* cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_cmd_pool(h);
clean4: /* irq, shost, pci, lu, aer/h */
	hpsa_free_irqs(h);
clean3: /* shost, pci, lu, aer/h */
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
clean2_5: /* pci, lu, aer/h */
	hpsa_free_pci_init(h);
clean2: /* lu, aer/h */
	if (h->lockup_detected) {
		free_percpu(h->lockup_detected);
		h->lockup_detected = NULL;
	}
clean1:	/* wq/aer/h */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	if (h->monitor_ctlr_wq) {
		destroy_workqueue(h->monitor_ctlr_wq);
		h->monitor_ctlr_wq = NULL;
	}
	kfree(h);
	return rc;
}
static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;
	int rc;

	if (unlikely(lockup_detected(h)))
		return;
	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto out;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
			DEFAULT_TIMEOUT);
	if (rc || c->err_info->CommandStatus != 0)
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
out:
	cmd_free(h, c);
	kfree(flush_buf);
}
/* Make controller gather fresh report lun data each time we
 * send down a report luns request
 */
static void hpsa_disable_rld_caching(struct ctlr_info *h)
{
	u32 *options;
	struct CommandList *c;
	int rc;

	/* Don't bother trying to set diag options if locked up */
	if (unlikely(h->lockup_detected))
		return;

	options = kzalloc(sizeof(*options), GFP_KERNEL);
	if (!options)
		return;

	c = cmd_alloc(h);

	/* first, get the current diag options settings */
	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	/* Now, set the bit for disabling the RLD caching */
	*options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;

	if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
			NO_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	/* Now verify that it got set: */
	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
			NO_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
		goto out;

errout:
	dev_err(&h->pdev->dev,
			"Error: failed to disable report lun data caching.\n");
out:
	cmd_free(h, c);
	kfree(options);
}
static void __hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and send the flush cache command
	 * sendcmd will turn off interrupt, and send the flush...
	 * To write all data in the battery backed cache to disks
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs(h);			/* init_one 4 */
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
}

static void hpsa_shutdown(struct pci_dev *pdev)
{
	__hpsa_shutdown(pdev);
	pci_disable_device(pdev);
}
static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++) {
		kfree(h->dev[i]);
		h->dev[i] = NULL;
	}
}
static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	spin_unlock_irqrestore(&h->lock, flags);
	cancel_delayed_work_sync(&h->monitor_ctlr_work);
	cancel_delayed_work_sync(&h->rescan_ctlr_work);
	cancel_delayed_work_sync(&h->event_monitor_work);
	destroy_workqueue(h->rescan_ctlr_wq);
	destroy_workqueue(h->resubmit_wq);
	destroy_workqueue(h->monitor_ctlr_wq);

	hpsa_delete_sas_host(h);

	/*
	 * Call before disabling interrupts.
	 * scsi_remove_host can trigger I/O operations especially
	 * when multipath is enabled. There can be SYNCHRONIZE CACHE
	 * operations which cannot complete and will hang the system.
	 */
	scsi_remove_host(h->scsi_host);		/* init_one 8 */
	/* includes hpsa_free_irqs - init_one 4 */
	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	__hpsa_shutdown(pdev);

	hpsa_free_device_info(h);		/* scan */

	kfree(h->hba_inquiry_data);			/* init_one 10 */
	h->hba_inquiry_data = NULL;			/* init_one 10 */
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	hpsa_free_performant_mode(h);			/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);			/* init_one 6 */
	hpsa_free_cmd_pool(h);				/* init_one 5 */
	kfree(h->lastlogicals);

	/* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */

	scsi_host_put(h->scsi_host);			/* init_one 3 */
	h->scsi_host = NULL;				/* init_one 3 */

	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_free_pci_init(h);				/* init_one 2.5 */

	free_percpu(h->lockup_detected);		/* init_one 2 */
	h->lockup_detected = NULL;			/* init_one 2 */
	/* (void) pci_disable_pcie_error_reporting(pdev); */	/* init_one 1 */

	hpda_free_ctlr_info(h);				/* init_one 1 */
}
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};
/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
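/*
 * Worked example: with bucket[] = {5, 6, 8, 10, 12, 20, 28, ...} and
 * min_blocks = 4, a command carrying 3 SG entries needs
 * size = 3 + 4 = 7 sixteen-byte blocks; the smallest bucket that fits
 * is bucket[2] = 8, so bucket_map[3] = 2 and the controller fetches
 * 8 * 16 = 128 bytes for that command.
 */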
/*
 * return -ENODEV on err, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
			16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */
	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/*
	 * enable outbound interrupt coalescing in accelerator mode;
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else if (trans_support & CFGTBL_Trans_io_accel2)
		access = SA5_ioaccel_mode2_access;
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - doorbell timeout\n");
		return -ENODEV;
	}
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return -ENODEV;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return 0;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
					&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - enabling ioaccel mode\n");
		return -ENODEV;
	}
	return 0;
}
/* Free ioaccel1 mode command blocks and block fetch table */
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	if (h->ioaccel_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool,
			h->ioaccel_cmd_pool_dhandle);
		h->ioaccel_cmd_pool = NULL;
		h->ioaccel_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel1_blockFetchTable);
	h->ioaccel1_blockFetchTable = NULL;
}
/* Allocate ioaccel1 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		dma_alloc_coherent(&h->pdev->dev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel1_cmd_and_bft(h);
	return -ENOMEM;
}
/* Free ioaccel2 mode command blocks and block fetch table */
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	hpsa_free_ioaccel2_sg_chain_blocks(h);

	if (h->ioaccel2_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool,
			h->ioaccel2_cmd_pool_dhandle);
		h->ioaccel2_cmd_pool = NULL;
		h->ioaccel2_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel2_blockFetchTable);
	h->ioaccel2_blockFetchTable = NULL;
}
/* Allocate ioaccel2 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	int rc;

	/* Allocate ioaccel2 mode command blocks and block fetch table */

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		dma_alloc_coherent(&h->pdev->dev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL)) {
		rc = -ENOMEM;
		goto clean_up;
	}

	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
	if (rc)
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}
/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
static void hpsa_free_performant_mode(struct ctlr_info *h)
{
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
}
/* return -ENODEV on error, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i, rc;

	if (hpsa_simple_mode)
		return 0;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return 0;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
		if (rc)
			return rc;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
		if (rc)
			return rc;
	}

	h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
						h->reply_queue_size,
						&h->reply_queue[i].busaddr,
						GFP_KERNEL);
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}
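/*
 * Each reply queue entry is a single 64-bit completion tag, which is
 * why reply_queue_size above is h->max_commands * sizeof(u64): one
 * slot per possible outstanding command.
 */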
static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}
static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
	struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct sas_phy *phy;

	hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
	if (!hpsa_sas_phy)
		return NULL;

	phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
		hpsa_sas_port->next_phy_index);
	if (!phy) {
		kfree(hpsa_sas_phy);
		return NULL;
	}

	hpsa_sas_port->next_phy_index++;
	hpsa_sas_phy->phy = phy;
	hpsa_sas_phy->parent_port = hpsa_sas_port;

	return hpsa_sas_phy;
}

static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	struct sas_phy *phy = hpsa_sas_phy->phy;

	sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
	if (hpsa_sas_phy->added_to_port)
		list_del(&hpsa_sas_phy->phy_list_entry);
	sas_phy_delete(phy);
	kfree(hpsa_sas_phy);
}
static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_phy *phy;
	struct sas_identify *identify;

	hpsa_sas_port = hpsa_sas_phy->parent_port;
	phy = hpsa_sas_phy->phy;

	identify = &phy->identify;
	memset(identify, 0, sizeof(*identify));
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->device_type = SAS_END_DEVICE;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;
	phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;

	rc = sas_phy_add(hpsa_sas_phy->phy);
	if (rc)
		return rc;

	sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
	list_add_tail(&hpsa_sas_phy->phy_list_entry,
			&hpsa_sas_port->phy_list_head);
	hpsa_sas_phy->added_to_port = true;

	return 0;
}
static int hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
				struct sas_rphy *rphy)
{
	struct sas_identify *identify;

	identify = &rphy->identify;
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;

	return sas_rphy_add(rphy);
}
static struct hpsa_sas_port
	*hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
				u64 sas_address)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_port *port;

	hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
	if (!hpsa_sas_port)
		return NULL;

	INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
	hpsa_sas_port->parent_node = hpsa_sas_node;

	port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
	if (!port)
		goto free_hpsa_port;

	rc = sas_port_add(port);
	if (rc)
		goto free_sas_port;

	hpsa_sas_port->port = port;
	hpsa_sas_port->sas_address = sas_address;
	list_add_tail(&hpsa_sas_port->port_list_entry,
			&hpsa_sas_node->port_list_head);

	return hpsa_sas_port;

free_sas_port:
	sas_port_free(port);
free_hpsa_port:
	kfree(hpsa_sas_port);

	return NULL;
}
static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct hpsa_sas_phy *next;

	list_for_each_entry_safe(hpsa_sas_phy, next,
			&hpsa_sas_port->phy_list_head, phy_list_entry)
		hpsa_free_sas_phy(hpsa_sas_phy);

	sas_port_delete(hpsa_sas_port->port);
	list_del(&hpsa_sas_port->port_list_entry);
	kfree(hpsa_sas_port);
}
static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
{
	struct hpsa_sas_node *hpsa_sas_node;

	hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
	if (hpsa_sas_node) {
		hpsa_sas_node->parent_dev = parent_dev;
		INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
	}

	return hpsa_sas_node;
}
static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
{
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_port *next;

	if (!hpsa_sas_node)
		return;

	list_for_each_entry_safe(hpsa_sas_port, next,
			&hpsa_sas_node->port_list_head, port_list_entry)
		hpsa_free_sas_port(hpsa_sas_port);

	kfree(hpsa_sas_node);
}
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
					struct sas_rphy *rphy)
{
	int i;
	struct hpsa_scsi_dev_t *device;

	for (i = 0; i < h->ndevices; i++) {
		device = h->dev[i];
		if (!device->sas_port)
			continue;
		if (device->sas_port->rphy == rphy)
			return device;
	}

	return NULL;
}
static int hpsa_add_sas_host(struct ctlr_info *h)
{
	int rc;
	struct device *parent_dev;
	struct hpsa_sas_node *hpsa_sas_node;
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_phy *hpsa_sas_phy;

	parent_dev = &h->scsi_host->shost_dev;

	hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
	if (!hpsa_sas_node)
		return -ENOMEM;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
	if (!hpsa_sas_port) {
		rc = -ENODEV;
		goto free_sas_node;
	}

	hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
	if (!hpsa_sas_phy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
	if (rc)
		goto free_sas_phy;

	h->sas_host = hpsa_sas_node;

	return 0;

free_sas_phy:
	hpsa_free_sas_phy(hpsa_sas_phy);
free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
free_sas_node:
	hpsa_free_sas_node(hpsa_sas_node);

	return rc;
}
static void hpsa_delete_sas_host(struct ctlr_info *h)
{
	hpsa_free_sas_node(h->sas_host);
}
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
				struct hpsa_scsi_dev_t *device)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_rphy *rphy;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
	if (!hpsa_sas_port)
		return -ENOMEM;

	rphy = sas_end_device_alloc(hpsa_sas_port->port);
	if (!rphy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	hpsa_sas_port->rphy = rphy;
	device->sas_port = hpsa_sas_port;

	rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
	if (rc)
		goto free_sas_port;

	return 0;

free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
	device->sas_port = NULL;

	return rc;
}
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
{
	if (device->sas_port) {
		hpsa_free_sas_port(device->sas_port);
		device->sas_port = NULL;
	}
}
static int
hpsa_sas_get_linkerrors(struct sas_phy *phy)
{
	return 0;
}

static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
	struct Scsi_Host *shost = phy_to_shost(rphy);
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *sd;

	if (!shost)
		return -ENXIO;

	h = shost_to_hba(shost);

	if (!h)
		return -ENXIO;

	sd = hpsa_find_device_by_sas_rphy(h, rphy);
	if (!sd)
		return -ENXIO;

	*identifier = sd->eli;

	return 0;
}

static int
hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
{
	return -ENXIO;
}

static int
hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	return 0;
}

static int
hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
{
	return 0;
}

static int
hpsa_sas_phy_setup(struct sas_phy *phy)
{
	return 0;
}

static void
hpsa_sas_phy_release(struct sas_phy *phy)
{
}

static int
hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
{
	return -EINVAL;
}
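/*
 * Most of the callbacks above are minimal stubs: hpsa exposes just
 * enough of a SAS transport topology (one host node, one port and phy
 * per device) for sysfs and multipath tooling, and only
 * get_enclosure_identifier reports real device data.
 */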
static struct sas_function_template hpsa_sas_transport_functions = {
	.get_linkerrors = hpsa_sas_get_linkerrors,
	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
	.get_bay_identifier = hpsa_sas_get_bay_identifier,
	.phy_reset = hpsa_sas_phy_reset,
	.phy_enable = hpsa_sas_phy_enable,
	.phy_setup = hpsa_sas_phy_setup,
	.phy_release = hpsa_sas_phy_release,
	.set_phy_speed = hpsa_sas_phy_speed,
};
/*
 * This is it.  Register the PCI driver information for the cards we control
 * the OS will call our registered routines when it finds one of our cards.
 */
static int __init hpsa_init(void)
{
	int rc;

	hpsa_sas_transport_template =
		sas_attach_transport(&hpsa_sas_transport_functions);
	if (!hpsa_sas_transport_template)
		return -ENODEV;

	rc = pci_register_driver(&hpsa_pci_driver);

	if (rc)
		sas_release_transport(hpsa_sas_transport_template);

	return rc;
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
	sas_release_transport(hpsa_sas_transport_template);
}
static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48 */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3 */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4 */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}
module_init(hpsa_init);
module_exit(hpsa_cleanup);