ALSA: dice: fix kernel NULL pointer dereference due to invalid calculation for array...
[linux/fpc-iii.git] / drivers / scsi / hpsa.c
blob3a9eca163db8117e7bbf1132c7965d5253850668
1 /*
2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2016 Microsemi Corporation
4 * Copyright 2014-2015 PMC-Sierra, Inc.
5 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more details.
16 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
20 #include <linux/module.h>
21 #include <linux/interrupt.h>
22 #include <linux/types.h>
23 #include <linux/pci.h>
24 #include <linux/pci-aspm.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include <linux/delay.h>
28 #include <linux/fs.h>
29 #include <linux/timer.h>
30 #include <linux/init.h>
31 #include <linux/spinlock.h>
32 #include <linux/compat.h>
33 #include <linux/blktrace_api.h>
34 #include <linux/uaccess.h>
35 #include <linux/io.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/completion.h>
38 #include <linux/moduleparam.h>
39 #include <scsi/scsi.h>
40 #include <scsi/scsi_cmnd.h>
41 #include <scsi/scsi_device.h>
42 #include <scsi/scsi_host.h>
43 #include <scsi/scsi_tcq.h>
44 #include <scsi/scsi_eh.h>
45 #include <scsi/scsi_transport_sas.h>
46 #include <scsi/scsi_dbg.h>
47 #include <linux/cciss_ioctl.h>
48 #include <linux/string.h>
49 #include <linux/bitmap.h>
50 #include <linux/atomic.h>
51 #include <linux/jiffies.h>
52 #include <linux/percpu-defs.h>
53 #include <linux/percpu.h>
54 #include <asm/unaligned.h>
55 #include <asm/div64.h>
56 #include "hpsa_cmd.h"
57 #include "hpsa.h"
60 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
61 * with an optional trailing '-' followed by a byte value (0-255).
63 #define HPSA_DRIVER_VERSION "3.4.20-125"
64 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
65 #define HPSA "hpsa"
67 /* How long to wait for CISS doorbell communication */
68 #define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */
69 #define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */
70 #define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */
71 #define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */
72 #define MAX_IOCTL_CONFIG_WAIT 1000
74 /*define how many times we will try a command because of bus resets */
75 #define MAX_CMD_RETRIES 3
77 /* Embedded module documentation macros - see modules.h */
78 MODULE_AUTHOR("Hewlett-Packard Company");
79 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
80 HPSA_DRIVER_VERSION);
81 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
82 MODULE_VERSION(HPSA_DRIVER_VERSION);
83 MODULE_LICENSE("GPL");
84 MODULE_ALIAS("cciss");
86 static int hpsa_simple_mode;
87 module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
88 MODULE_PARM_DESC(hpsa_simple_mode,
89 "Use 'simple mode' rather than 'performant mode'");
91 /* define the PCI info for the cards we can control */
92 static const struct pci_device_id hpsa_pci_device_id[] = {
93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
102 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
103 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920},
109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
110 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
111 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
112 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
113 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925},
114 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
115 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
116 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
117 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
118 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
119 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
120 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
121 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
122 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
123 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
124 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
125 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
126 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
127 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
128 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
129 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
130 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
131 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
132 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
133 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
134 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
135 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
136 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
137 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
138 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
139 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
140 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
141 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
142 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
143 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
144 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
145 {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
146 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
147 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
148 {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
149 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
150 {0,}
153 MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
155 /* board_id = Subsystem Device ID & Vendor ID
156 * product = Marketing Name for the board
157 * access = Address of the struct of function pointers
159 static struct board_type products[] = {
160 {0x40700E11, "Smart Array 5300", &SA5A_access},
161 {0x40800E11, "Smart Array 5i", &SA5B_access},
162 {0x40820E11, "Smart Array 532", &SA5B_access},
163 {0x40830E11, "Smart Array 5312", &SA5B_access},
164 {0x409A0E11, "Smart Array 641", &SA5A_access},
165 {0x409B0E11, "Smart Array 642", &SA5A_access},
166 {0x409C0E11, "Smart Array 6400", &SA5A_access},
167 {0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
168 {0x40910E11, "Smart Array 6i", &SA5A_access},
169 {0x3225103C, "Smart Array P600", &SA5A_access},
170 {0x3223103C, "Smart Array P800", &SA5A_access},
171 {0x3234103C, "Smart Array P400", &SA5A_access},
172 {0x3235103C, "Smart Array P400i", &SA5A_access},
173 {0x3211103C, "Smart Array E200i", &SA5A_access},
174 {0x3212103C, "Smart Array E200", &SA5A_access},
175 {0x3213103C, "Smart Array E200i", &SA5A_access},
176 {0x3214103C, "Smart Array E200i", &SA5A_access},
177 {0x3215103C, "Smart Array E200i", &SA5A_access},
178 {0x3237103C, "Smart Array E500", &SA5A_access},
179 {0x323D103C, "Smart Array P700m", &SA5A_access},
180 {0x3241103C, "Smart Array P212", &SA5_access},
181 {0x3243103C, "Smart Array P410", &SA5_access},
182 {0x3245103C, "Smart Array P410i", &SA5_access},
183 {0x3247103C, "Smart Array P411", &SA5_access},
184 {0x3249103C, "Smart Array P812", &SA5_access},
185 {0x324A103C, "Smart Array P712m", &SA5_access},
186 {0x324B103C, "Smart Array P711m", &SA5_access},
187 {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
188 {0x3350103C, "Smart Array P222", &SA5_access},
189 {0x3351103C, "Smart Array P420", &SA5_access},
190 {0x3352103C, "Smart Array P421", &SA5_access},
191 {0x3353103C, "Smart Array P822", &SA5_access},
192 {0x3354103C, "Smart Array P420i", &SA5_access},
193 {0x3355103C, "Smart Array P220i", &SA5_access},
194 {0x3356103C, "Smart Array P721m", &SA5_access},
195 {0x1920103C, "Smart Array P430i", &SA5_access},
196 {0x1921103C, "Smart Array P830i", &SA5_access},
197 {0x1922103C, "Smart Array P430", &SA5_access},
198 {0x1923103C, "Smart Array P431", &SA5_access},
199 {0x1924103C, "Smart Array P830", &SA5_access},
200 {0x1925103C, "Smart Array P831", &SA5_access},
201 {0x1926103C, "Smart Array P731m", &SA5_access},
202 {0x1928103C, "Smart Array P230i", &SA5_access},
203 {0x1929103C, "Smart Array P530", &SA5_access},
204 {0x21BD103C, "Smart Array P244br", &SA5_access},
205 {0x21BE103C, "Smart Array P741m", &SA5_access},
206 {0x21BF103C, "Smart HBA H240ar", &SA5_access},
207 {0x21C0103C, "Smart Array P440ar", &SA5_access},
208 {0x21C1103C, "Smart Array P840ar", &SA5_access},
209 {0x21C2103C, "Smart Array P440", &SA5_access},
210 {0x21C3103C, "Smart Array P441", &SA5_access},
211 {0x21C4103C, "Smart Array", &SA5_access},
212 {0x21C5103C, "Smart Array P841", &SA5_access},
213 {0x21C6103C, "Smart HBA H244br", &SA5_access},
214 {0x21C7103C, "Smart HBA H240", &SA5_access},
215 {0x21C8103C, "Smart HBA H241", &SA5_access},
216 {0x21C9103C, "Smart Array", &SA5_access},
217 {0x21CA103C, "Smart Array P246br", &SA5_access},
218 {0x21CB103C, "Smart Array P840", &SA5_access},
219 {0x21CC103C, "Smart Array", &SA5_access},
220 {0x21CD103C, "Smart Array", &SA5_access},
221 {0x21CE103C, "Smart HBA", &SA5_access},
222 {0x05809005, "SmartHBA-SA", &SA5_access},
223 {0x05819005, "SmartHBA-SA 8i", &SA5_access},
224 {0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
225 {0x05839005, "SmartHBA-SA 8e", &SA5_access},
226 {0x05849005, "SmartHBA-SA 16i", &SA5_access},
227 {0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
228 {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
229 {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
230 {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
231 {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
232 {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
233 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
236 static struct scsi_transport_template *hpsa_sas_transport_template;
237 static int hpsa_add_sas_host(struct ctlr_info *h);
238 static void hpsa_delete_sas_host(struct ctlr_info *h);
239 static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
240 struct hpsa_scsi_dev_t *device);
241 static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
242 static struct hpsa_scsi_dev_t
243 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
244 struct sas_rphy *rphy);
246 #define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
247 static const struct scsi_cmnd hpsa_cmd_busy;
248 #define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
249 static const struct scsi_cmnd hpsa_cmd_idle;
250 static int number_of_controllers;
252 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
253 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
254 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
256 #ifdef CONFIG_COMPAT
257 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
258 void __user *arg);
259 #endif
261 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
262 static struct CommandList *cmd_alloc(struct ctlr_info *h);
263 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
264 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
265 struct scsi_cmnd *scmd);
266 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
267 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
268 int cmd_type);
269 static void hpsa_free_cmd_pool(struct ctlr_info *h);
270 #define VPD_PAGE (1 << 8)
271 #define HPSA_SIMPLE_ERROR_BITS 0x03
273 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
274 static void hpsa_scan_start(struct Scsi_Host *);
275 static int hpsa_scan_finished(struct Scsi_Host *sh,
276 unsigned long elapsed_time);
277 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
279 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
280 static int hpsa_slave_alloc(struct scsi_device *sdev);
281 static int hpsa_slave_configure(struct scsi_device *sdev);
282 static void hpsa_slave_destroy(struct scsi_device *sdev);
284 static void hpsa_update_scsi_devices(struct ctlr_info *h);
285 static int check_for_unit_attention(struct ctlr_info *h,
286 struct CommandList *c);
287 static void check_ioctl_unit_attention(struct ctlr_info *h,
288 struct CommandList *c);
289 /* performant mode helper functions */
290 static void calc_bucket_map(int *bucket, int num_buckets,
291 int nsgs, int min_blocks, u32 *bucket_map);
292 static void hpsa_free_performant_mode(struct ctlr_info *h);
293 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
294 static inline u32 next_command(struct ctlr_info *h, u8 q);
295 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
296 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
297 u64 *cfg_offset);
298 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
299 unsigned long *memory_bar);
300 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
301 bool *legacy_board);
302 static int wait_for_device_to_become_ready(struct ctlr_info *h,
303 unsigned char lunaddr[],
304 int reply_queue);
305 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
306 int wait_for_ready);
307 static inline void finish_cmd(struct CommandList *c);
308 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
309 #define BOARD_NOT_READY 0
310 #define BOARD_READY 1
311 static void hpsa_drain_accel_commands(struct ctlr_info *h);
312 static void hpsa_flush_cache(struct ctlr_info *h);
313 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
314 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
315 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
316 static void hpsa_command_resubmit_worker(struct work_struct *work);
317 static u32 lockup_detected(struct ctlr_info *h);
318 static int detect_controller_lockup(struct ctlr_info *h);
319 static void hpsa_disable_rld_caching(struct ctlr_info *h);
320 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
321 struct ReportExtendedLUNdata *buf, int bufsize);
322 static bool hpsa_vpd_page_supported(struct ctlr_info *h,
323 unsigned char scsi3addr[], u8 page);
324 static int hpsa_luns_changed(struct ctlr_info *h);
325 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
326 struct hpsa_scsi_dev_t *dev,
327 unsigned char *scsi3addr);
329 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
331 unsigned long *priv = shost_priv(sdev->host);
332 return (struct ctlr_info *) *priv;
335 static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
337 unsigned long *priv = shost_priv(sh);
338 return (struct ctlr_info *) *priv;
341 static inline bool hpsa_is_cmd_idle(struct CommandList *c)
343 return c->scsi_cmd == SCSI_CMD_IDLE;
346 static inline bool hpsa_is_pending_event(struct CommandList *c)
348 return c->reset_pending;
351 /* extract sense key, asc, and ascq from sense data. -1 means invalid. */
352 static void decode_sense_data(const u8 *sense_data, int sense_data_len,
353 u8 *sense_key, u8 *asc, u8 *ascq)
355 struct scsi_sense_hdr sshdr;
356 bool rc;
358 *sense_key = -1;
359 *asc = -1;
360 *ascq = -1;
362 if (sense_data_len < 1)
363 return;
365 rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
366 if (rc) {
367 *sense_key = sshdr.sense_key;
368 *asc = sshdr.asc;
369 *ascq = sshdr.ascq;
373 static int check_for_unit_attention(struct ctlr_info *h,
374 struct CommandList *c)
376 u8 sense_key, asc, ascq;
377 int sense_len;
379 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
380 sense_len = sizeof(c->err_info->SenseInfo);
381 else
382 sense_len = c->err_info->SenseLen;
384 decode_sense_data(c->err_info->SenseInfo, sense_len,
385 &sense_key, &asc, &ascq);
386 if (sense_key != UNIT_ATTENTION || asc == 0xff)
387 return 0;
389 switch (asc) {
390 case STATE_CHANGED:
391 dev_warn(&h->pdev->dev,
392 "%s: a state change detected, command retried\n",
393 h->devname);
394 break;
395 case LUN_FAILED:
396 dev_warn(&h->pdev->dev,
397 "%s: LUN failure detected\n", h->devname);
398 break;
399 case REPORT_LUNS_CHANGED:
400 dev_warn(&h->pdev->dev,
401 "%s: report LUN data changed\n", h->devname);
403 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
404 * target (array) devices.
406 break;
407 case POWER_OR_RESET:
408 dev_warn(&h->pdev->dev,
409 "%s: a power on or device reset detected\n",
410 h->devname);
411 break;
412 case UNIT_ATTENTION_CLEARED:
413 dev_warn(&h->pdev->dev,
414 "%s: unit attention cleared by another initiator\n",
415 h->devname);
416 break;
417 default:
418 dev_warn(&h->pdev->dev,
419 "%s: unknown unit attention detected\n",
420 h->devname);
421 break;
423 return 1;
426 static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
428 if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
429 (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
430 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
431 return 0;
432 dev_warn(&h->pdev->dev, HPSA "device busy");
433 return 1;
436 static u32 lockup_detected(struct ctlr_info *h);
437 static ssize_t host_show_lockup_detected(struct device *dev,
438 struct device_attribute *attr, char *buf)
440 int ld;
441 struct ctlr_info *h;
442 struct Scsi_Host *shost = class_to_shost(dev);
444 h = shost_to_hba(shost);
445 ld = lockup_detected(h);
447 return sprintf(buf, "ld=%d\n", ld);
450 static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
451 struct device_attribute *attr,
452 const char *buf, size_t count)
454 int status, len;
455 struct ctlr_info *h;
456 struct Scsi_Host *shost = class_to_shost(dev);
457 char tmpbuf[10];
459 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
460 return -EACCES;
461 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
462 strncpy(tmpbuf, buf, len);
463 tmpbuf[len] = '\0';
464 if (sscanf(tmpbuf, "%d", &status) != 1)
465 return -EINVAL;
466 h = shost_to_hba(shost);
467 h->acciopath_status = !!status;
468 dev_warn(&h->pdev->dev,
469 "hpsa: HP SSD Smart Path %s via sysfs update.\n",
470 h->acciopath_status ? "enabled" : "disabled");
471 return count;
474 static ssize_t host_store_raid_offload_debug(struct device *dev,
475 struct device_attribute *attr,
476 const char *buf, size_t count)
478 int debug_level, len;
479 struct ctlr_info *h;
480 struct Scsi_Host *shost = class_to_shost(dev);
481 char tmpbuf[10];
483 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
484 return -EACCES;
485 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
486 strncpy(tmpbuf, buf, len);
487 tmpbuf[len] = '\0';
488 if (sscanf(tmpbuf, "%d", &debug_level) != 1)
489 return -EINVAL;
490 if (debug_level < 0)
491 debug_level = 0;
492 h = shost_to_hba(shost);
493 h->raid_offload_debug = debug_level;
494 dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
495 h->raid_offload_debug);
496 return count;
499 static ssize_t host_store_rescan(struct device *dev,
500 struct device_attribute *attr,
501 const char *buf, size_t count)
503 struct ctlr_info *h;
504 struct Scsi_Host *shost = class_to_shost(dev);
505 h = shost_to_hba(shost);
506 hpsa_scan_start(h->scsi_host);
507 return count;
510 static ssize_t host_show_firmware_revision(struct device *dev,
511 struct device_attribute *attr, char *buf)
513 struct ctlr_info *h;
514 struct Scsi_Host *shost = class_to_shost(dev);
515 unsigned char *fwrev;
517 h = shost_to_hba(shost);
518 if (!h->hba_inquiry_data)
519 return 0;
520 fwrev = &h->hba_inquiry_data[32];
521 return snprintf(buf, 20, "%c%c%c%c\n",
522 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
525 static ssize_t host_show_commands_outstanding(struct device *dev,
526 struct device_attribute *attr, char *buf)
528 struct Scsi_Host *shost = class_to_shost(dev);
529 struct ctlr_info *h = shost_to_hba(shost);
531 return snprintf(buf, 20, "%d\n",
532 atomic_read(&h->commands_outstanding));
535 static ssize_t host_show_transport_mode(struct device *dev,
536 struct device_attribute *attr, char *buf)
538 struct ctlr_info *h;
539 struct Scsi_Host *shost = class_to_shost(dev);
541 h = shost_to_hba(shost);
542 return snprintf(buf, 20, "%s\n",
543 h->transMethod & CFGTBL_Trans_Performant ?
544 "performant" : "simple");
547 static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
548 struct device_attribute *attr, char *buf)
550 struct ctlr_info *h;
551 struct Scsi_Host *shost = class_to_shost(dev);
553 h = shost_to_hba(shost);
554 return snprintf(buf, 30, "HP SSD Smart Path %s\n",
555 (h->acciopath_status == 1) ? "enabled" : "disabled");
558 /* List of controllers which cannot be hard reset on kexec with reset_devices */
559 static u32 unresettable_controller[] = {
560 0x324a103C, /* Smart Array P712m */
561 0x324b103C, /* Smart Array P711m */
562 0x3223103C, /* Smart Array P800 */
563 0x3234103C, /* Smart Array P400 */
564 0x3235103C, /* Smart Array P400i */
565 0x3211103C, /* Smart Array E200i */
566 0x3212103C, /* Smart Array E200 */
567 0x3213103C, /* Smart Array E200i */
568 0x3214103C, /* Smart Array E200i */
569 0x3215103C, /* Smart Array E200i */
570 0x3237103C, /* Smart Array E500 */
571 0x323D103C, /* Smart Array P700m */
572 0x40800E11, /* Smart Array 5i */
573 0x409C0E11, /* Smart Array 6400 */
574 0x409D0E11, /* Smart Array 6400 EM */
575 0x40700E11, /* Smart Array 5300 */
576 0x40820E11, /* Smart Array 532 */
577 0x40830E11, /* Smart Array 5312 */
578 0x409A0E11, /* Smart Array 641 */
579 0x409B0E11, /* Smart Array 642 */
580 0x40910E11, /* Smart Array 6i */
583 /* List of controllers which cannot even be soft reset */
584 static u32 soft_unresettable_controller[] = {
585 0x40800E11, /* Smart Array 5i */
586 0x40700E11, /* Smart Array 5300 */
587 0x40820E11, /* Smart Array 532 */
588 0x40830E11, /* Smart Array 5312 */
589 0x409A0E11, /* Smart Array 641 */
590 0x409B0E11, /* Smart Array 642 */
591 0x40910E11, /* Smart Array 6i */
592 /* Exclude 640x boards. These are two pci devices in one slot
593 * which share a battery backed cache module. One controls the
594 * cache, the other accesses the cache through the one that controls
595 * it. If we reset the one controlling the cache, the other will
596 * likely not be happy. Just forbid resetting this conjoined mess.
597 * The 640x isn't really supported by hpsa anyway.
599 0x409C0E11, /* Smart Array 6400 */
600 0x409D0E11, /* Smart Array 6400 EM */
603 static int board_id_in_array(u32 a[], int nelems, u32 board_id)
605 int i;
607 for (i = 0; i < nelems; i++)
608 if (a[i] == board_id)
609 return 1;
610 return 0;
613 static int ctlr_is_hard_resettable(u32 board_id)
615 return !board_id_in_array(unresettable_controller,
616 ARRAY_SIZE(unresettable_controller), board_id);
619 static int ctlr_is_soft_resettable(u32 board_id)
621 return !board_id_in_array(soft_unresettable_controller,
622 ARRAY_SIZE(soft_unresettable_controller), board_id);
625 static int ctlr_is_resettable(u32 board_id)
627 return ctlr_is_hard_resettable(board_id) ||
628 ctlr_is_soft_resettable(board_id);
631 static ssize_t host_show_resettable(struct device *dev,
632 struct device_attribute *attr, char *buf)
634 struct ctlr_info *h;
635 struct Scsi_Host *shost = class_to_shost(dev);
637 h = shost_to_hba(shost);
638 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
641 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
643 return (scsi3addr[3] & 0xC0) == 0x40;
646 static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
647 "1(+0)ADM", "UNKNOWN", "PHYS DRV"
649 #define HPSA_RAID_0 0
650 #define HPSA_RAID_4 1
651 #define HPSA_RAID_1 2 /* also used for RAID 10 */
652 #define HPSA_RAID_5 3 /* also used for RAID 50 */
653 #define HPSA_RAID_51 4
654 #define HPSA_RAID_6 5 /* also used for RAID 60 */
655 #define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
656 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
657 #define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
659 static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
661 return !device->physical_device;
664 static ssize_t raid_level_show(struct device *dev,
665 struct device_attribute *attr, char *buf)
667 ssize_t l = 0;
668 unsigned char rlevel;
669 struct ctlr_info *h;
670 struct scsi_device *sdev;
671 struct hpsa_scsi_dev_t *hdev;
672 unsigned long flags;
674 sdev = to_scsi_device(dev);
675 h = sdev_to_hba(sdev);
676 spin_lock_irqsave(&h->lock, flags);
677 hdev = sdev->hostdata;
678 if (!hdev) {
679 spin_unlock_irqrestore(&h->lock, flags);
680 return -ENODEV;
683 /* Is this even a logical drive? */
684 if (!is_logical_device(hdev)) {
685 spin_unlock_irqrestore(&h->lock, flags);
686 l = snprintf(buf, PAGE_SIZE, "N/A\n");
687 return l;
690 rlevel = hdev->raid_level;
691 spin_unlock_irqrestore(&h->lock, flags);
692 if (rlevel > RAID_UNKNOWN)
693 rlevel = RAID_UNKNOWN;
694 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
695 return l;
698 static ssize_t lunid_show(struct device *dev,
699 struct device_attribute *attr, char *buf)
701 struct ctlr_info *h;
702 struct scsi_device *sdev;
703 struct hpsa_scsi_dev_t *hdev;
704 unsigned long flags;
705 unsigned char lunid[8];
707 sdev = to_scsi_device(dev);
708 h = sdev_to_hba(sdev);
709 spin_lock_irqsave(&h->lock, flags);
710 hdev = sdev->hostdata;
711 if (!hdev) {
712 spin_unlock_irqrestore(&h->lock, flags);
713 return -ENODEV;
715 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
716 spin_unlock_irqrestore(&h->lock, flags);
717 return snprintf(buf, 20, "0x%8phN\n", lunid);
720 static ssize_t unique_id_show(struct device *dev,
721 struct device_attribute *attr, char *buf)
723 struct ctlr_info *h;
724 struct scsi_device *sdev;
725 struct hpsa_scsi_dev_t *hdev;
726 unsigned long flags;
727 unsigned char sn[16];
729 sdev = to_scsi_device(dev);
730 h = sdev_to_hba(sdev);
731 spin_lock_irqsave(&h->lock, flags);
732 hdev = sdev->hostdata;
733 if (!hdev) {
734 spin_unlock_irqrestore(&h->lock, flags);
735 return -ENODEV;
737 memcpy(sn, hdev->device_id, sizeof(sn));
738 spin_unlock_irqrestore(&h->lock, flags);
739 return snprintf(buf, 16 * 2 + 2,
740 "%02X%02X%02X%02X%02X%02X%02X%02X"
741 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
742 sn[0], sn[1], sn[2], sn[3],
743 sn[4], sn[5], sn[6], sn[7],
744 sn[8], sn[9], sn[10], sn[11],
745 sn[12], sn[13], sn[14], sn[15]);
748 static ssize_t sas_address_show(struct device *dev,
749 struct device_attribute *attr, char *buf)
751 struct ctlr_info *h;
752 struct scsi_device *sdev;
753 struct hpsa_scsi_dev_t *hdev;
754 unsigned long flags;
755 u64 sas_address;
757 sdev = to_scsi_device(dev);
758 h = sdev_to_hba(sdev);
759 spin_lock_irqsave(&h->lock, flags);
760 hdev = sdev->hostdata;
761 if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
762 spin_unlock_irqrestore(&h->lock, flags);
763 return -ENODEV;
765 sas_address = hdev->sas_address;
766 spin_unlock_irqrestore(&h->lock, flags);
768 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
771 static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
772 struct device_attribute *attr, char *buf)
774 struct ctlr_info *h;
775 struct scsi_device *sdev;
776 struct hpsa_scsi_dev_t *hdev;
777 unsigned long flags;
778 int offload_enabled;
780 sdev = to_scsi_device(dev);
781 h = sdev_to_hba(sdev);
782 spin_lock_irqsave(&h->lock, flags);
783 hdev = sdev->hostdata;
784 if (!hdev) {
785 spin_unlock_irqrestore(&h->lock, flags);
786 return -ENODEV;
788 offload_enabled = hdev->offload_enabled;
789 spin_unlock_irqrestore(&h->lock, flags);
791 if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
792 return snprintf(buf, 20, "%d\n", offload_enabled);
793 else
794 return snprintf(buf, 40, "%s\n",
795 "Not applicable for a controller");
798 #define MAX_PATHS 8
799 static ssize_t path_info_show(struct device *dev,
800 struct device_attribute *attr, char *buf)
802 struct ctlr_info *h;
803 struct scsi_device *sdev;
804 struct hpsa_scsi_dev_t *hdev;
805 unsigned long flags;
806 int i;
807 int output_len = 0;
808 u8 box;
809 u8 bay;
810 u8 path_map_index = 0;
811 char *active;
812 unsigned char phys_connector[2];
814 sdev = to_scsi_device(dev);
815 h = sdev_to_hba(sdev);
816 spin_lock_irqsave(&h->devlock, flags);
817 hdev = sdev->hostdata;
818 if (!hdev) {
819 spin_unlock_irqrestore(&h->devlock, flags);
820 return -ENODEV;
823 bay = hdev->bay;
824 for (i = 0; i < MAX_PATHS; i++) {
825 path_map_index = 1<<i;
826 if (i == hdev->active_path_index)
827 active = "Active";
828 else if (hdev->path_map & path_map_index)
829 active = "Inactive";
830 else
831 continue;
833 output_len += scnprintf(buf + output_len,
834 PAGE_SIZE - output_len,
835 "[%d:%d:%d:%d] %20.20s ",
836 h->scsi_host->host_no,
837 hdev->bus, hdev->target, hdev->lun,
838 scsi_device_type(hdev->devtype));
840 if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
841 output_len += scnprintf(buf + output_len,
842 PAGE_SIZE - output_len,
843 "%s\n", active);
844 continue;
847 box = hdev->box[i];
848 memcpy(&phys_connector, &hdev->phys_connector[i],
849 sizeof(phys_connector));
850 if (phys_connector[0] < '0')
851 phys_connector[0] = '0';
852 if (phys_connector[1] < '0')
853 phys_connector[1] = '0';
854 output_len += scnprintf(buf + output_len,
855 PAGE_SIZE - output_len,
856 "PORT: %.2s ",
857 phys_connector);
858 if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
859 hdev->expose_device) {
860 if (box == 0 || box == 0xFF) {
861 output_len += scnprintf(buf + output_len,
862 PAGE_SIZE - output_len,
863 "BAY: %hhu %s\n",
864 bay, active);
865 } else {
866 output_len += scnprintf(buf + output_len,
867 PAGE_SIZE - output_len,
868 "BOX: %hhu BAY: %hhu %s\n",
869 box, bay, active);
871 } else if (box != 0 && box != 0xFF) {
872 output_len += scnprintf(buf + output_len,
873 PAGE_SIZE - output_len, "BOX: %hhu %s\n",
874 box, active);
875 } else
876 output_len += scnprintf(buf + output_len,
877 PAGE_SIZE - output_len, "%s\n", active);
880 spin_unlock_irqrestore(&h->devlock, flags);
881 return output_len;
884 static ssize_t host_show_ctlr_num(struct device *dev,
885 struct device_attribute *attr, char *buf)
887 struct ctlr_info *h;
888 struct Scsi_Host *shost = class_to_shost(dev);
890 h = shost_to_hba(shost);
891 return snprintf(buf, 20, "%d\n", h->ctlr);
894 static ssize_t host_show_legacy_board(struct device *dev,
895 struct device_attribute *attr, char *buf)
897 struct ctlr_info *h;
898 struct Scsi_Host *shost = class_to_shost(dev);
900 h = shost_to_hba(shost);
901 return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
904 static DEVICE_ATTR_RO(raid_level);
905 static DEVICE_ATTR_RO(lunid);
906 static DEVICE_ATTR_RO(unique_id);
907 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
908 static DEVICE_ATTR_RO(sas_address);
909 static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
910 host_show_hp_ssd_smart_path_enabled, NULL);
911 static DEVICE_ATTR_RO(path_info);
912 static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
913 host_show_hp_ssd_smart_path_status,
914 host_store_hp_ssd_smart_path_status);
915 static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
916 host_store_raid_offload_debug);
917 static DEVICE_ATTR(firmware_revision, S_IRUGO,
918 host_show_firmware_revision, NULL);
919 static DEVICE_ATTR(commands_outstanding, S_IRUGO,
920 host_show_commands_outstanding, NULL);
921 static DEVICE_ATTR(transport_mode, S_IRUGO,
922 host_show_transport_mode, NULL);
923 static DEVICE_ATTR(resettable, S_IRUGO,
924 host_show_resettable, NULL);
925 static DEVICE_ATTR(lockup_detected, S_IRUGO,
926 host_show_lockup_detected, NULL);
927 static DEVICE_ATTR(ctlr_num, S_IRUGO,
928 host_show_ctlr_num, NULL);
929 static DEVICE_ATTR(legacy_board, S_IRUGO,
930 host_show_legacy_board, NULL);
932 static struct device_attribute *hpsa_sdev_attrs[] = {
933 &dev_attr_raid_level,
934 &dev_attr_lunid,
935 &dev_attr_unique_id,
936 &dev_attr_hp_ssd_smart_path_enabled,
937 &dev_attr_path_info,
938 &dev_attr_sas_address,
939 NULL,
942 static struct device_attribute *hpsa_shost_attrs[] = {
943 &dev_attr_rescan,
944 &dev_attr_firmware_revision,
945 &dev_attr_commands_outstanding,
946 &dev_attr_transport_mode,
947 &dev_attr_resettable,
948 &dev_attr_hp_ssd_smart_path_status,
949 &dev_attr_raid_offload_debug,
950 &dev_attr_lockup_detected,
951 &dev_attr_ctlr_num,
952 &dev_attr_legacy_board,
953 NULL,
956 #define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\
957 HPSA_MAX_CONCURRENT_PASSTHRUS)
959 static struct scsi_host_template hpsa_driver_template = {
960 .module = THIS_MODULE,
961 .name = HPSA,
962 .proc_name = HPSA,
963 .queuecommand = hpsa_scsi_queue_command,
964 .scan_start = hpsa_scan_start,
965 .scan_finished = hpsa_scan_finished,
966 .change_queue_depth = hpsa_change_queue_depth,
967 .this_id = -1,
968 .use_clustering = ENABLE_CLUSTERING,
969 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
970 .ioctl = hpsa_ioctl,
971 .slave_alloc = hpsa_slave_alloc,
972 .slave_configure = hpsa_slave_configure,
973 .slave_destroy = hpsa_slave_destroy,
974 #ifdef CONFIG_COMPAT
975 .compat_ioctl = hpsa_compat_ioctl,
976 #endif
977 .sdev_attrs = hpsa_sdev_attrs,
978 .shost_attrs = hpsa_shost_attrs,
979 .max_sectors = 1024,
980 .no_write_same = 1,
983 static inline u32 next_command(struct ctlr_info *h, u8 q)
985 u32 a;
986 struct reply_queue_buffer *rq = &h->reply_queue[q];
988 if (h->transMethod & CFGTBL_Trans_io_accel1)
989 return h->access.command_completed(h, q);
991 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
992 return h->access.command_completed(h, q);
994 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
995 a = rq->head[rq->current_entry];
996 rq->current_entry++;
997 atomic_dec(&h->commands_outstanding);
998 } else {
999 a = FIFO_EMPTY;
1001 /* Check for wraparound */
1002 if (rq->current_entry == h->max_commands) {
1003 rq->current_entry = 0;
1004 rq->wraparound ^= 1;
1006 return a;
1010 * There are some special bits in the bus address of the
1011 * command that we have to set for the controller to know
1012 * how to process the command:
1014 * Normal performant mode:
1015 * bit 0: 1 means performant mode, 0 means simple mode.
1016 * bits 1-3 = block fetch table entry
1017 * bits 4-6 = command type (== 0)
1019 * ioaccel1 mode:
1020 * bit 0 = "performant mode" bit.
1021 * bits 1-3 = block fetch table entry
1022 * bits 4-6 = command type (== 110)
1023 * (command type is needed because ioaccel1 mode
1024 * commands are submitted through the same register as normal
1025 * mode commands, so this is how the controller knows whether
1026 * the command is normal mode or ioaccel1 mode.)
1028 * ioaccel2 mode:
1029 * bit 0 = "performant mode" bit.
1030 * bits 1-4 = block fetch table entry (note extra bit)
1031 * bits 4-6 = not needed, because ioaccel2 mode has
1032 * a separate special register for submitting commands.
1036 * set_performant_mode: Modify the tag for cciss performant
1037 * set bit 0 for pull model, bits 3-1 for block fetch
1038 * register number
1040 #define DEFAULT_REPLY_QUEUE (-1)
1041 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
1042 int reply_queue)
1044 if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
1045 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
1046 if (unlikely(!h->msix_vectors))
1047 return;
1048 c->Header.ReplyQueue = reply_queue;
1052 static void set_ioaccel1_performant_mode(struct ctlr_info *h,
1053 struct CommandList *c,
1054 int reply_queue)
1056 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
1059 * Tell the controller to post the reply to the queue for this
1060 * processor. This seems to give the best I/O throughput.
1062 cp->ReplyQueue = reply_queue;
1064 * Set the bits in the address sent down to include:
1065 * - performant mode bit (bit 0)
1066 * - pull count (bits 1-3)
1067 * - command type (bits 4-6)
1069 c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
1070 IOACCEL1_BUSADDR_CMDTYPE;
1073 static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
1074 struct CommandList *c,
1075 int reply_queue)
1077 struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
1078 &h->ioaccel2_cmd_pool[c->cmdindex];
1080 /* Tell the controller to post the reply to the queue for this
1081 * processor. This seems to give the best I/O throughput.
1083 cp->reply_queue = reply_queue;
1084 /* Set the bits in the address sent down to include:
1085 * - performant mode bit not used in ioaccel mode 2
1086 * - pull count (bits 0-3)
1087 * - command type isn't needed for ioaccel2
1089 c->busaddr |= h->ioaccel2_blockFetchTable[0];
1092 static void set_ioaccel2_performant_mode(struct ctlr_info *h,
1093 struct CommandList *c,
1094 int reply_queue)
1096 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
1099 * Tell the controller to post the reply to the queue for this
1100 * processor. This seems to give the best I/O throughput.
1102 cp->reply_queue = reply_queue;
1104 * Set the bits in the address sent down to include:
1105 * - performant mode bit not used in ioaccel mode 2
1106 * - pull count (bits 0-3)
1107 * - command type isn't needed for ioaccel2
1109 c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
1112 static int is_firmware_flash_cmd(u8 *cdb)
1114 return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
1118 * During firmware flash, the heartbeat register may not update as frequently
1119 * as it should. So we dial down lockup detection during firmware flash. and
1120 * dial it back up when firmware flash completes.
1122 #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
1123 #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
1124 #define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
1125 static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
1126 struct CommandList *c)
1128 if (!is_firmware_flash_cmd(c->Request.CDB))
1129 return;
1130 atomic_inc(&h->firmware_flash_in_progress);
1131 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
1134 static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
1135 struct CommandList *c)
1137 if (is_firmware_flash_cmd(c->Request.CDB) &&
1138 atomic_dec_and_test(&h->firmware_flash_in_progress))
1139 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
1142 static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
1143 struct CommandList *c, int reply_queue)
1145 dial_down_lockup_detection_during_fw_flash(h, c);
1146 atomic_inc(&h->commands_outstanding);
1148 reply_queue = h->reply_map[raw_smp_processor_id()];
1149 switch (c->cmd_type) {
1150 case CMD_IOACCEL1:
1151 set_ioaccel1_performant_mode(h, c, reply_queue);
1152 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
1153 break;
1154 case CMD_IOACCEL2:
1155 set_ioaccel2_performant_mode(h, c, reply_queue);
1156 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1157 break;
1158 case IOACCEL2_TMF:
1159 set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
1160 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1161 break;
1162 default:
1163 set_performant_mode(h, c, reply_queue);
1164 h->access.submit_command(h, c);
1168 static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
1170 if (unlikely(hpsa_is_pending_event(c)))
1171 return finish_cmd(c);
1173 __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
1176 static inline int is_hba_lunid(unsigned char scsi3addr[])
1178 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1181 static inline int is_scsi_rev_5(struct ctlr_info *h)
1183 if (!h->hba_inquiry_data)
1184 return 0;
1185 if ((h->hba_inquiry_data[2] & 0x07) == 5)
1186 return 1;
1187 return 0;
1190 static int hpsa_find_target_lun(struct ctlr_info *h,
1191 unsigned char scsi3addr[], int bus, int *target, int *lun)
1193 /* finds an unused bus, target, lun for a new physical device
1194 * assumes h->devlock is held
1196 int i, found = 0;
1197 DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
1199 bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
1201 for (i = 0; i < h->ndevices; i++) {
1202 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
1203 __set_bit(h->dev[i]->target, lun_taken);
1206 i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
1207 if (i < HPSA_MAX_DEVICES) {
1208 /* *bus = 1; */
1209 *target = i;
1210 *lun = 0;
1211 found = 1;
1213 return !found;
1216 static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1217 struct hpsa_scsi_dev_t *dev, char *description)
1219 #define LABEL_SIZE 25
1220 char label[LABEL_SIZE];
1222 if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
1223 return;
1225 switch (dev->devtype) {
1226 case TYPE_RAID:
1227 snprintf(label, LABEL_SIZE, "controller");
1228 break;
1229 case TYPE_ENCLOSURE:
1230 snprintf(label, LABEL_SIZE, "enclosure");
1231 break;
1232 case TYPE_DISK:
1233 case TYPE_ZBC:
1234 if (dev->external)
1235 snprintf(label, LABEL_SIZE, "external");
1236 else if (!is_logical_dev_addr_mode(dev->scsi3addr))
1237 snprintf(label, LABEL_SIZE, "%s",
1238 raid_label[PHYSICAL_DRIVE]);
1239 else
1240 snprintf(label, LABEL_SIZE, "RAID-%s",
1241 dev->raid_level > RAID_UNKNOWN ? "?" :
1242 raid_label[dev->raid_level]);
1243 break;
1244 case TYPE_ROM:
1245 snprintf(label, LABEL_SIZE, "rom");
1246 break;
1247 case TYPE_TAPE:
1248 snprintf(label, LABEL_SIZE, "tape");
1249 break;
1250 case TYPE_MEDIUM_CHANGER:
1251 snprintf(label, LABEL_SIZE, "changer");
1252 break;
1253 default:
1254 snprintf(label, LABEL_SIZE, "UNKNOWN");
1255 break;
1258 dev_printk(level, &h->pdev->dev,
1259 "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
1260 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
1261 description,
1262 scsi_device_type(dev->devtype),
1263 dev->vendor,
1264 dev->model,
1265 label,
1266 dev->offload_config ? '+' : '-',
1267 dev->offload_to_be_enabled ? '+' : '-',
1268 dev->expose_device);
1271 /* Add an entry into h->dev[] array. */
1272 static int hpsa_scsi_add_entry(struct ctlr_info *h,
1273 struct hpsa_scsi_dev_t *device,
1274 struct hpsa_scsi_dev_t *added[], int *nadded)
1276 /* assumes h->devlock is held */
1277 int n = h->ndevices;
1278 int i;
1279 unsigned char addr1[8], addr2[8];
1280 struct hpsa_scsi_dev_t *sd;
1282 if (n >= HPSA_MAX_DEVICES) {
1283 dev_err(&h->pdev->dev, "too many devices, some will be "
1284 "inaccessible.\n");
1285 return -1;
1288 /* physical devices do not have lun or target assigned until now. */
1289 if (device->lun != -1)
1290 /* Logical device, lun is already assigned. */
1291 goto lun_assigned;
1293 /* If this device a non-zero lun of a multi-lun device
1294 * byte 4 of the 8-byte LUN addr will contain the logical
1295 * unit no, zero otherwise.
1297 if (device->scsi3addr[4] == 0) {
1298 /* This is not a non-zero lun of a multi-lun device */
1299 if (hpsa_find_target_lun(h, device->scsi3addr,
1300 device->bus, &device->target, &device->lun) != 0)
1301 return -1;
1302 goto lun_assigned;
1305 /* This is a non-zero lun of a multi-lun device.
1306 * Search through our list and find the device which
1307 * has the same 8 byte LUN address, excepting byte 4 and 5.
1308 * Assign the same bus and target for this new LUN.
1309 * Use the logical unit number from the firmware.
1311 memcpy(addr1, device->scsi3addr, 8);
1312 addr1[4] = 0;
1313 addr1[5] = 0;
1314 for (i = 0; i < n; i++) {
1315 sd = h->dev[i];
1316 memcpy(addr2, sd->scsi3addr, 8);
1317 addr2[4] = 0;
1318 addr2[5] = 0;
1319 /* differ only in byte 4 and 5? */
1320 if (memcmp(addr1, addr2, 8) == 0) {
1321 device->bus = sd->bus;
1322 device->target = sd->target;
1323 device->lun = device->scsi3addr[4];
1324 break;
1327 if (device->lun == -1) {
1328 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1329 " suspect firmware bug or unsupported hardware "
1330 "configuration.\n");
1331 return -1;
1334 lun_assigned:
1336 h->dev[n] = device;
1337 h->ndevices++;
1338 added[*nadded] = device;
1339 (*nadded)++;
1340 hpsa_show_dev_msg(KERN_INFO, h, device,
1341 device->expose_device ? "added" : "masked");
1342 return 0;
1346 * Called during a scan operation.
1348 * Update an entry in h->dev[] array.
1350 static void hpsa_scsi_update_entry(struct ctlr_info *h,
1351 int entry, struct hpsa_scsi_dev_t *new_entry)
1353 /* assumes h->devlock is held */
1354 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1356 /* Raid level changed. */
1357 h->dev[entry]->raid_level = new_entry->raid_level;
1360 * ioacccel_handle may have changed for a dual domain disk
1362 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1364 /* Raid offload parameters changed. Careful about the ordering. */
1365 if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
1367 * if drive is newly offload_enabled, we want to copy the
1368 * raid map data first. If previously offload_enabled and
1369 * offload_config were set, raid map data had better be
1370 * the same as it was before. If raid map data has changed
1371 * then it had better be the case that
1372 * h->dev[entry]->offload_enabled is currently 0.
1374 h->dev[entry]->raid_map = new_entry->raid_map;
1375 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1377 if (new_entry->offload_to_be_enabled) {
1378 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1379 wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
1381 h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
1382 h->dev[entry]->offload_config = new_entry->offload_config;
1383 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1384 h->dev[entry]->queue_depth = new_entry->queue_depth;
1387 * We can turn off ioaccel offload now, but need to delay turning
1388 * ioaccel on until we can update h->dev[entry]->phys_disk[], but we
1389 * can't do that until all the devices are updated.
1391 h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
1394 * turn ioaccel off immediately if told to do so.
1396 if (!new_entry->offload_to_be_enabled)
1397 h->dev[entry]->offload_enabled = 0;
1399 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1402 /* Replace an entry from h->dev[] array. */
1403 static void hpsa_scsi_replace_entry(struct ctlr_info *h,
1404 int entry, struct hpsa_scsi_dev_t *new_entry,
1405 struct hpsa_scsi_dev_t *added[], int *nadded,
1406 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1408 /* assumes h->devlock is held */
1409 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1410 removed[*nremoved] = h->dev[entry];
1411 (*nremoved)++;
1414 * New physical devices won't have target/lun assigned yet
1415 * so we need to preserve the values in the slot we are replacing.
1417 if (new_entry->target == -1) {
1418 new_entry->target = h->dev[entry]->target;
1419 new_entry->lun = h->dev[entry]->lun;
1422 h->dev[entry] = new_entry;
1423 added[*nadded] = new_entry;
1424 (*nadded)++;
1426 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1429 /* Remove an entry from h->dev[] array. */
1430 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
1431 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1433 /* assumes h->devlock is held */
1434 int i;
1435 struct hpsa_scsi_dev_t *sd;
1437 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1439 sd = h->dev[entry];
1440 removed[*nremoved] = h->dev[entry];
1441 (*nremoved)++;
1443 for (i = entry; i < h->ndevices-1; i++)
1444 h->dev[i] = h->dev[i+1];
1445 h->ndevices--;
1446 hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1449 #define SCSI3ADDR_EQ(a, b) ( \
1450 (a)[7] == (b)[7] && \
1451 (a)[6] == (b)[6] && \
1452 (a)[5] == (b)[5] && \
1453 (a)[4] == (b)[4] && \
1454 (a)[3] == (b)[3] && \
1455 (a)[2] == (b)[2] && \
1456 (a)[1] == (b)[1] && \
1457 (a)[0] == (b)[0])
1459 static void fixup_botched_add(struct ctlr_info *h,
1460 struct hpsa_scsi_dev_t *added)
1462 /* called when scsi_add_device fails in order to re-adjust
1463 * h->dev[] to match the mid layer's view.
1465 unsigned long flags;
1466 int i, j;
1468 spin_lock_irqsave(&h->lock, flags);
1469 for (i = 0; i < h->ndevices; i++) {
1470 if (h->dev[i] == added) {
1471 for (j = i; j < h->ndevices-1; j++)
1472 h->dev[j] = h->dev[j+1];
1473 h->ndevices--;
1474 break;
1477 spin_unlock_irqrestore(&h->lock, flags);
1478 kfree(added);
1481 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1482 struct hpsa_scsi_dev_t *dev2)
1484 /* we compare everything except lun and target as these
1485 * are not yet assigned. Compare parts likely
1486 * to differ first
1488 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1489 sizeof(dev1->scsi3addr)) != 0)
1490 return 0;
1491 if (memcmp(dev1->device_id, dev2->device_id,
1492 sizeof(dev1->device_id)) != 0)
1493 return 0;
1494 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1495 return 0;
1496 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1497 return 0;
1498 if (dev1->devtype != dev2->devtype)
1499 return 0;
1500 if (dev1->bus != dev2->bus)
1501 return 0;
1502 return 1;
1505 static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1506 struct hpsa_scsi_dev_t *dev2)
1508 /* Device attributes that can change, but don't mean
1509 * that the device is a different device, nor that the OS
1510 * needs to be told anything about the change.
1512 if (dev1->raid_level != dev2->raid_level)
1513 return 1;
1514 if (dev1->offload_config != dev2->offload_config)
1515 return 1;
1516 if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
1517 return 1;
1518 if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1519 if (dev1->queue_depth != dev2->queue_depth)
1520 return 1;
1522 * This can happen for dual domain devices. An active
1523 * path change causes the ioaccel handle to change
1525 * for example note the handle differences between p0 and p1
1526 * Device WWN ,WWN hash,Handle
1527 * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
1528 * p1 0x5000C5005FC4DAC9,0x6798C0,0x00040004
1530 if (dev1->ioaccel_handle != dev2->ioaccel_handle)
1531 return 1;
1532 return 0;
1535 /* Find needle in haystack. If exact match found, return DEVICE_SAME,
1536 * and return needle location in *index. If scsi3addr matches, but not
1537 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
1538 * location in *index.
1539 * In the case of a minor device attribute change, such as RAID level, just
1540 * return DEVICE_UPDATED, along with the updated device's location in index.
1541 * If needle not found, return DEVICE_NOT_FOUND.
1543 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1544 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1545 int *index)
1547 int i;
1548 #define DEVICE_NOT_FOUND 0
1549 #define DEVICE_CHANGED 1
1550 #define DEVICE_SAME 2
1551 #define DEVICE_UPDATED 3
1552 if (needle == NULL)
1553 return DEVICE_NOT_FOUND;
1555 for (i = 0; i < haystack_size; i++) {
1556 if (haystack[i] == NULL) /* previously removed. */
1557 continue;
1558 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1559 *index = i;
1560 if (device_is_the_same(needle, haystack[i])) {
1561 if (device_updated(needle, haystack[i]))
1562 return DEVICE_UPDATED;
1563 return DEVICE_SAME;
1564 } else {
1565 /* Keep offline devices offline */
1566 if (needle->volume_offline)
1567 return DEVICE_NOT_FOUND;
1568 return DEVICE_CHANGED;
1572 *index = -1;
1573 return DEVICE_NOT_FOUND;
1576 static void hpsa_monitor_offline_device(struct ctlr_info *h,
1577 unsigned char scsi3addr[])
1579 struct offline_device_entry *device;
1580 unsigned long flags;
1582 /* Check to see if device is already on the list */
1583 spin_lock_irqsave(&h->offline_device_lock, flags);
1584 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1585 if (memcmp(device->scsi3addr, scsi3addr,
1586 sizeof(device->scsi3addr)) == 0) {
1587 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1588 return;
1591 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1593 /* Device is not on the list, add it. */
1594 device = kmalloc(sizeof(*device), GFP_KERNEL);
1595 if (!device)
1596 return;
1598 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1599 spin_lock_irqsave(&h->offline_device_lock, flags);
1600 list_add_tail(&device->offline_list, &h->offline_device_list);
1601 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1604 /* Print a message explaining various offline volume states */
1605 static void hpsa_show_volume_status(struct ctlr_info *h,
1606 struct hpsa_scsi_dev_t *sd)
1608 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1609 dev_info(&h->pdev->dev,
1610 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1611 h->scsi_host->host_no,
1612 sd->bus, sd->target, sd->lun);
1613 switch (sd->volume_offline) {
1614 case HPSA_LV_OK:
1615 break;
1616 case HPSA_LV_UNDERGOING_ERASE:
1617 dev_info(&h->pdev->dev,
1618 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1619 h->scsi_host->host_no,
1620 sd->bus, sd->target, sd->lun);
1621 break;
1622 case HPSA_LV_NOT_AVAILABLE:
1623 dev_info(&h->pdev->dev,
1624 "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
1625 h->scsi_host->host_no,
1626 sd->bus, sd->target, sd->lun);
1627 break;
1628 case HPSA_LV_UNDERGOING_RPI:
1629 dev_info(&h->pdev->dev,
1630 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1631 h->scsi_host->host_no,
1632 sd->bus, sd->target, sd->lun);
1633 break;
1634 case HPSA_LV_PENDING_RPI:
1635 dev_info(&h->pdev->dev,
1636 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1637 h->scsi_host->host_no,
1638 sd->bus, sd->target, sd->lun);
1639 break;
1640 case HPSA_LV_ENCRYPTED_NO_KEY:
1641 dev_info(&h->pdev->dev,
1642 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1643 h->scsi_host->host_no,
1644 sd->bus, sd->target, sd->lun);
1645 break;
1646 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1647 dev_info(&h->pdev->dev,
1648 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1649 h->scsi_host->host_no,
1650 sd->bus, sd->target, sd->lun);
1651 break;
1652 case HPSA_LV_UNDERGOING_ENCRYPTION:
1653 dev_info(&h->pdev->dev,
1654 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1655 h->scsi_host->host_no,
1656 sd->bus, sd->target, sd->lun);
1657 break;
1658 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1659 dev_info(&h->pdev->dev,
1660 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1661 h->scsi_host->host_no,
1662 sd->bus, sd->target, sd->lun);
1663 break;
1664 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1665 dev_info(&h->pdev->dev,
1666 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1667 h->scsi_host->host_no,
1668 sd->bus, sd->target, sd->lun);
1669 break;
1670 case HPSA_LV_PENDING_ENCRYPTION:
1671 dev_info(&h->pdev->dev,
1672 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1673 h->scsi_host->host_no,
1674 sd->bus, sd->target, sd->lun);
1675 break;
1676 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1677 dev_info(&h->pdev->dev,
1678 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1679 h->scsi_host->host_no,
1680 sd->bus, sd->target, sd->lun);
1681 break;
1686 * Figure the list of physical drive pointers for a logical drive with
1687 * raid offload configured.
1689 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1690 struct hpsa_scsi_dev_t *dev[], int ndevices,
1691 struct hpsa_scsi_dev_t *logical_drive)
1693 struct raid_map_data *map = &logical_drive->raid_map;
1694 struct raid_map_disk_data *dd = &map->data[0];
1695 int i, j;
1696 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1697 le16_to_cpu(map->metadata_disks_per_row);
1698 int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1699 le16_to_cpu(map->layout_map_count) *
1700 total_disks_per_row;
1701 int nphys_disk = le16_to_cpu(map->layout_map_count) *
1702 total_disks_per_row;
1703 int qdepth;
1705 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1706 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1708 logical_drive->nphysical_disks = nraid_map_entries;
1710 qdepth = 0;
1711 for (i = 0; i < nraid_map_entries; i++) {
1712 logical_drive->phys_disk[i] = NULL;
1713 if (!logical_drive->offload_config)
1714 continue;
1715 for (j = 0; j < ndevices; j++) {
1716 if (dev[j] == NULL)
1717 continue;
1718 if (dev[j]->devtype != TYPE_DISK &&
1719 dev[j]->devtype != TYPE_ZBC)
1720 continue;
1721 if (is_logical_device(dev[j]))
1722 continue;
1723 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1724 continue;
1726 logical_drive->phys_disk[i] = dev[j];
1727 if (i < nphys_disk)
1728 qdepth = min(h->nr_cmds, qdepth +
1729 logical_drive->phys_disk[i]->queue_depth);
1730 break;
1734 * This can happen if a physical drive is removed and
1735 * the logical drive is degraded. In that case, the RAID
1736 * map data will refer to a physical disk which isn't actually
1737 * present. And in that case offload_enabled should already
1738 * be 0, but we'll turn it off here just in case
1740 if (!logical_drive->phys_disk[i]) {
1741 dev_warn(&h->pdev->dev,
1742 "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
1743 __func__,
1744 h->scsi_host->host_no, logical_drive->bus,
1745 logical_drive->target, logical_drive->lun);
1746 logical_drive->offload_enabled = 0;
1747 logical_drive->offload_to_be_enabled = 0;
1748 logical_drive->queue_depth = 8;
1751 if (nraid_map_entries)
1753 * This is correct for reads, too high for full stripe writes,
1754 * way too high for partial stripe writes
1756 logical_drive->queue_depth = qdepth;
1757 else {
1758 if (logical_drive->external)
1759 logical_drive->queue_depth = EXTERNAL_QD;
1760 else
1761 logical_drive->queue_depth = h->nr_cmds;
1765 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1766 struct hpsa_scsi_dev_t *dev[], int ndevices)
1768 int i;
1770 for (i = 0; i < ndevices; i++) {
1771 if (dev[i] == NULL)
1772 continue;
1773 if (dev[i]->devtype != TYPE_DISK &&
1774 dev[i]->devtype != TYPE_ZBC)
1775 continue;
1776 if (!is_logical_device(dev[i]))
1777 continue;
1780 * If offload is currently enabled, the RAID map and
1781 * phys_disk[] assignment *better* not be changing
1782 * because we would be changing ioaccel phsy_disk[] pointers
1783 * on a ioaccel volume processing I/O requests.
1785 * If an ioaccel volume status changed, initially because it was
1786 * re-configured and thus underwent a transformation, or
1787 * a drive failed, we would have received a state change
1788 * request and ioaccel should have been turned off. When the
1789 * transformation completes, we get another state change
1790 * request to turn ioaccel back on. In this case, we need
1791 * to update the ioaccel information.
1793 * Thus: If it is not currently enabled, but will be after
1794 * the scan completes, make sure the ioaccel pointers
1795 * are up to date.
1798 if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
1799 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1803 static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1805 int rc = 0;
1807 if (!h->scsi_host)
1808 return 1;
1810 if (is_logical_device(device)) /* RAID */
1811 rc = scsi_add_device(h->scsi_host, device->bus,
1812 device->target, device->lun);
1813 else /* HBA */
1814 rc = hpsa_add_sas_device(h->sas_host, device);
1816 return rc;
1819 static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
1820 struct hpsa_scsi_dev_t *dev)
1822 int i;
1823 int count = 0;
1825 for (i = 0; i < h->nr_cmds; i++) {
1826 struct CommandList *c = h->cmd_pool + i;
1827 int refcount = atomic_inc_return(&c->refcount);
1829 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
1830 dev->scsi3addr)) {
1831 unsigned long flags;
1833 spin_lock_irqsave(&h->lock, flags); /* Implied MB */
1834 if (!hpsa_is_cmd_idle(c))
1835 ++count;
1836 spin_unlock_irqrestore(&h->lock, flags);
1839 cmd_free(h, c);
1842 return count;
1845 static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
1846 struct hpsa_scsi_dev_t *device)
1848 int cmds = 0;
1849 int waits = 0;
1851 while (1) {
1852 cmds = hpsa_find_outstanding_commands_for_dev(h, device);
1853 if (cmds == 0)
1854 break;
1855 if (++waits > 20)
1856 break;
1857 msleep(1000);
1860 if (waits > 20)
1861 dev_warn(&h->pdev->dev,
1862 "%s: removing device with %d outstanding commands!\n",
1863 __func__, cmds);
1866 static void hpsa_remove_device(struct ctlr_info *h,
1867 struct hpsa_scsi_dev_t *device)
1869 struct scsi_device *sdev = NULL;
1871 if (!h->scsi_host)
1872 return;
1875 * Allow for commands to drain
1877 device->removed = 1;
1878 hpsa_wait_for_outstanding_commands_for_dev(h, device);
1880 if (is_logical_device(device)) { /* RAID */
1881 sdev = scsi_device_lookup(h->scsi_host, device->bus,
1882 device->target, device->lun);
1883 if (sdev) {
1884 scsi_remove_device(sdev);
1885 scsi_device_put(sdev);
1886 } else {
1888 * We don't expect to get here. Future commands
1889 * to this device will get a selection timeout as
1890 * if the device were gone.
1892 hpsa_show_dev_msg(KERN_WARNING, h, device,
1893 "didn't find device for removal.");
1895 } else { /* HBA */
1897 hpsa_remove_sas_device(device);
1901 static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1902 struct hpsa_scsi_dev_t *sd[], int nsds)
1904 /* sd contains scsi3 addresses and devtypes, and inquiry
1905 * data. This function takes what's in sd to be the current
1906 * reality and updates h->dev[] to reflect that reality.
1908 int i, entry, device_change, changes = 0;
1909 struct hpsa_scsi_dev_t *csd;
1910 unsigned long flags;
1911 struct hpsa_scsi_dev_t **added, **removed;
1912 int nadded, nremoved;
1915 * A reset can cause a device status to change
1916 * re-schedule the scan to see what happened.
1918 spin_lock_irqsave(&h->reset_lock, flags);
1919 if (h->reset_in_progress) {
1920 h->drv_req_rescan = 1;
1921 spin_unlock_irqrestore(&h->reset_lock, flags);
1922 return;
1924 spin_unlock_irqrestore(&h->reset_lock, flags);
1926 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1927 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1929 if (!added || !removed) {
1930 dev_warn(&h->pdev->dev, "out of memory in "
1931 "adjust_hpsa_scsi_table\n");
1932 goto free_and_out;
1935 spin_lock_irqsave(&h->devlock, flags);
1937 /* find any devices in h->dev[] that are not in
1938 * sd[] and remove them from h->dev[], and for any
1939 * devices which have changed, remove the old device
1940 * info and add the new device info.
1941 * If minor device attributes change, just update
1942 * the existing device structure.
1944 i = 0;
1945 nremoved = 0;
1946 nadded = 0;
1947 while (i < h->ndevices) {
1948 csd = h->dev[i];
1949 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1950 if (device_change == DEVICE_NOT_FOUND) {
1951 changes++;
1952 hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1953 continue; /* remove ^^^, hence i not incremented */
1954 } else if (device_change == DEVICE_CHANGED) {
1955 changes++;
1956 hpsa_scsi_replace_entry(h, i, sd[entry],
1957 added, &nadded, removed, &nremoved);
1958 /* Set it to NULL to prevent it from being freed
1959 * at the bottom of hpsa_update_scsi_devices()
1961 sd[entry] = NULL;
1962 } else if (device_change == DEVICE_UPDATED) {
1963 hpsa_scsi_update_entry(h, i, sd[entry]);
1965 i++;
1968 /* Now, make sure every device listed in sd[] is also
1969 * listed in h->dev[], adding them if they aren't found
1972 for (i = 0; i < nsds; i++) {
1973 if (!sd[i]) /* if already added above. */
1974 continue;
1976 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1977 * as the SCSI mid-layer does not handle such devices well.
1978 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1979 * at 160Hz, and prevents the system from coming up.
1981 if (sd[i]->volume_offline) {
1982 hpsa_show_volume_status(h, sd[i]);
1983 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1984 continue;
1987 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1988 h->ndevices, &entry);
1989 if (device_change == DEVICE_NOT_FOUND) {
1990 changes++;
1991 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
1992 break;
1993 sd[i] = NULL; /* prevent from being freed later. */
1994 } else if (device_change == DEVICE_CHANGED) {
1995 /* should never happen... */
1996 changes++;
1997 dev_warn(&h->pdev->dev,
1998 "device unexpectedly changed.\n");
1999 /* but if it does happen, we just ignore that device */
2002 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
2005 * Now that h->dev[]->phys_disk[] is coherent, we can enable
2006 * any logical drives that need it enabled.
2008 * The raid map should be current by now.
2010 * We are updating the device list used for I/O requests.
2012 for (i = 0; i < h->ndevices; i++) {
2013 if (h->dev[i] == NULL)
2014 continue;
2015 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
2018 spin_unlock_irqrestore(&h->devlock, flags);
2020 /* Monitor devices which are in one of several NOT READY states to be
2021 * brought online later. This must be done without holding h->devlock,
2022 * so don't touch h->dev[]
2024 for (i = 0; i < nsds; i++) {
2025 if (!sd[i]) /* if already added above. */
2026 continue;
2027 if (sd[i]->volume_offline)
2028 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
2031 /* Don't notify scsi mid layer of any changes the first time through
2032 * (or if there are no changes) scsi_scan_host will do it later the
2033 * first time through.
2035 if (!changes)
2036 goto free_and_out;
2038 /* Notify scsi mid layer of any removed devices */
2039 for (i = 0; i < nremoved; i++) {
2040 if (removed[i] == NULL)
2041 continue;
2042 if (removed[i]->expose_device)
2043 hpsa_remove_device(h, removed[i]);
2044 kfree(removed[i]);
2045 removed[i] = NULL;
2048 /* Notify scsi mid layer of any added devices */
2049 for (i = 0; i < nadded; i++) {
2050 int rc = 0;
2052 if (added[i] == NULL)
2053 continue;
2054 if (!(added[i]->expose_device))
2055 continue;
2056 rc = hpsa_add_device(h, added[i]);
2057 if (!rc)
2058 continue;
2059 dev_warn(&h->pdev->dev,
2060 "addition failed %d, device not added.", rc);
2061 /* now we have to remove it from h->dev,
2062 * since it didn't get added to scsi mid layer
2064 fixup_botched_add(h, added[i]);
2065 h->drv_req_rescan = 1;
2068 free_and_out:
2069 kfree(added);
2070 kfree(removed);
2074 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
2075 * Assume's h->devlock is held.
2077 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
2078 int bus, int target, int lun)
2080 int i;
2081 struct hpsa_scsi_dev_t *sd;
2083 for (i = 0; i < h->ndevices; i++) {
2084 sd = h->dev[i];
2085 if (sd->bus == bus && sd->target == target && sd->lun == lun)
2086 return sd;
2088 return NULL;
2091 static int hpsa_slave_alloc(struct scsi_device *sdev)
2093 struct hpsa_scsi_dev_t *sd = NULL;
2094 unsigned long flags;
2095 struct ctlr_info *h;
2097 h = sdev_to_hba(sdev);
2098 spin_lock_irqsave(&h->devlock, flags);
2099 if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
2100 struct scsi_target *starget;
2101 struct sas_rphy *rphy;
2103 starget = scsi_target(sdev);
2104 rphy = target_to_rphy(starget);
2105 sd = hpsa_find_device_by_sas_rphy(h, rphy);
2106 if (sd) {
2107 sd->target = sdev_id(sdev);
2108 sd->lun = sdev->lun;
2111 if (!sd)
2112 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2113 sdev_id(sdev), sdev->lun);
2115 if (sd && sd->expose_device) {
2116 atomic_set(&sd->ioaccel_cmds_out, 0);
2117 sdev->hostdata = sd;
2118 } else
2119 sdev->hostdata = NULL;
2120 spin_unlock_irqrestore(&h->devlock, flags);
2121 return 0;
2124 /* configure scsi device based on internal per-device structure */
2125 static int hpsa_slave_configure(struct scsi_device *sdev)
2127 struct hpsa_scsi_dev_t *sd;
2128 int queue_depth;
2130 sd = sdev->hostdata;
2131 sdev->no_uld_attach = !sd || !sd->expose_device;
2133 if (sd) {
2134 if (sd->external)
2135 queue_depth = EXTERNAL_QD;
2136 else
2137 queue_depth = sd->queue_depth != 0 ?
2138 sd->queue_depth : sdev->host->can_queue;
2139 } else
2140 queue_depth = sdev->host->can_queue;
2142 scsi_change_queue_depth(sdev, queue_depth);
2144 return 0;
2147 static void hpsa_slave_destroy(struct scsi_device *sdev)
2149 /* nothing to do. */
2152 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2154 int i;
2156 if (!h->ioaccel2_cmd_sg_list)
2157 return;
2158 for (i = 0; i < h->nr_cmds; i++) {
2159 kfree(h->ioaccel2_cmd_sg_list[i]);
2160 h->ioaccel2_cmd_sg_list[i] = NULL;
2162 kfree(h->ioaccel2_cmd_sg_list);
2163 h->ioaccel2_cmd_sg_list = NULL;
2166 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2168 int i;
2170 if (h->chainsize <= 0)
2171 return 0;
2173 h->ioaccel2_cmd_sg_list =
2174 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
2175 GFP_KERNEL);
2176 if (!h->ioaccel2_cmd_sg_list)
2177 return -ENOMEM;
2178 for (i = 0; i < h->nr_cmds; i++) {
2179 h->ioaccel2_cmd_sg_list[i] =
2180 kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
2181 h->maxsgentries, GFP_KERNEL);
2182 if (!h->ioaccel2_cmd_sg_list[i])
2183 goto clean;
2185 return 0;
2187 clean:
2188 hpsa_free_ioaccel2_sg_chain_blocks(h);
2189 return -ENOMEM;
2192 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2194 int i;
2196 if (!h->cmd_sg_list)
2197 return;
2198 for (i = 0; i < h->nr_cmds; i++) {
2199 kfree(h->cmd_sg_list[i]);
2200 h->cmd_sg_list[i] = NULL;
2202 kfree(h->cmd_sg_list);
2203 h->cmd_sg_list = NULL;
2206 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2208 int i;
2210 if (h->chainsize <= 0)
2211 return 0;
2213 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
2214 GFP_KERNEL);
2215 if (!h->cmd_sg_list)
2216 return -ENOMEM;
2218 for (i = 0; i < h->nr_cmds; i++) {
2219 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
2220 h->chainsize, GFP_KERNEL);
2221 if (!h->cmd_sg_list[i])
2222 goto clean;
2225 return 0;
2227 clean:
2228 hpsa_free_sg_chain_blocks(h);
2229 return -ENOMEM;
2232 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2233 struct io_accel2_cmd *cp, struct CommandList *c)
2235 struct ioaccel2_sg_element *chain_block;
2236 u64 temp64;
2237 u32 chain_size;
2239 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2240 chain_size = le32_to_cpu(cp->sg[0].length);
2241 temp64 = pci_map_single(h->pdev, chain_block, chain_size,
2242 PCI_DMA_TODEVICE);
2243 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2244 /* prevent subsequent unmapping */
2245 cp->sg->address = 0;
2246 return -1;
2248 cp->sg->address = cpu_to_le64(temp64);
2249 return 0;
2252 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2253 struct io_accel2_cmd *cp)
2255 struct ioaccel2_sg_element *chain_sg;
2256 u64 temp64;
2257 u32 chain_size;
2259 chain_sg = cp->sg;
2260 temp64 = le64_to_cpu(chain_sg->address);
2261 chain_size = le32_to_cpu(cp->sg[0].length);
2262 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
2265 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2266 struct CommandList *c)
2268 struct SGDescriptor *chain_sg, *chain_block;
2269 u64 temp64;
2270 u32 chain_len;
2272 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2273 chain_block = h->cmd_sg_list[c->cmdindex];
2274 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2275 chain_len = sizeof(*chain_sg) *
2276 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2277 chain_sg->Len = cpu_to_le32(chain_len);
2278 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
2279 PCI_DMA_TODEVICE);
2280 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2281 /* prevent subsequent unmapping */
2282 chain_sg->Addr = cpu_to_le64(0);
2283 return -1;
2285 chain_sg->Addr = cpu_to_le64(temp64);
2286 return 0;
2289 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2290 struct CommandList *c)
2292 struct SGDescriptor *chain_sg;
2294 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2295 return;
2297 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2298 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
2299 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
2303 /* Decode the various types of errors on ioaccel2 path.
2304 * Return 1 for any error that should generate a RAID path retry.
2305 * Return 0 for errors that don't require a RAID path retry.
2307 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2308 struct CommandList *c,
2309 struct scsi_cmnd *cmd,
2310 struct io_accel2_cmd *c2,
2311 struct hpsa_scsi_dev_t *dev)
2313 int data_len;
2314 int retry = 0;
2315 u32 ioaccel2_resid = 0;
2317 switch (c2->error_data.serv_response) {
2318 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2319 switch (c2->error_data.status) {
2320 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2321 break;
2322 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2323 cmd->result |= SAM_STAT_CHECK_CONDITION;
2324 if (c2->error_data.data_present !=
2325 IOACCEL2_SENSE_DATA_PRESENT) {
2326 memset(cmd->sense_buffer, 0,
2327 SCSI_SENSE_BUFFERSIZE);
2328 break;
2330 /* copy the sense data */
2331 data_len = c2->error_data.sense_data_len;
2332 if (data_len > SCSI_SENSE_BUFFERSIZE)
2333 data_len = SCSI_SENSE_BUFFERSIZE;
2334 if (data_len > sizeof(c2->error_data.sense_data_buff))
2335 data_len =
2336 sizeof(c2->error_data.sense_data_buff);
2337 memcpy(cmd->sense_buffer,
2338 c2->error_data.sense_data_buff, data_len);
2339 retry = 1;
2340 break;
2341 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2342 retry = 1;
2343 break;
2344 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2345 retry = 1;
2346 break;
2347 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2348 retry = 1;
2349 break;
2350 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2351 retry = 1;
2352 break;
2353 default:
2354 retry = 1;
2355 break;
2357 break;
2358 case IOACCEL2_SERV_RESPONSE_FAILURE:
2359 switch (c2->error_data.status) {
2360 case IOACCEL2_STATUS_SR_IO_ERROR:
2361 case IOACCEL2_STATUS_SR_IO_ABORTED:
2362 case IOACCEL2_STATUS_SR_OVERRUN:
2363 retry = 1;
2364 break;
2365 case IOACCEL2_STATUS_SR_UNDERRUN:
2366 cmd->result = (DID_OK << 16); /* host byte */
2367 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2368 ioaccel2_resid = get_unaligned_le32(
2369 &c2->error_data.resid_cnt[0]);
2370 scsi_set_resid(cmd, ioaccel2_resid);
2371 break;
2372 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2373 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2374 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2376 * Did an HBA disk disappear? We will eventually
2377 * get a state change event from the controller but
2378 * in the meantime, we need to tell the OS that the
2379 * HBA disk is no longer there and stop I/O
2380 * from going down. This allows the potential re-insert
2381 * of the disk to get the same device node.
2383 if (dev->physical_device && dev->expose_device) {
2384 cmd->result = DID_NO_CONNECT << 16;
2385 dev->removed = 1;
2386 h->drv_req_rescan = 1;
2387 dev_warn(&h->pdev->dev,
2388 "%s: device is gone!\n", __func__);
2389 } else
2391 * Retry by sending down the RAID path.
2392 * We will get an event from ctlr to
2393 * trigger rescan regardless.
2395 retry = 1;
2396 break;
2397 default:
2398 retry = 1;
2400 break;
2401 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2402 break;
2403 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2404 break;
2405 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2406 retry = 1;
2407 break;
2408 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2409 break;
2410 default:
2411 retry = 1;
2412 break;
2415 return retry; /* retry on raid path? */
2418 static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2419 struct CommandList *c)
2421 bool do_wake = false;
2424 * Reset c->scsi_cmd here so that the reset handler will know
2425 * this command has completed. Then, check to see if the handler is
2426 * waiting for this command, and, if so, wake it.
2428 c->scsi_cmd = SCSI_CMD_IDLE;
2429 mb(); /* Declare command idle before checking for pending events. */
2430 if (c->reset_pending) {
2431 unsigned long flags;
2432 struct hpsa_scsi_dev_t *dev;
2435 * There appears to be a reset pending; lock the lock and
2436 * reconfirm. If so, then decrement the count of outstanding
2437 * commands and wake the reset command if this is the last one.
2439 spin_lock_irqsave(&h->lock, flags);
2440 dev = c->reset_pending; /* Re-fetch under the lock. */
2441 if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2442 do_wake = true;
2443 c->reset_pending = NULL;
2444 spin_unlock_irqrestore(&h->lock, flags);
2447 if (do_wake)
2448 wake_up_all(&h->event_sync_wait_queue);
2451 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2452 struct CommandList *c)
2454 hpsa_cmd_resolve_events(h, c);
2455 cmd_tagged_free(h, c);
2458 static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2459 struct CommandList *c, struct scsi_cmnd *cmd)
2461 hpsa_cmd_resolve_and_free(h, c);
2462 if (cmd && cmd->scsi_done)
2463 cmd->scsi_done(cmd);
2466 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2468 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2469 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2472 static void process_ioaccel2_completion(struct ctlr_info *h,
2473 struct CommandList *c, struct scsi_cmnd *cmd,
2474 struct hpsa_scsi_dev_t *dev)
2476 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2478 /* check for good status */
2479 if (likely(c2->error_data.serv_response == 0 &&
2480 c2->error_data.status == 0))
2481 return hpsa_cmd_free_and_done(h, c, cmd);
2484 * Any RAID offload error results in retry which will use
2485 * the normal I/O path so the controller can handle whatever is
2486 * wrong.
2488 if (is_logical_device(dev) &&
2489 c2->error_data.serv_response ==
2490 IOACCEL2_SERV_RESPONSE_FAILURE) {
2491 if (c2->error_data.status ==
2492 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
2493 dev->offload_enabled = 0;
2494 dev->offload_to_be_enabled = 0;
2497 return hpsa_retry_cmd(h, c);
2500 if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
2501 return hpsa_retry_cmd(h, c);
2503 return hpsa_cmd_free_and_done(h, c, cmd);
2506 /* Returns 0 on success, < 0 otherwise. */
2507 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2508 struct CommandList *cp)
2510 u8 tmf_status = cp->err_info->ScsiStatus;
2512 switch (tmf_status) {
2513 case CISS_TMF_COMPLETE:
2515 * CISS_TMF_COMPLETE never happens, instead,
2516 * ei->CommandStatus == 0 for this case.
2518 case CISS_TMF_SUCCESS:
2519 return 0;
2520 case CISS_TMF_INVALID_FRAME:
2521 case CISS_TMF_NOT_SUPPORTED:
2522 case CISS_TMF_FAILED:
2523 case CISS_TMF_WRONG_LUN:
2524 case CISS_TMF_OVERLAPPED_TAG:
2525 break;
2526 default:
2527 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2528 tmf_status);
2529 break;
2531 return -tmf_status;
2534 static void complete_scsi_command(struct CommandList *cp)
2536 struct scsi_cmnd *cmd;
2537 struct ctlr_info *h;
2538 struct ErrorInfo *ei;
2539 struct hpsa_scsi_dev_t *dev;
2540 struct io_accel2_cmd *c2;
2542 u8 sense_key;
2543 u8 asc; /* additional sense code */
2544 u8 ascq; /* additional sense code qualifier */
2545 unsigned long sense_data_size;
2547 ei = cp->err_info;
2548 cmd = cp->scsi_cmd;
2549 h = cp->h;
2551 if (!cmd->device) {
2552 cmd->result = DID_NO_CONNECT << 16;
2553 return hpsa_cmd_free_and_done(h, cp, cmd);
2556 dev = cmd->device->hostdata;
2557 if (!dev) {
2558 cmd->result = DID_NO_CONNECT << 16;
2559 return hpsa_cmd_free_and_done(h, cp, cmd);
2561 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2563 scsi_dma_unmap(cmd); /* undo the DMA mappings */
2564 if ((cp->cmd_type == CMD_SCSI) &&
2565 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2566 hpsa_unmap_sg_chain_block(h, cp);
2568 if ((cp->cmd_type == CMD_IOACCEL2) &&
2569 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2570 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2572 cmd->result = (DID_OK << 16); /* host byte */
2573 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2575 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
2576 if (dev->physical_device && dev->expose_device &&
2577 dev->removed) {
2578 cmd->result = DID_NO_CONNECT << 16;
2579 return hpsa_cmd_free_and_done(h, cp, cmd);
2581 if (likely(cp->phys_disk != NULL))
2582 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2586 * We check for lockup status here as it may be set for
2587 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2588 * fail_all_oustanding_cmds()
2590 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2591 /* DID_NO_CONNECT will prevent a retry */
2592 cmd->result = DID_NO_CONNECT << 16;
2593 return hpsa_cmd_free_and_done(h, cp, cmd);
2596 if ((unlikely(hpsa_is_pending_event(cp))))
2597 if (cp->reset_pending)
2598 return hpsa_cmd_free_and_done(h, cp, cmd);
2600 if (cp->cmd_type == CMD_IOACCEL2)
2601 return process_ioaccel2_completion(h, cp, cmd, dev);
2603 scsi_set_resid(cmd, ei->ResidualCnt);
2604 if (ei->CommandStatus == 0)
2605 return hpsa_cmd_free_and_done(h, cp, cmd);
2607 /* For I/O accelerator commands, copy over some fields to the normal
2608 * CISS header used below for error handling.
2610 if (cp->cmd_type == CMD_IOACCEL1) {
2611 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2612 cp->Header.SGList = scsi_sg_count(cmd);
2613 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2614 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2615 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2616 cp->Header.tag = c->tag;
2617 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2618 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2620 /* Any RAID offload error results in retry which will use
2621 * the normal I/O path so the controller can handle whatever's
2622 * wrong.
2624 if (is_logical_device(dev)) {
2625 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2626 dev->offload_enabled = 0;
2627 return hpsa_retry_cmd(h, cp);
2631 /* an error has occurred */
2632 switch (ei->CommandStatus) {
2634 case CMD_TARGET_STATUS:
2635 cmd->result |= ei->ScsiStatus;
2636 /* copy the sense data */
2637 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2638 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2639 else
2640 sense_data_size = sizeof(ei->SenseInfo);
2641 if (ei->SenseLen < sense_data_size)
2642 sense_data_size = ei->SenseLen;
2643 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2644 if (ei->ScsiStatus)
2645 decode_sense_data(ei->SenseInfo, sense_data_size,
2646 &sense_key, &asc, &ascq);
2647 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2648 if (sense_key == ABORTED_COMMAND) {
2649 cmd->result |= DID_SOFT_ERROR << 16;
2650 break;
2652 break;
2654 /* Problem was not a check condition
2655 * Pass it up to the upper layers...
2657 if (ei->ScsiStatus) {
2658 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2659 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2660 "Returning result: 0x%x\n",
2661 cp, ei->ScsiStatus,
2662 sense_key, asc, ascq,
2663 cmd->result);
2664 } else { /* scsi status is zero??? How??? */
2665 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2666 "Returning no connection.\n", cp),
2668 /* Ordinarily, this case should never happen,
2669 * but there is a bug in some released firmware
2670 * revisions that allows it to happen if, for
2671 * example, a 4100 backplane loses power and
2672 * the tape drive is in it. We assume that
2673 * it's a fatal error of some kind because we
2674 * can't show that it wasn't. We will make it
2675 * look like selection timeout since that is
2676 * the most common reason for this to occur,
2677 * and it's severe enough.
2680 cmd->result = DID_NO_CONNECT << 16;
2682 break;
2684 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2685 break;
2686 case CMD_DATA_OVERRUN:
2687 dev_warn(&h->pdev->dev,
2688 "CDB %16phN data overrun\n", cp->Request.CDB);
2689 break;
2690 case CMD_INVALID: {
2691 /* print_bytes(cp, sizeof(*cp), 1, 0);
2692 print_cmd(cp); */
2693 /* We get CMD_INVALID if you address a non-existent device
2694 * instead of a selection timeout (no response). You will
2695 * see this if you yank out a drive, then try to access it.
2696 * This is kind of a shame because it means that any other
2697 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2698 * missing target. */
2699 cmd->result = DID_NO_CONNECT << 16;
2701 break;
2702 case CMD_PROTOCOL_ERR:
2703 cmd->result = DID_ERROR << 16;
2704 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2705 cp->Request.CDB);
2706 break;
2707 case CMD_HARDWARE_ERR:
2708 cmd->result = DID_ERROR << 16;
2709 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2710 cp->Request.CDB);
2711 break;
2712 case CMD_CONNECTION_LOST:
2713 cmd->result = DID_ERROR << 16;
2714 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2715 cp->Request.CDB);
2716 break;
2717 case CMD_ABORTED:
2718 cmd->result = DID_ABORT << 16;
2719 break;
2720 case CMD_ABORT_FAILED:
2721 cmd->result = DID_ERROR << 16;
2722 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2723 cp->Request.CDB);
2724 break;
2725 case CMD_UNSOLICITED_ABORT:
2726 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2727 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2728 cp->Request.CDB);
2729 break;
2730 case CMD_TIMEOUT:
2731 cmd->result = DID_TIME_OUT << 16;
2732 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2733 cp->Request.CDB);
2734 break;
2735 case CMD_UNABORTABLE:
2736 cmd->result = DID_ERROR << 16;
2737 dev_warn(&h->pdev->dev, "Command unabortable\n");
2738 break;
2739 case CMD_TMF_STATUS:
2740 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2741 cmd->result = DID_ERROR << 16;
2742 break;
2743 case CMD_IOACCEL_DISABLED:
2744 /* This only handles the direct pass-through case since RAID
2745 * offload is handled above. Just attempt a retry.
2747 cmd->result = DID_SOFT_ERROR << 16;
2748 dev_warn(&h->pdev->dev,
2749 "cp %p had HP SSD Smart Path error\n", cp);
2750 break;
2751 default:
2752 cmd->result = DID_ERROR << 16;
2753 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2754 cp, ei->CommandStatus);
2757 return hpsa_cmd_free_and_done(h, cp, cmd);
2760 static void hpsa_pci_unmap(struct pci_dev *pdev,
2761 struct CommandList *c, int sg_used, int data_direction)
2763 int i;
2765 for (i = 0; i < sg_used; i++)
2766 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2767 le32_to_cpu(c->SG[i].Len),
2768 data_direction);
2771 static int hpsa_map_one(struct pci_dev *pdev,
2772 struct CommandList *cp,
2773 unsigned char *buf,
2774 size_t buflen,
2775 int data_direction)
2777 u64 addr64;
2779 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2780 cp->Header.SGList = 0;
2781 cp->Header.SGTotal = cpu_to_le16(0);
2782 return 0;
2785 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2786 if (dma_mapping_error(&pdev->dev, addr64)) {
2787 /* Prevent subsequent unmap of something never mapped */
2788 cp->Header.SGList = 0;
2789 cp->Header.SGTotal = cpu_to_le16(0);
2790 return -1;
2792 cp->SG[0].Addr = cpu_to_le64(addr64);
2793 cp->SG[0].Len = cpu_to_le32(buflen);
2794 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2795 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2796 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2797 return 0;
2800 #define NO_TIMEOUT ((unsigned long) -1)
2801 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
2802 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2803 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2805 DECLARE_COMPLETION_ONSTACK(wait);
2807 c->waiting = &wait;
2808 __enqueue_cmd_and_start_io(h, c, reply_queue);
2809 if (timeout_msecs == NO_TIMEOUT) {
2810 /* TODO: get rid of this no-timeout thing */
2811 wait_for_completion_io(&wait);
2812 return IO_OK;
2814 if (!wait_for_completion_io_timeout(&wait,
2815 msecs_to_jiffies(timeout_msecs))) {
2816 dev_warn(&h->pdev->dev, "Command timed out.\n");
2817 return -ETIMEDOUT;
2819 return IO_OK;
2822 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2823 int reply_queue, unsigned long timeout_msecs)
2825 if (unlikely(lockup_detected(h))) {
2826 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2827 return IO_OK;
2829 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2832 static u32 lockup_detected(struct ctlr_info *h)
2834 int cpu;
2835 u32 rc, *lockup_detected;
2837 cpu = get_cpu();
2838 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2839 rc = *lockup_detected;
2840 put_cpu();
2841 return rc;
2844 #define MAX_DRIVER_CMD_RETRIES 25
2845 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2846 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2848 int backoff_time = 10, retry_count = 0;
2849 int rc;
2851 do {
2852 memset(c->err_info, 0, sizeof(*c->err_info));
2853 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2854 timeout_msecs);
2855 if (rc)
2856 break;
2857 retry_count++;
2858 if (retry_count > 3) {
2859 msleep(backoff_time);
2860 if (backoff_time < 1000)
2861 backoff_time *= 2;
2863 } while ((check_for_unit_attention(h, c) ||
2864 check_for_busy(h, c)) &&
2865 retry_count <= MAX_DRIVER_CMD_RETRIES);
2866 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2867 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2868 rc = -EIO;
2869 return rc;
2872 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2873 struct CommandList *c)
2875 const u8 *cdb = c->Request.CDB;
2876 const u8 *lun = c->Header.LUN.LunAddrBytes;
2878 dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2879 txt, lun, cdb);
2882 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2883 struct CommandList *cp)
2885 const struct ErrorInfo *ei = cp->err_info;
2886 struct device *d = &cp->h->pdev->dev;
2887 u8 sense_key, asc, ascq;
2888 int sense_len;
2890 switch (ei->CommandStatus) {
2891 case CMD_TARGET_STATUS:
2892 if (ei->SenseLen > sizeof(ei->SenseInfo))
2893 sense_len = sizeof(ei->SenseInfo);
2894 else
2895 sense_len = ei->SenseLen;
2896 decode_sense_data(ei->SenseInfo, sense_len,
2897 &sense_key, &asc, &ascq);
2898 hpsa_print_cmd(h, "SCSI status", cp);
2899 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2900 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2901 sense_key, asc, ascq);
2902 else
2903 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2904 if (ei->ScsiStatus == 0)
2905 dev_warn(d, "SCSI status is abnormally zero. "
2906 "(probably indicates selection timeout "
2907 "reported incorrectly due to a known "
2908 "firmware bug, circa July, 2001.)\n");
2909 break;
2910 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2911 break;
2912 case CMD_DATA_OVERRUN:
2913 hpsa_print_cmd(h, "overrun condition", cp);
2914 break;
2915 case CMD_INVALID: {
2916 /* controller unfortunately reports SCSI passthru's
2917 * to non-existent targets as invalid commands.
2919 hpsa_print_cmd(h, "invalid command", cp);
2920 dev_warn(d, "probably means device no longer present\n");
2922 break;
2923 case CMD_PROTOCOL_ERR:
2924 hpsa_print_cmd(h, "protocol error", cp);
2925 break;
2926 case CMD_HARDWARE_ERR:
2927 hpsa_print_cmd(h, "hardware error", cp);
2928 break;
2929 case CMD_CONNECTION_LOST:
2930 hpsa_print_cmd(h, "connection lost", cp);
2931 break;
2932 case CMD_ABORTED:
2933 hpsa_print_cmd(h, "aborted", cp);
2934 break;
2935 case CMD_ABORT_FAILED:
2936 hpsa_print_cmd(h, "abort failed", cp);
2937 break;
2938 case CMD_UNSOLICITED_ABORT:
2939 hpsa_print_cmd(h, "unsolicited abort", cp);
2940 break;
2941 case CMD_TIMEOUT:
2942 hpsa_print_cmd(h, "timed out", cp);
2943 break;
2944 case CMD_UNABORTABLE:
2945 hpsa_print_cmd(h, "unabortable", cp);
2946 break;
2947 case CMD_CTLR_LOCKUP:
2948 hpsa_print_cmd(h, "controller lockup detected", cp);
2949 break;
2950 default:
2951 hpsa_print_cmd(h, "unknown status", cp);
2952 dev_warn(d, "Unknown command status %x\n",
2953 ei->CommandStatus);
2957 static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
2958 u8 page, u8 *buf, size_t bufsize)
2960 int rc = IO_OK;
2961 struct CommandList *c;
2962 struct ErrorInfo *ei;
2964 c = cmd_alloc(h);
2965 if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
2966 page, scsi3addr, TYPE_CMD)) {
2967 rc = -1;
2968 goto out;
2970 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2971 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2972 if (rc)
2973 goto out;
2974 ei = c->err_info;
2975 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2976 hpsa_scsi_interpret_error(h, c);
2977 rc = -1;
2979 out:
2980 cmd_free(h, c);
2981 return rc;
2984 static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
2985 u8 *scsi3addr)
2987 u8 *buf;
2988 u64 sa = 0;
2989 int rc = 0;
2991 buf = kzalloc(1024, GFP_KERNEL);
2992 if (!buf)
2993 return 0;
2995 rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
2996 buf, 1024);
2998 if (rc)
2999 goto out;
3001 sa = get_unaligned_be64(buf+12);
3003 out:
3004 kfree(buf);
3005 return sa;
3008 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
3009 u16 page, unsigned char *buf,
3010 unsigned char bufsize)
3012 int rc = IO_OK;
3013 struct CommandList *c;
3014 struct ErrorInfo *ei;
3016 c = cmd_alloc(h);
3018 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
3019 page, scsi3addr, TYPE_CMD)) {
3020 rc = -1;
3021 goto out;
3023 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3024 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3025 if (rc)
3026 goto out;
3027 ei = c->err_info;
3028 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3029 hpsa_scsi_interpret_error(h, c);
3030 rc = -1;
3032 out:
3033 cmd_free(h, c);
3034 return rc;
3037 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
3038 u8 reset_type, int reply_queue)
3040 int rc = IO_OK;
3041 struct CommandList *c;
3042 struct ErrorInfo *ei;
3044 c = cmd_alloc(h);
3047 /* fill_cmd can't fail here, no data buffer to map. */
3048 (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
3049 scsi3addr, TYPE_MSG);
3050 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
3051 if (rc) {
3052 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
3053 goto out;
3055 /* no unmap needed here because no data xfer. */
3057 ei = c->err_info;
3058 if (ei->CommandStatus != 0) {
3059 hpsa_scsi_interpret_error(h, c);
3060 rc = -1;
3062 out:
3063 cmd_free(h, c);
3064 return rc;
3067 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
3068 struct hpsa_scsi_dev_t *dev,
3069 unsigned char *scsi3addr)
3071 int i;
3072 bool match = false;
3073 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
3074 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
3076 if (hpsa_is_cmd_idle(c))
3077 return false;
3079 switch (c->cmd_type) {
3080 case CMD_SCSI:
3081 case CMD_IOCTL_PEND:
3082 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
3083 sizeof(c->Header.LUN.LunAddrBytes));
3084 break;
3086 case CMD_IOACCEL1:
3087 case CMD_IOACCEL2:
3088 if (c->phys_disk == dev) {
3089 /* HBA mode match */
3090 match = true;
3091 } else {
3092 /* Possible RAID mode -- check each phys dev. */
3093 /* FIXME: Do we need to take out a lock here? If
3094 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
3095 * instead. */
3096 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3097 /* FIXME: an alternate test might be
3099 * match = dev->phys_disk[i]->ioaccel_handle
3100 * == c2->scsi_nexus; */
3101 match = dev->phys_disk[i] == c->phys_disk;
3104 break;
3106 case IOACCEL2_TMF:
3107 for (i = 0; i < dev->nphysical_disks && !match; i++) {
3108 match = dev->phys_disk[i]->ioaccel_handle ==
3109 le32_to_cpu(ac->it_nexus);
3111 break;
3113 case 0: /* The command is in the middle of being initialized. */
3114 match = false;
3115 break;
3117 default:
3118 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
3119 c->cmd_type);
3120 BUG();
3123 return match;
3126 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3127 unsigned char *scsi3addr, u8 reset_type, int reply_queue)
3129 int i;
3130 int rc = 0;
3132 /* We can really only handle one reset at a time */
3133 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
3134 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
3135 return -EINTR;
3138 BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
3140 for (i = 0; i < h->nr_cmds; i++) {
3141 struct CommandList *c = h->cmd_pool + i;
3142 int refcount = atomic_inc_return(&c->refcount);
3144 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
3145 unsigned long flags;
3148 * Mark the target command as having a reset pending,
3149 * then lock a lock so that the command cannot complete
3150 * while we're considering it. If the command is not
3151 * idle then count it; otherwise revoke the event.
3153 c->reset_pending = dev;
3154 spin_lock_irqsave(&h->lock, flags); /* Implied MB */
3155 if (!hpsa_is_cmd_idle(c))
3156 atomic_inc(&dev->reset_cmds_out);
3157 else
3158 c->reset_pending = NULL;
3159 spin_unlock_irqrestore(&h->lock, flags);
3162 cmd_free(h, c);
3165 rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
3166 if (!rc)
3167 wait_event(h->event_sync_wait_queue,
3168 atomic_read(&dev->reset_cmds_out) == 0 ||
3169 lockup_detected(h));
3171 if (unlikely(lockup_detected(h))) {
3172 dev_warn(&h->pdev->dev,
3173 "Controller lockup detected during reset wait\n");
3174 rc = -ENODEV;
3177 if (unlikely(rc))
3178 atomic_set(&dev->reset_cmds_out, 0);
3179 else
3180 rc = wait_for_device_to_become_ready(h, scsi3addr, 0);
3182 mutex_unlock(&h->reset_mutex);
3183 return rc;
3186 static void hpsa_get_raid_level(struct ctlr_info *h,
3187 unsigned char *scsi3addr, unsigned char *raid_level)
3189 int rc;
3190 unsigned char *buf;
3192 *raid_level = RAID_UNKNOWN;
3193 buf = kzalloc(64, GFP_KERNEL);
3194 if (!buf)
3195 return;
3197 if (!hpsa_vpd_page_supported(h, scsi3addr,
3198 HPSA_VPD_LV_DEVICE_GEOMETRY))
3199 goto exit;
3201 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3202 HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
3204 if (rc == 0)
3205 *raid_level = buf[8];
3206 if (*raid_level > RAID_UNKNOWN)
3207 *raid_level = RAID_UNKNOWN;
3208 exit:
3209 kfree(buf);
3210 return;
3213 #define HPSA_MAP_DEBUG
3214 #ifdef HPSA_MAP_DEBUG
3215 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
3216 struct raid_map_data *map_buff)
3218 struct raid_map_disk_data *dd = &map_buff->data[0];
3219 int map, row, col;
3220 u16 map_cnt, row_cnt, disks_per_row;
3222 if (rc != 0)
3223 return;
3225 /* Show details only if debugging has been activated. */
3226 if (h->raid_offload_debug < 2)
3227 return;
3229 dev_info(&h->pdev->dev, "structure_size = %u\n",
3230 le32_to_cpu(map_buff->structure_size));
3231 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3232 le32_to_cpu(map_buff->volume_blk_size));
3233 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3234 le64_to_cpu(map_buff->volume_blk_cnt));
3235 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3236 map_buff->phys_blk_shift);
3237 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3238 map_buff->parity_rotation_shift);
3239 dev_info(&h->pdev->dev, "strip_size = %u\n",
3240 le16_to_cpu(map_buff->strip_size));
3241 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3242 le64_to_cpu(map_buff->disk_starting_blk));
3243 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3244 le64_to_cpu(map_buff->disk_blk_cnt));
3245 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3246 le16_to_cpu(map_buff->data_disks_per_row));
3247 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3248 le16_to_cpu(map_buff->metadata_disks_per_row));
3249 dev_info(&h->pdev->dev, "row_cnt = %u\n",
3250 le16_to_cpu(map_buff->row_cnt));
3251 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3252 le16_to_cpu(map_buff->layout_map_count));
3253 dev_info(&h->pdev->dev, "flags = 0x%x\n",
3254 le16_to_cpu(map_buff->flags));
3255 dev_info(&h->pdev->dev, "encryption = %s\n",
3256 le16_to_cpu(map_buff->flags) &
3257 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
3258 dev_info(&h->pdev->dev, "dekindex = %u\n",
3259 le16_to_cpu(map_buff->dekindex));
3260 map_cnt = le16_to_cpu(map_buff->layout_map_count);
3261 for (map = 0; map < map_cnt; map++) {
3262 dev_info(&h->pdev->dev, "Map%u:\n", map);
3263 row_cnt = le16_to_cpu(map_buff->row_cnt);
3264 for (row = 0; row < row_cnt; row++) {
3265 dev_info(&h->pdev->dev, " Row%u:\n", row);
3266 disks_per_row =
3267 le16_to_cpu(map_buff->data_disks_per_row);
3268 for (col = 0; col < disks_per_row; col++, dd++)
3269 dev_info(&h->pdev->dev,
3270 " D%02u: h=0x%04x xor=%u,%u\n",
3271 col, dd->ioaccel_handle,
3272 dd->xor_mult[0], dd->xor_mult[1]);
3273 disks_per_row =
3274 le16_to_cpu(map_buff->metadata_disks_per_row);
3275 for (col = 0; col < disks_per_row; col++, dd++)
3276 dev_info(&h->pdev->dev,
3277 " M%02u: h=0x%04x xor=%u,%u\n",
3278 col, dd->ioaccel_handle,
3279 dd->xor_mult[0], dd->xor_mult[1]);
3283 #else
3284 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3285 __attribute__((unused)) int rc,
3286 __attribute__((unused)) struct raid_map_data *map_buff)
3289 #endif
3291 static int hpsa_get_raid_map(struct ctlr_info *h,
3292 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3294 int rc = 0;
3295 struct CommandList *c;
3296 struct ErrorInfo *ei;
3298 c = cmd_alloc(h);
3300 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3301 sizeof(this_device->raid_map), 0,
3302 scsi3addr, TYPE_CMD)) {
3303 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3304 cmd_free(h, c);
3305 return -1;
3307 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3308 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3309 if (rc)
3310 goto out;
3311 ei = c->err_info;
3312 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3313 hpsa_scsi_interpret_error(h, c);
3314 rc = -1;
3315 goto out;
3317 cmd_free(h, c);
3319 /* @todo in the future, dynamically allocate RAID map memory */
3320 if (le32_to_cpu(this_device->raid_map.structure_size) >
3321 sizeof(this_device->raid_map)) {
3322 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3323 rc = -1;
3325 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3326 return rc;
3327 out:
3328 cmd_free(h, c);
3329 return rc;
3332 static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3333 unsigned char scsi3addr[], u16 bmic_device_index,
3334 struct bmic_sense_subsystem_info *buf, size_t bufsize)
3336 int rc = IO_OK;
3337 struct CommandList *c;
3338 struct ErrorInfo *ei;
3340 c = cmd_alloc(h);
3342 rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3343 0, RAID_CTLR_LUNID, TYPE_CMD);
3344 if (rc)
3345 goto out;
3347 c->Request.CDB[2] = bmic_device_index & 0xff;
3348 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3350 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3351 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3352 if (rc)
3353 goto out;
3354 ei = c->err_info;
3355 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3356 hpsa_scsi_interpret_error(h, c);
3357 rc = -1;
3359 out:
3360 cmd_free(h, c);
3361 return rc;
3364 static int hpsa_bmic_id_controller(struct ctlr_info *h,
3365 struct bmic_identify_controller *buf, size_t bufsize)
3367 int rc = IO_OK;
3368 struct CommandList *c;
3369 struct ErrorInfo *ei;
3371 c = cmd_alloc(h);
3373 rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3374 0, RAID_CTLR_LUNID, TYPE_CMD);
3375 if (rc)
3376 goto out;
3378 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3379 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3380 if (rc)
3381 goto out;
3382 ei = c->err_info;
3383 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3384 hpsa_scsi_interpret_error(h, c);
3385 rc = -1;
3387 out:
3388 cmd_free(h, c);
3389 return rc;
3392 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3393 unsigned char scsi3addr[], u16 bmic_device_index,
3394 struct bmic_identify_physical_device *buf, size_t bufsize)
3396 int rc = IO_OK;
3397 struct CommandList *c;
3398 struct ErrorInfo *ei;
3400 c = cmd_alloc(h);
3401 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3402 0, RAID_CTLR_LUNID, TYPE_CMD);
3403 if (rc)
3404 goto out;
3406 c->Request.CDB[2] = bmic_device_index & 0xff;
3407 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3409 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3410 NO_TIMEOUT);
3411 ei = c->err_info;
3412 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3413 hpsa_scsi_interpret_error(h, c);
3414 rc = -1;
3416 out:
3417 cmd_free(h, c);
3419 return rc;
3423 * get enclosure information
3424 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
3425 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
3426 * Uses id_physical_device to determine the box_index.
3428 static void hpsa_get_enclosure_info(struct ctlr_info *h,
3429 unsigned char *scsi3addr,
3430 struct ReportExtendedLUNdata *rlep, int rle_index,
3431 struct hpsa_scsi_dev_t *encl_dev)
3433 int rc = -1;
3434 struct CommandList *c = NULL;
3435 struct ErrorInfo *ei = NULL;
3436 struct bmic_sense_storage_box_params *bssbp = NULL;
3437 struct bmic_identify_physical_device *id_phys = NULL;
3438 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3439 u16 bmic_device_index = 0;
3441 bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
3443 encl_dev->sas_address =
3444 hpsa_get_enclosure_logical_identifier(h, scsi3addr);
3446 if (encl_dev->target == -1 || encl_dev->lun == -1) {
3447 rc = IO_OK;
3448 goto out;
3451 if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
3452 rc = IO_OK;
3453 goto out;
3456 bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
3457 if (!bssbp)
3458 goto out;
3460 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3461 if (!id_phys)
3462 goto out;
3464 rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
3465 id_phys, sizeof(*id_phys));
3466 if (rc) {
3467 dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
3468 __func__, encl_dev->external, bmic_device_index);
3469 goto out;
3472 c = cmd_alloc(h);
3474 rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
3475 sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
3477 if (rc)
3478 goto out;
3480 if (id_phys->phys_connector[1] == 'E')
3481 c->Request.CDB[5] = id_phys->box_index;
3482 else
3483 c->Request.CDB[5] = 0;
3485 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3486 NO_TIMEOUT);
3487 if (rc)
3488 goto out;
3490 ei = c->err_info;
3491 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3492 rc = -1;
3493 goto out;
3496 encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
3497 memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
3498 bssbp->phys_connector, sizeof(bssbp->phys_connector));
3500 rc = IO_OK;
3501 out:
3502 kfree(bssbp);
3503 kfree(id_phys);
3505 if (c)
3506 cmd_free(h, c);
3508 if (rc != IO_OK)
3509 hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
3510 "Error, could not get enclosure information");
3513 static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
3514 unsigned char *scsi3addr)
3516 struct ReportExtendedLUNdata *physdev;
3517 u32 nphysicals;
3518 u64 sa = 0;
3519 int i;
3521 physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
3522 if (!physdev)
3523 return 0;
3525 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3526 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3527 kfree(physdev);
3528 return 0;
3530 nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
3532 for (i = 0; i < nphysicals; i++)
3533 if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
3534 sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
3535 break;
3538 kfree(physdev);
3540 return sa;
3543 static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3544 struct hpsa_scsi_dev_t *dev)
3546 int rc;
3547 u64 sa = 0;
3549 if (is_hba_lunid(scsi3addr)) {
3550 struct bmic_sense_subsystem_info *ssi;
3552 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3553 if (!ssi)
3554 return;
3556 rc = hpsa_bmic_sense_subsystem_information(h,
3557 scsi3addr, 0, ssi, sizeof(*ssi));
3558 if (rc == 0) {
3559 sa = get_unaligned_be64(ssi->primary_world_wide_id);
3560 h->sas_address = sa;
3563 kfree(ssi);
3564 } else
3565 sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
3567 dev->sas_address = sa;
3570 static void hpsa_ext_ctrl_present(struct ctlr_info *h,
3571 struct ReportExtendedLUNdata *physdev)
3573 u32 nphysicals;
3574 int i;
3576 if (h->discovery_polling)
3577 return;
3579 nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
3581 for (i = 0; i < nphysicals; i++) {
3582 if (physdev->LUN[i].device_type ==
3583 BMIC_DEVICE_TYPE_CONTROLLER
3584 && !is_hba_lunid(physdev->LUN[i].lunid)) {
3585 dev_info(&h->pdev->dev,
3586 "External controller present, activate discovery polling and disable rld caching\n");
3587 hpsa_disable_rld_caching(h);
3588 h->discovery_polling = 1;
3589 break;
3594 /* Get a device id from inquiry page 0x83 */
3595 static bool hpsa_vpd_page_supported(struct ctlr_info *h,
3596 unsigned char scsi3addr[], u8 page)
3598 int rc;
3599 int i;
3600 int pages;
3601 unsigned char *buf, bufsize;
3603 buf = kzalloc(256, GFP_KERNEL);
3604 if (!buf)
3605 return false;
3607 /* Get the size of the page list first */
3608 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3609 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3610 buf, HPSA_VPD_HEADER_SZ);
3611 if (rc != 0)
3612 goto exit_unsupported;
3613 pages = buf[3];
3614 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3615 bufsize = pages + HPSA_VPD_HEADER_SZ;
3616 else
3617 bufsize = 255;
3619 /* Get the whole VPD page list */
3620 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3621 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3622 buf, bufsize);
3623 if (rc != 0)
3624 goto exit_unsupported;
3626 pages = buf[3];
3627 for (i = 1; i <= pages; i++)
3628 if (buf[3 + i] == page)
3629 goto exit_supported;
3630 exit_unsupported:
3631 kfree(buf);
3632 return false;
3633 exit_supported:
3634 kfree(buf);
3635 return true;
3639 * Called during a scan operation.
3640 * Sets ioaccel status on the new device list, not the existing device list
3642 * The device list used during I/O will be updated later in
3643 * adjust_hpsa_scsi_table.
3645 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3646 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3648 int rc;
3649 unsigned char *buf;
3650 u8 ioaccel_status;
3652 this_device->offload_config = 0;
3653 this_device->offload_enabled = 0;
3654 this_device->offload_to_be_enabled = 0;
3656 buf = kzalloc(64, GFP_KERNEL);
3657 if (!buf)
3658 return;
3659 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3660 goto out;
3661 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3662 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3663 if (rc != 0)
3664 goto out;
3666 #define IOACCEL_STATUS_BYTE 4
3667 #define OFFLOAD_CONFIGURED_BIT 0x01
3668 #define OFFLOAD_ENABLED_BIT 0x02
3669 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3670 this_device->offload_config =
3671 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3672 if (this_device->offload_config) {
3673 this_device->offload_to_be_enabled =
3674 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3675 if (hpsa_get_raid_map(h, scsi3addr, this_device))
3676 this_device->offload_to_be_enabled = 0;
3679 out:
3680 kfree(buf);
3681 return;
3684 /* Get the device id from inquiry page 0x83 */
3685 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3686 unsigned char *device_id, int index, int buflen)
3688 int rc;
3689 unsigned char *buf;
3691 /* Does controller have VPD for device id? */
3692 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
3693 return 1; /* not supported */
3695 buf = kzalloc(64, GFP_KERNEL);
3696 if (!buf)
3697 return -ENOMEM;
3699 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3700 HPSA_VPD_LV_DEVICE_ID, buf, 64);
3701 if (rc == 0) {
3702 if (buflen > 16)
3703 buflen = 16;
3704 memcpy(device_id, &buf[8], buflen);
3707 kfree(buf);
3709 return rc; /*0 - got id, otherwise, didn't */
3712 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3713 void *buf, int bufsize,
3714 int extended_response)
3716 int rc = IO_OK;
3717 struct CommandList *c;
3718 unsigned char scsi3addr[8];
3719 struct ErrorInfo *ei;
3721 c = cmd_alloc(h);
3723 /* address the controller */
3724 memset(scsi3addr, 0, sizeof(scsi3addr));
3725 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3726 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3727 rc = -EAGAIN;
3728 goto out;
3730 if (extended_response)
3731 c->Request.CDB[1] = extended_response;
3732 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3733 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3734 if (rc)
3735 goto out;
3736 ei = c->err_info;
3737 if (ei->CommandStatus != 0 &&
3738 ei->CommandStatus != CMD_DATA_UNDERRUN) {
3739 hpsa_scsi_interpret_error(h, c);
3740 rc = -EIO;
3741 } else {
3742 struct ReportLUNdata *rld = buf;
3744 if (rld->extended_response_flag != extended_response) {
3745 if (!h->legacy_board) {
3746 dev_err(&h->pdev->dev,
3747 "report luns requested format %u, got %u\n",
3748 extended_response,
3749 rld->extended_response_flag);
3750 rc = -EINVAL;
3751 } else
3752 rc = -EOPNOTSUPP;
3755 out:
3756 cmd_free(h, c);
3757 return rc;
3760 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3761 struct ReportExtendedLUNdata *buf, int bufsize)
3763 int rc;
3764 struct ReportLUNdata *lbuf;
3766 rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3767 HPSA_REPORT_PHYS_EXTENDED);
3768 if (!rc || rc != -EOPNOTSUPP)
3769 return rc;
3771 /* REPORT PHYS EXTENDED is not supported */
3772 lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
3773 if (!lbuf)
3774 return -ENOMEM;
3776 rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
3777 if (!rc) {
3778 int i;
3779 u32 nphys;
3781 /* Copy ReportLUNdata header */
3782 memcpy(buf, lbuf, 8);
3783 nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
3784 for (i = 0; i < nphys; i++)
3785 memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
3787 kfree(lbuf);
3788 return rc;
3791 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3792 struct ReportLUNdata *buf, int bufsize)
3794 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3797 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3798 int bus, int target, int lun)
3800 device->bus = bus;
3801 device->target = target;
3802 device->lun = lun;
3805 /* Use VPD inquiry to get details of volume status */
3806 static int hpsa_get_volume_status(struct ctlr_info *h,
3807 unsigned char scsi3addr[])
3809 int rc;
3810 int status;
3811 int size;
3812 unsigned char *buf;
3814 buf = kzalloc(64, GFP_KERNEL);
3815 if (!buf)
3816 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3818 /* Does controller have VPD for logical volume status? */
3819 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3820 goto exit_failed;
3822 /* Get the size of the VPD return buffer */
3823 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3824 buf, HPSA_VPD_HEADER_SZ);
3825 if (rc != 0)
3826 goto exit_failed;
3827 size = buf[3];
3829 /* Now get the whole VPD buffer */
3830 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3831 buf, size + HPSA_VPD_HEADER_SZ);
3832 if (rc != 0)
3833 goto exit_failed;
3834 status = buf[4]; /* status byte */
3836 kfree(buf);
3837 return status;
3838 exit_failed:
3839 kfree(buf);
3840 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3843 /* Determine offline status of a volume.
3844 * Return either:
3845 * 0 (not offline)
3846 * 0xff (offline for unknown reasons)
3847 * # (integer code indicating one of several NOT READY states
3848 * describing why a volume is to be kept offline)
3850 static unsigned char hpsa_volume_offline(struct ctlr_info *h,
3851 unsigned char scsi3addr[])
3853 struct CommandList *c;
3854 unsigned char *sense;
3855 u8 sense_key, asc, ascq;
3856 int sense_len;
3857 int rc, ldstat = 0;
3858 u16 cmd_status;
3859 u8 scsi_status;
3860 #define ASC_LUN_NOT_READY 0x04
3861 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3862 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3864 c = cmd_alloc(h);
3866 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3867 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3868 NO_TIMEOUT);
3869 if (rc) {
3870 cmd_free(h, c);
3871 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3873 sense = c->err_info->SenseInfo;
3874 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3875 sense_len = sizeof(c->err_info->SenseInfo);
3876 else
3877 sense_len = c->err_info->SenseLen;
3878 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3879 cmd_status = c->err_info->CommandStatus;
3880 scsi_status = c->err_info->ScsiStatus;
3881 cmd_free(h, c);
3883 /* Determine the reason for not ready state */
3884 ldstat = hpsa_get_volume_status(h, scsi3addr);
3886 /* Keep volume offline in certain cases: */
3887 switch (ldstat) {
3888 case HPSA_LV_FAILED:
3889 case HPSA_LV_UNDERGOING_ERASE:
3890 case HPSA_LV_NOT_AVAILABLE:
3891 case HPSA_LV_UNDERGOING_RPI:
3892 case HPSA_LV_PENDING_RPI:
3893 case HPSA_LV_ENCRYPTED_NO_KEY:
3894 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3895 case HPSA_LV_UNDERGOING_ENCRYPTION:
3896 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3897 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3898 return ldstat;
3899 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3900 /* If VPD status page isn't available,
3901 * use ASC/ASCQ to determine state
3903 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3904 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3905 return ldstat;
3906 break;
3907 default:
3908 break;
3910 return HPSA_LV_OK;
3913 static int hpsa_update_device_info(struct ctlr_info *h,
3914 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3915 unsigned char *is_OBDR_device)
3918 #define OBDR_SIG_OFFSET 43
3919 #define OBDR_TAPE_SIG "$DR-10"
3920 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3921 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3923 unsigned char *inq_buff;
3924 unsigned char *obdr_sig;
3925 int rc = 0;
3927 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3928 if (!inq_buff) {
3929 rc = -ENOMEM;
3930 goto bail_out;
3933 /* Do an inquiry to the device to see what it is. */
3934 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3935 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3936 dev_err(&h->pdev->dev,
3937 "%s: inquiry failed, device will be skipped.\n",
3938 __func__);
3939 rc = HPSA_INQUIRY_FAILED;
3940 goto bail_out;
3943 scsi_sanitize_inquiry_string(&inq_buff[8], 8);
3944 scsi_sanitize_inquiry_string(&inq_buff[16], 16);
3946 this_device->devtype = (inq_buff[0] & 0x1f);
3947 memcpy(this_device->scsi3addr, scsi3addr, 8);
3948 memcpy(this_device->vendor, &inq_buff[8],
3949 sizeof(this_device->vendor));
3950 memcpy(this_device->model, &inq_buff[16],
3951 sizeof(this_device->model));
3952 this_device->rev = inq_buff[2];
3953 memset(this_device->device_id, 0,
3954 sizeof(this_device->device_id));
3955 if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3956 sizeof(this_device->device_id)) < 0)
3957 dev_err(&h->pdev->dev,
3958 "hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
3959 h->ctlr, __func__,
3960 h->scsi_host->host_no,
3961 this_device->target, this_device->lun,
3962 scsi_device_type(this_device->devtype),
3963 this_device->model);
3965 if ((this_device->devtype == TYPE_DISK ||
3966 this_device->devtype == TYPE_ZBC) &&
3967 is_logical_dev_addr_mode(scsi3addr)) {
3968 unsigned char volume_offline;
3970 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3971 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3972 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3973 volume_offline = hpsa_volume_offline(h, scsi3addr);
3974 if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
3975 h->legacy_board) {
3977 * Legacy boards might not support volume status
3979 dev_info(&h->pdev->dev,
3980 "C0:T%d:L%d Volume status not available, assuming online.\n",
3981 this_device->target, this_device->lun);
3982 volume_offline = 0;
3984 this_device->volume_offline = volume_offline;
3985 if (volume_offline == HPSA_LV_FAILED) {
3986 rc = HPSA_LV_FAILED;
3987 dev_err(&h->pdev->dev,
3988 "%s: LV failed, device will be skipped.\n",
3989 __func__);
3990 goto bail_out;
3992 } else {
3993 this_device->raid_level = RAID_UNKNOWN;
3994 this_device->offload_config = 0;
3995 this_device->offload_enabled = 0;
3996 this_device->offload_to_be_enabled = 0;
3997 this_device->hba_ioaccel_enabled = 0;
3998 this_device->volume_offline = 0;
3999 this_device->queue_depth = h->nr_cmds;
4002 if (this_device->external)
4003 this_device->queue_depth = EXTERNAL_QD;
4005 if (is_OBDR_device) {
4006 /* See if this is a One-Button-Disaster-Recovery device
4007 * by looking for "$DR-10" at offset 43 in inquiry data.
4009 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
4010 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
4011 strncmp(obdr_sig, OBDR_TAPE_SIG,
4012 OBDR_SIG_LEN) == 0);
4014 kfree(inq_buff);
4015 return 0;
4017 bail_out:
4018 kfree(inq_buff);
4019 return rc;
4023 * Helper function to assign bus, target, lun mapping of devices.
4024 * Logical drive target and lun are assigned at this time, but
4025 * physical device lun and target assignment are deferred (assigned
4026 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
4028 static void figure_bus_target_lun(struct ctlr_info *h,
4029 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
4031 u32 lunid = get_unaligned_le32(lunaddrbytes);
4033 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
4034 /* physical device, target and lun filled in later */
4035 if (is_hba_lunid(lunaddrbytes)) {
4036 int bus = HPSA_HBA_BUS;
4038 if (!device->rev)
4039 bus = HPSA_LEGACY_HBA_BUS;
4040 hpsa_set_bus_target_lun(device,
4041 bus, 0, lunid & 0x3fff);
4042 } else
4043 /* defer target, lun assignment for physical devices */
4044 hpsa_set_bus_target_lun(device,
4045 HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
4046 return;
4048 /* It's a logical device */
4049 if (device->external) {
4050 hpsa_set_bus_target_lun(device,
4051 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
4052 lunid & 0x00ff);
4053 return;
4055 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
4056 0, lunid & 0x3fff);
4059 static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
4060 int i, int nphysicals, int nlocal_logicals)
4062 /* In report logicals, local logicals are listed first,
4063 * then any externals.
4065 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4067 if (i == raid_ctlr_position)
4068 return 0;
4070 if (i < logicals_start)
4071 return 0;
4073 /* i is in logicals range, but still within local logicals */
4074 if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
4075 return 0;
4077 return 1; /* it's an external lun */
4081 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
4082 * logdev. The number of luns in physdev and logdev are returned in
4083 * *nphysicals and *nlogicals, respectively.
4084 * Returns 0 on success, -1 otherwise.
4086 static int hpsa_gather_lun_info(struct ctlr_info *h,
4087 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
4088 struct ReportLUNdata *logdev, u32 *nlogicals)
4090 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
4091 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
4092 return -1;
4094 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
4095 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
4096 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
4097 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
4098 *nphysicals = HPSA_MAX_PHYS_LUN;
4100 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
4101 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
4102 return -1;
4104 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
4105 /* Reject Logicals in excess of our max capability. */
4106 if (*nlogicals > HPSA_MAX_LUN) {
4107 dev_warn(&h->pdev->dev,
4108 "maximum logical LUNs (%d) exceeded. "
4109 "%d LUNs ignored.\n", HPSA_MAX_LUN,
4110 *nlogicals - HPSA_MAX_LUN);
4111 *nlogicals = HPSA_MAX_LUN;
4113 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
4114 dev_warn(&h->pdev->dev,
4115 "maximum logical + physical LUNs (%d) exceeded. "
4116 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
4117 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
4118 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
4120 return 0;
4123 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
4124 int i, int nphysicals, int nlogicals,
4125 struct ReportExtendedLUNdata *physdev_list,
4126 struct ReportLUNdata *logdev_list)
4128 /* Helper function, figure out where the LUN ID info is coming from
4129 * given index i, lists of physical and logical devices, where in
4130 * the list the raid controller is supposed to appear (first or last)
4133 int logicals_start = nphysicals + (raid_ctlr_position == 0);
4134 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
4136 if (i == raid_ctlr_position)
4137 return RAID_CTLR_LUNID;
4139 if (i < logicals_start)
4140 return &physdev_list->LUN[i -
4141 (raid_ctlr_position == 0)].lunid[0];
4143 if (i < last_device)
4144 return &logdev_list->LUN[i - nphysicals -
4145 (raid_ctlr_position == 0)][0];
4146 BUG();
4147 return NULL;
4150 /* get physical drive ioaccel handle and queue depth */
4151 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
4152 struct hpsa_scsi_dev_t *dev,
4153 struct ReportExtendedLUNdata *rlep, int rle_index,
4154 struct bmic_identify_physical_device *id_phys)
4156 int rc;
4157 struct ext_report_lun_entry *rle;
4159 rle = &rlep->LUN[rle_index];
4161 dev->ioaccel_handle = rle->ioaccel_handle;
4162 if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
4163 dev->hba_ioaccel_enabled = 1;
4164 memset(id_phys, 0, sizeof(*id_phys));
4165 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
4166 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
4167 sizeof(*id_phys));
4168 if (!rc)
4169 /* Reserve space for FW operations */
4170 #define DRIVE_CMDS_RESERVED_FOR_FW 2
4171 #define DRIVE_QUEUE_DEPTH 7
4172 dev->queue_depth =
4173 le16_to_cpu(id_phys->current_queue_depth_limit) -
4174 DRIVE_CMDS_RESERVED_FOR_FW;
4175 else
4176 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
4179 static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
4180 struct ReportExtendedLUNdata *rlep, int rle_index,
4181 struct bmic_identify_physical_device *id_phys)
4183 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
4185 if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
4186 this_device->hba_ioaccel_enabled = 1;
4188 memcpy(&this_device->active_path_index,
4189 &id_phys->active_path_number,
4190 sizeof(this_device->active_path_index));
4191 memcpy(&this_device->path_map,
4192 &id_phys->redundant_path_present_map,
4193 sizeof(this_device->path_map));
4194 memcpy(&this_device->box,
4195 &id_phys->alternate_paths_phys_box_on_port,
4196 sizeof(this_device->box));
4197 memcpy(&this_device->phys_connector,
4198 &id_phys->alternate_paths_phys_connector,
4199 sizeof(this_device->phys_connector));
4200 memcpy(&this_device->bay,
4201 &id_phys->phys_bay_in_box,
4202 sizeof(this_device->bay));
4205 /* get number of local logical disks. */
4206 static int hpsa_set_local_logical_count(struct ctlr_info *h,
4207 struct bmic_identify_controller *id_ctlr,
4208 u32 *nlocals)
4210 int rc;
4212 if (!id_ctlr) {
4213 dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
4214 __func__);
4215 return -ENOMEM;
4217 memset(id_ctlr, 0, sizeof(*id_ctlr));
4218 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
4219 if (!rc)
4220 if (id_ctlr->configured_logical_drive_count < 255)
4221 *nlocals = id_ctlr->configured_logical_drive_count;
4222 else
4223 *nlocals = le16_to_cpu(
4224 id_ctlr->extended_logical_unit_count);
4225 else
4226 *nlocals = -1;
4227 return rc;
4230 static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
4232 struct bmic_identify_physical_device *id_phys;
4233 bool is_spare = false;
4234 int rc;
4236 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4237 if (!id_phys)
4238 return false;
4240 rc = hpsa_bmic_id_physical_device(h,
4241 lunaddrbytes,
4242 GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
4243 id_phys, sizeof(*id_phys));
4244 if (rc == 0)
4245 is_spare = (id_phys->more_flags >> 6) & 0x01;
4247 kfree(id_phys);
4248 return is_spare;
4251 #define RPL_DEV_FLAG_NON_DISK 0x1
4252 #define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2
4253 #define RPL_DEV_FLAG_UNCONFIG_DISK 0x4
4255 #define BMIC_DEVICE_TYPE_ENCLOSURE 6
4257 static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
4258 struct ext_report_lun_entry *rle)
4260 u8 device_flags;
4261 u8 device_type;
4263 if (!MASKED_DEVICE(lunaddrbytes))
4264 return false;
4266 device_flags = rle->device_flags;
4267 device_type = rle->device_type;
4269 if (device_flags & RPL_DEV_FLAG_NON_DISK) {
4270 if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
4271 return false;
4272 return true;
4275 if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
4276 return false;
4278 if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
4279 return false;
4282 * Spares may be spun down, we do not want to
4283 * do an Inquiry to a RAID set spare drive as
4284 * that would have them spun up, that is a
4285 * performance hit because I/O to the RAID device
4286 * stops while the spin up occurs which can take
4287 * over 50 seconds.
4289 if (hpsa_is_disk_spare(h, lunaddrbytes))
4290 return true;
4292 return false;
4295 static void hpsa_update_scsi_devices(struct ctlr_info *h)
4297 /* the idea here is we could get notified
4298 * that some devices have changed, so we do a report
4299 * physical luns and report logical luns cmd, and adjust
4300 * our list of devices accordingly.
4302 * The scsi3addr's of devices won't change so long as the
4303 * adapter is not reset. That means we can rescan and
4304 * tell which devices we already know about, vs. new
4305 * devices, vs. disappearing devices.
4307 struct ReportExtendedLUNdata *physdev_list = NULL;
4308 struct ReportLUNdata *logdev_list = NULL;
4309 struct bmic_identify_physical_device *id_phys = NULL;
4310 struct bmic_identify_controller *id_ctlr = NULL;
4311 u32 nphysicals = 0;
4312 u32 nlogicals = 0;
4313 u32 nlocal_logicals = 0;
4314 u32 ndev_allocated = 0;
4315 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
4316 int ncurrent = 0;
4317 int i, n_ext_target_devs, ndevs_to_allocate;
4318 int raid_ctlr_position;
4319 bool physical_device;
4320 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
4322 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
4323 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
4324 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
4325 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
4326 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4327 id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
4329 if (!currentsd || !physdev_list || !logdev_list ||
4330 !tmpdevice || !id_phys || !id_ctlr) {
4331 dev_err(&h->pdev->dev, "out of memory\n");
4332 goto out;
4334 memset(lunzerobits, 0, sizeof(lunzerobits));
4336 h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
4338 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
4339 logdev_list, &nlogicals)) {
4340 h->drv_req_rescan = 1;
4341 goto out;
4344 /* Set number of local logicals (non PTRAID) */
4345 if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
4346 dev_warn(&h->pdev->dev,
4347 "%s: Can't determine number of local logical devices.\n",
4348 __func__);
4351 /* We might see up to the maximum number of logical and physical disks
4352 * plus external target devices, and a device for the local RAID
4353 * controller.
4355 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
4357 hpsa_ext_ctrl_present(h, physdev_list);
4359 /* Allocate the per device structures */
4360 for (i = 0; i < ndevs_to_allocate; i++) {
4361 if (i >= HPSA_MAX_DEVICES) {
4362 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
4363 " %d devices ignored.\n", HPSA_MAX_DEVICES,
4364 ndevs_to_allocate - HPSA_MAX_DEVICES);
4365 break;
4368 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4369 if (!currentsd[i]) {
4370 h->drv_req_rescan = 1;
4371 goto out;
4373 ndev_allocated++;
4376 if (is_scsi_rev_5(h))
4377 raid_ctlr_position = 0;
4378 else
4379 raid_ctlr_position = nphysicals + nlogicals;
4381 /* adjust our table of devices */
4382 n_ext_target_devs = 0;
4383 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
4384 u8 *lunaddrbytes, is_OBDR = 0;
4385 int rc = 0;
4386 int phys_dev_index = i - (raid_ctlr_position == 0);
4387 bool skip_device = false;
4389 memset(tmpdevice, 0, sizeof(*tmpdevice));
4391 physical_device = i < nphysicals + (raid_ctlr_position == 0);
4393 /* Figure out where the LUN ID info is coming from */
4394 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4395 i, nphysicals, nlogicals, physdev_list, logdev_list);
4397 /* Determine if this is a lun from an external target array */
4398 tmpdevice->external =
4399 figure_external_status(h, raid_ctlr_position, i,
4400 nphysicals, nlocal_logicals);
4403 * Skip over some devices such as a spare.
4405 if (!tmpdevice->external && physical_device) {
4406 skip_device = hpsa_skip_device(h, lunaddrbytes,
4407 &physdev_list->LUN[phys_dev_index]);
4408 if (skip_device)
4409 continue;
4412 /* Get device type, vendor, model, device id, raid_map */
4413 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4414 &is_OBDR);
4415 if (rc == -ENOMEM) {
4416 dev_warn(&h->pdev->dev,
4417 "Out of memory, rescan deferred.\n");
4418 h->drv_req_rescan = 1;
4419 goto out;
4421 if (rc) {
4422 h->drv_req_rescan = 1;
4423 continue;
4426 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4427 this_device = currentsd[ncurrent];
4429 *this_device = *tmpdevice;
4430 this_device->physical_device = physical_device;
4433 * Expose all devices except for physical devices that
4434 * are masked.
4436 if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
4437 this_device->expose_device = 0;
4438 else
4439 this_device->expose_device = 1;
4443 * Get the SAS address for physical devices that are exposed.
4445 if (this_device->physical_device && this_device->expose_device)
4446 hpsa_get_sas_address(h, lunaddrbytes, this_device);
4448 switch (this_device->devtype) {
4449 case TYPE_ROM:
4450 /* We don't *really* support actual CD-ROM devices,
4451 * just "One Button Disaster Recovery" tape drive
4452 * which temporarily pretends to be a CD-ROM drive.
4453 * So we check that the device is really an OBDR tape
4454 * device by checking for "$DR-10" in bytes 43-48 of
4455 * the inquiry data.
4457 if (is_OBDR)
4458 ncurrent++;
4459 break;
4460 case TYPE_DISK:
4461 case TYPE_ZBC:
4462 if (this_device->physical_device) {
4463 /* The disk is in HBA mode. */
4464 /* Never use RAID mapper in HBA mode. */
4465 this_device->offload_enabled = 0;
4466 hpsa_get_ioaccel_drive_info(h, this_device,
4467 physdev_list, phys_dev_index, id_phys);
4468 hpsa_get_path_info(this_device,
4469 physdev_list, phys_dev_index, id_phys);
4471 ncurrent++;
4472 break;
4473 case TYPE_TAPE:
4474 case TYPE_MEDIUM_CHANGER:
4475 ncurrent++;
4476 break;
4477 case TYPE_ENCLOSURE:
4478 if (!this_device->external)
4479 hpsa_get_enclosure_info(h, lunaddrbytes,
4480 physdev_list, phys_dev_index,
4481 this_device);
4482 ncurrent++;
4483 break;
4484 case TYPE_RAID:
4485 /* Only present the Smartarray HBA as a RAID controller.
4486 * If it's a RAID controller other than the HBA itself
4487 * (an external RAID controller, MSA500 or similar)
4488 * don't present it.
4490 if (!is_hba_lunid(lunaddrbytes))
4491 break;
4492 ncurrent++;
4493 break;
4494 default:
4495 break;
4497 if (ncurrent >= HPSA_MAX_DEVICES)
4498 break;
4501 if (h->sas_host == NULL) {
4502 int rc = 0;
4504 rc = hpsa_add_sas_host(h);
4505 if (rc) {
4506 dev_warn(&h->pdev->dev,
4507 "Could not add sas host %d\n", rc);
4508 goto out;
4512 adjust_hpsa_scsi_table(h, currentsd, ncurrent);
4513 out:
4514 kfree(tmpdevice);
4515 for (i = 0; i < ndev_allocated; i++)
4516 kfree(currentsd[i]);
4517 kfree(currentsd);
4518 kfree(physdev_list);
4519 kfree(logdev_list);
4520 kfree(id_ctlr);
4521 kfree(id_phys);
4524 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4525 struct scatterlist *sg)
4527 u64 addr64 = (u64) sg_dma_address(sg);
4528 unsigned int len = sg_dma_len(sg);
4530 desc->Addr = cpu_to_le64(addr64);
4531 desc->Len = cpu_to_le32(len);
4532 desc->Ext = 0;
4536 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
4537 * dma mapping and fills in the scatter gather entries of the
4538 * hpsa command, cp.
4540 static int hpsa_scatter_gather(struct ctlr_info *h,
4541 struct CommandList *cp,
4542 struct scsi_cmnd *cmd)
4544 struct scatterlist *sg;
4545 int use_sg, i, sg_limit, chained, last_sg;
4546 struct SGDescriptor *curr_sg;
4548 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4550 use_sg = scsi_dma_map(cmd);
4551 if (use_sg < 0)
4552 return use_sg;
4554 if (!use_sg)
4555 goto sglist_finished;
4558 * If the number of entries is greater than the max for a single list,
4559 * then we have a chained list; we will set up all but one entry in the
4560 * first list (the last entry is saved for link information);
4561 * otherwise, we don't have a chained list and we'll set up at each of
4562 * the entries in the one list.
4564 curr_sg = cp->SG;
4565 chained = use_sg > h->max_cmd_sg_entries;
4566 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4567 last_sg = scsi_sg_count(cmd) - 1;
4568 scsi_for_each_sg(cmd, sg, sg_limit, i) {
4569 hpsa_set_sg_descriptor(curr_sg, sg);
4570 curr_sg++;
4573 if (chained) {
4575 * Continue with the chained list. Set curr_sg to the chained
4576 * list. Modify the limit to the total count less the entries
4577 * we've already set up. Resume the scan at the list entry
4578 * where the previous loop left off.
4580 curr_sg = h->cmd_sg_list[cp->cmdindex];
4581 sg_limit = use_sg - sg_limit;
4582 for_each_sg(sg, sg, sg_limit, i) {
4583 hpsa_set_sg_descriptor(curr_sg, sg);
4584 curr_sg++;
4588 /* Back the pointer up to the last entry and mark it as "last". */
4589 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4591 if (use_sg + chained > h->maxSG)
4592 h->maxSG = use_sg + chained;
4594 if (chained) {
4595 cp->Header.SGList = h->max_cmd_sg_entries;
4596 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4597 if (hpsa_map_sg_chain_block(h, cp)) {
4598 scsi_dma_unmap(cmd);
4599 return -1;
4601 return 0;
4604 sglist_finished:
4606 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
4607 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
4608 return 0;
4611 static inline void warn_zero_length_transfer(struct ctlr_info *h,
4612 u8 *cdb, int cdb_len,
4613 const char *func)
4615 dev_warn(&h->pdev->dev,
4616 "%s: Blocking zero-length request: CDB:%*phN\n",
4617 func, cdb_len, cdb);
4620 #define IO_ACCEL_INELIGIBLE 1
4621 /* zero-length transfers trigger hardware errors. */
4622 static bool is_zero_length_transfer(u8 *cdb)
4624 u32 block_cnt;
4626 /* Block zero-length transfer sizes on certain commands. */
4627 switch (cdb[0]) {
4628 case READ_10:
4629 case WRITE_10:
4630 case VERIFY: /* 0x2F */
4631 case WRITE_VERIFY: /* 0x2E */
4632 block_cnt = get_unaligned_be16(&cdb[7]);
4633 break;
4634 case READ_12:
4635 case WRITE_12:
4636 case VERIFY_12: /* 0xAF */
4637 case WRITE_VERIFY_12: /* 0xAE */
4638 block_cnt = get_unaligned_be32(&cdb[6]);
4639 break;
4640 case READ_16:
4641 case WRITE_16:
4642 case VERIFY_16: /* 0x8F */
4643 block_cnt = get_unaligned_be32(&cdb[10]);
4644 break;
4645 default:
4646 return false;
4649 return block_cnt == 0;
4652 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4654 int is_write = 0;
4655 u32 block;
4656 u32 block_cnt;
4658 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
4659 switch (cdb[0]) {
4660 case WRITE_6:
4661 case WRITE_12:
4662 is_write = 1;
4663 case READ_6:
4664 case READ_12:
4665 if (*cdb_len == 6) {
4666 block = (((cdb[1] & 0x1F) << 16) |
4667 (cdb[2] << 8) |
4668 cdb[3]);
4669 block_cnt = cdb[4];
4670 if (block_cnt == 0)
4671 block_cnt = 256;
4672 } else {
4673 BUG_ON(*cdb_len != 12);
4674 block = get_unaligned_be32(&cdb[2]);
4675 block_cnt = get_unaligned_be32(&cdb[6]);
4677 if (block_cnt > 0xffff)
4678 return IO_ACCEL_INELIGIBLE;
4680 cdb[0] = is_write ? WRITE_10 : READ_10;
4681 cdb[1] = 0;
4682 cdb[2] = (u8) (block >> 24);
4683 cdb[3] = (u8) (block >> 16);
4684 cdb[4] = (u8) (block >> 8);
4685 cdb[5] = (u8) (block);
4686 cdb[6] = 0;
4687 cdb[7] = (u8) (block_cnt >> 8);
4688 cdb[8] = (u8) (block_cnt);
4689 cdb[9] = 0;
4690 *cdb_len = 10;
4691 break;
4693 return 0;
4696 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4697 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4698 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4700 struct scsi_cmnd *cmd = c->scsi_cmd;
4701 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4702 unsigned int len;
4703 unsigned int total_len = 0;
4704 struct scatterlist *sg;
4705 u64 addr64;
4706 int use_sg, i;
4707 struct SGDescriptor *curr_sg;
4708 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4710 /* TODO: implement chaining support */
4711 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4712 atomic_dec(&phys_disk->ioaccel_cmds_out);
4713 return IO_ACCEL_INELIGIBLE;
4716 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4718 if (is_zero_length_transfer(cdb)) {
4719 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4720 atomic_dec(&phys_disk->ioaccel_cmds_out);
4721 return IO_ACCEL_INELIGIBLE;
4724 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4725 atomic_dec(&phys_disk->ioaccel_cmds_out);
4726 return IO_ACCEL_INELIGIBLE;
4729 c->cmd_type = CMD_IOACCEL1;
4731 /* Adjust the DMA address to point to the accelerated command buffer */
4732 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4733 (c->cmdindex * sizeof(*cp));
4734 BUG_ON(c->busaddr & 0x0000007F);
4736 use_sg = scsi_dma_map(cmd);
4737 if (use_sg < 0) {
4738 atomic_dec(&phys_disk->ioaccel_cmds_out);
4739 return use_sg;
4742 if (use_sg) {
4743 curr_sg = cp->SG;
4744 scsi_for_each_sg(cmd, sg, use_sg, i) {
4745 addr64 = (u64) sg_dma_address(sg);
4746 len = sg_dma_len(sg);
4747 total_len += len;
4748 curr_sg->Addr = cpu_to_le64(addr64);
4749 curr_sg->Len = cpu_to_le32(len);
4750 curr_sg->Ext = cpu_to_le32(0);
4751 curr_sg++;
4753 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4755 switch (cmd->sc_data_direction) {
4756 case DMA_TO_DEVICE:
4757 control |= IOACCEL1_CONTROL_DATA_OUT;
4758 break;
4759 case DMA_FROM_DEVICE:
4760 control |= IOACCEL1_CONTROL_DATA_IN;
4761 break;
4762 case DMA_NONE:
4763 control |= IOACCEL1_CONTROL_NODATAXFER;
4764 break;
4765 default:
4766 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4767 cmd->sc_data_direction);
4768 BUG();
4769 break;
4771 } else {
4772 control |= IOACCEL1_CONTROL_NODATAXFER;
4775 c->Header.SGList = use_sg;
4776 /* Fill out the command structure to submit */
4777 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4778 cp->transfer_len = cpu_to_le32(total_len);
4779 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4780 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4781 cp->control = cpu_to_le32(control);
4782 memcpy(cp->CDB, cdb, cdb_len);
4783 memcpy(cp->CISS_LUN, scsi3addr, 8);
4784 /* Tag was already set at init time. */
4785 enqueue_cmd_and_start_io(h, c);
4786 return 0;
4790 * Queue a command directly to a device behind the controller using the
4791 * I/O accelerator path.
4793 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4794 struct CommandList *c)
4796 struct scsi_cmnd *cmd = c->scsi_cmd;
4797 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4799 if (!dev)
4800 return -1;
4802 c->phys_disk = dev;
4804 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4805 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4809 * Set encryption parameters for the ioaccel2 request
4811 static void set_encrypt_ioaccel2(struct ctlr_info *h,
4812 struct CommandList *c, struct io_accel2_cmd *cp)
4814 struct scsi_cmnd *cmd = c->scsi_cmd;
4815 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4816 struct raid_map_data *map = &dev->raid_map;
4817 u64 first_block;
4819 /* Are we doing encryption on this device */
4820 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4821 return;
4822 /* Set the data encryption key index. */
4823 cp->dekindex = map->dekindex;
4825 /* Set the encryption enable flag, encoded into direction field. */
4826 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4828 /* Set encryption tweak values based on logical block address
4829 * If block size is 512, tweak value is LBA.
4830 * For other block sizes, tweak is (LBA * block size)/ 512)
4832 switch (cmd->cmnd[0]) {
4833 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4834 case READ_6:
4835 case WRITE_6:
4836 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4837 (cmd->cmnd[2] << 8) |
4838 cmd->cmnd[3]);
4839 break;
4840 case WRITE_10:
4841 case READ_10:
4842 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
4843 case WRITE_12:
4844 case READ_12:
4845 first_block = get_unaligned_be32(&cmd->cmnd[2]);
4846 break;
4847 case WRITE_16:
4848 case READ_16:
4849 first_block = get_unaligned_be64(&cmd->cmnd[2]);
4850 break;
4851 default:
4852 dev_err(&h->pdev->dev,
4853 "ERROR: %s: size (0x%x) not supported for encryption\n",
4854 __func__, cmd->cmnd[0]);
4855 BUG();
4856 break;
4859 if (le32_to_cpu(map->volume_blk_size) != 512)
4860 first_block = first_block *
4861 le32_to_cpu(map->volume_blk_size)/512;
4863 cp->tweak_lower = cpu_to_le32(first_block);
4864 cp->tweak_upper = cpu_to_le32(first_block >> 32);
4867 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4868 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4869 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4871 struct scsi_cmnd *cmd = c->scsi_cmd;
4872 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4873 struct ioaccel2_sg_element *curr_sg;
4874 int use_sg, i;
4875 struct scatterlist *sg;
4876 u64 addr64;
4877 u32 len;
4878 u32 total_len = 0;
4880 if (!cmd->device)
4881 return -1;
4883 if (!cmd->device->hostdata)
4884 return -1;
4886 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4888 if (is_zero_length_transfer(cdb)) {
4889 warn_zero_length_transfer(h, cdb, cdb_len, __func__);
4890 atomic_dec(&phys_disk->ioaccel_cmds_out);
4891 return IO_ACCEL_INELIGIBLE;
4894 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4895 atomic_dec(&phys_disk->ioaccel_cmds_out);
4896 return IO_ACCEL_INELIGIBLE;
4899 c->cmd_type = CMD_IOACCEL2;
4900 /* Adjust the DMA address to point to the accelerated command buffer */
4901 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4902 (c->cmdindex * sizeof(*cp));
4903 BUG_ON(c->busaddr & 0x0000007F);
4905 memset(cp, 0, sizeof(*cp));
4906 cp->IU_type = IOACCEL2_IU_TYPE;
4908 use_sg = scsi_dma_map(cmd);
4909 if (use_sg < 0) {
4910 atomic_dec(&phys_disk->ioaccel_cmds_out);
4911 return use_sg;
4914 if (use_sg) {
4915 curr_sg = cp->sg;
4916 if (use_sg > h->ioaccel_maxsg) {
4917 addr64 = le64_to_cpu(
4918 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4919 curr_sg->address = cpu_to_le64(addr64);
4920 curr_sg->length = 0;
4921 curr_sg->reserved[0] = 0;
4922 curr_sg->reserved[1] = 0;
4923 curr_sg->reserved[2] = 0;
4924 curr_sg->chain_indicator = 0x80;
4926 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4928 scsi_for_each_sg(cmd, sg, use_sg, i) {
4929 addr64 = (u64) sg_dma_address(sg);
4930 len = sg_dma_len(sg);
4931 total_len += len;
4932 curr_sg->address = cpu_to_le64(addr64);
4933 curr_sg->length = cpu_to_le32(len);
4934 curr_sg->reserved[0] = 0;
4935 curr_sg->reserved[1] = 0;
4936 curr_sg->reserved[2] = 0;
4937 curr_sg->chain_indicator = 0;
4938 curr_sg++;
4941 switch (cmd->sc_data_direction) {
4942 case DMA_TO_DEVICE:
4943 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4944 cp->direction |= IOACCEL2_DIR_DATA_OUT;
4945 break;
4946 case DMA_FROM_DEVICE:
4947 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4948 cp->direction |= IOACCEL2_DIR_DATA_IN;
4949 break;
4950 case DMA_NONE:
4951 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4952 cp->direction |= IOACCEL2_DIR_NO_DATA;
4953 break;
4954 default:
4955 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4956 cmd->sc_data_direction);
4957 BUG();
4958 break;
4960 } else {
4961 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4962 cp->direction |= IOACCEL2_DIR_NO_DATA;
4965 /* Set encryption parameters, if necessary */
4966 set_encrypt_ioaccel2(h, c, cp);
4968 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4969 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
4970 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4972 cp->data_len = cpu_to_le32(total_len);
4973 cp->err_ptr = cpu_to_le64(c->busaddr +
4974 offsetof(struct io_accel2_cmd, error_data));
4975 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4977 /* fill in sg elements */
4978 if (use_sg > h->ioaccel_maxsg) {
4979 cp->sg_count = 1;
4980 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
4981 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4982 atomic_dec(&phys_disk->ioaccel_cmds_out);
4983 scsi_dma_unmap(cmd);
4984 return -1;
4986 } else
4987 cp->sg_count = (u8) use_sg;
4989 enqueue_cmd_and_start_io(h, c);
4990 return 0;
4994 * Queue a command to the correct I/O accelerator path.
4996 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
4997 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4998 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
5000 if (!c->scsi_cmd->device)
5001 return -1;
5003 if (!c->scsi_cmd->device->hostdata)
5004 return -1;
5006 /* Try to honor the device's queue depth */
5007 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
5008 phys_disk->queue_depth) {
5009 atomic_dec(&phys_disk->ioaccel_cmds_out);
5010 return IO_ACCEL_INELIGIBLE;
5012 if (h->transMethod & CFGTBL_Trans_io_accel1)
5013 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
5014 cdb, cdb_len, scsi3addr,
5015 phys_disk);
5016 else
5017 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
5018 cdb, cdb_len, scsi3addr,
5019 phys_disk);
5022 static void raid_map_helper(struct raid_map_data *map,
5023 int offload_to_mirror, u32 *map_index, u32 *current_group)
5025 if (offload_to_mirror == 0) {
5026 /* use physical disk in the first mirrored group. */
5027 *map_index %= le16_to_cpu(map->data_disks_per_row);
5028 return;
5030 do {
5031 /* determine mirror group that *map_index indicates */
5032 *current_group = *map_index /
5033 le16_to_cpu(map->data_disks_per_row);
5034 if (offload_to_mirror == *current_group)
5035 continue;
5036 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
5037 /* select map index from next group */
5038 *map_index += le16_to_cpu(map->data_disks_per_row);
5039 (*current_group)++;
5040 } else {
5041 /* select map index from first group */
5042 *map_index %= le16_to_cpu(map->data_disks_per_row);
5043 *current_group = 0;
5045 } while (offload_to_mirror != *current_group);
5049 * Attempt to perform offload RAID mapping for a logical volume I/O.
5051 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
5052 struct CommandList *c)
5054 struct scsi_cmnd *cmd = c->scsi_cmd;
5055 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5056 struct raid_map_data *map = &dev->raid_map;
5057 struct raid_map_disk_data *dd = &map->data[0];
5058 int is_write = 0;
5059 u32 map_index;
5060 u64 first_block, last_block;
5061 u32 block_cnt;
5062 u32 blocks_per_row;
5063 u64 first_row, last_row;
5064 u32 first_row_offset, last_row_offset;
5065 u32 first_column, last_column;
5066 u64 r0_first_row, r0_last_row;
5067 u32 r5or6_blocks_per_row;
5068 u64 r5or6_first_row, r5or6_last_row;
5069 u32 r5or6_first_row_offset, r5or6_last_row_offset;
5070 u32 r5or6_first_column, r5or6_last_column;
5071 u32 total_disks_per_row;
5072 u32 stripesize;
5073 u32 first_group, last_group, current_group;
5074 u32 map_row;
5075 u32 disk_handle;
5076 u64 disk_block;
5077 u32 disk_block_cnt;
5078 u8 cdb[16];
5079 u8 cdb_len;
5080 u16 strip_size;
5081 #if BITS_PER_LONG == 32
5082 u64 tmpdiv;
5083 #endif
5084 int offload_to_mirror;
5086 if (!dev)
5087 return -1;
5089 /* check for valid opcode, get LBA and block count */
5090 switch (cmd->cmnd[0]) {
5091 case WRITE_6:
5092 is_write = 1;
5093 case READ_6:
5094 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
5095 (cmd->cmnd[2] << 8) |
5096 cmd->cmnd[3]);
5097 block_cnt = cmd->cmnd[4];
5098 if (block_cnt == 0)
5099 block_cnt = 256;
5100 break;
5101 case WRITE_10:
5102 is_write = 1;
5103 case READ_10:
5104 first_block =
5105 (((u64) cmd->cmnd[2]) << 24) |
5106 (((u64) cmd->cmnd[3]) << 16) |
5107 (((u64) cmd->cmnd[4]) << 8) |
5108 cmd->cmnd[5];
5109 block_cnt =
5110 (((u32) cmd->cmnd[7]) << 8) |
5111 cmd->cmnd[8];
5112 break;
5113 case WRITE_12:
5114 is_write = 1;
5115 case READ_12:
5116 first_block =
5117 (((u64) cmd->cmnd[2]) << 24) |
5118 (((u64) cmd->cmnd[3]) << 16) |
5119 (((u64) cmd->cmnd[4]) << 8) |
5120 cmd->cmnd[5];
5121 block_cnt =
5122 (((u32) cmd->cmnd[6]) << 24) |
5123 (((u32) cmd->cmnd[7]) << 16) |
5124 (((u32) cmd->cmnd[8]) << 8) |
5125 cmd->cmnd[9];
5126 break;
5127 case WRITE_16:
5128 is_write = 1;
5129 case READ_16:
5130 first_block =
5131 (((u64) cmd->cmnd[2]) << 56) |
5132 (((u64) cmd->cmnd[3]) << 48) |
5133 (((u64) cmd->cmnd[4]) << 40) |
5134 (((u64) cmd->cmnd[5]) << 32) |
5135 (((u64) cmd->cmnd[6]) << 24) |
5136 (((u64) cmd->cmnd[7]) << 16) |
5137 (((u64) cmd->cmnd[8]) << 8) |
5138 cmd->cmnd[9];
5139 block_cnt =
5140 (((u32) cmd->cmnd[10]) << 24) |
5141 (((u32) cmd->cmnd[11]) << 16) |
5142 (((u32) cmd->cmnd[12]) << 8) |
5143 cmd->cmnd[13];
5144 break;
5145 default:
5146 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
5148 last_block = first_block + block_cnt - 1;
5150 /* check for write to non-RAID-0 */
5151 if (is_write && dev->raid_level != 0)
5152 return IO_ACCEL_INELIGIBLE;
5154 /* check for invalid block or wraparound */
5155 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
5156 last_block < first_block)
5157 return IO_ACCEL_INELIGIBLE;
5159 /* calculate stripe information for the request */
5160 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
5161 le16_to_cpu(map->strip_size);
5162 strip_size = le16_to_cpu(map->strip_size);
5163 #if BITS_PER_LONG == 32
5164 tmpdiv = first_block;
5165 (void) do_div(tmpdiv, blocks_per_row);
5166 first_row = tmpdiv;
5167 tmpdiv = last_block;
5168 (void) do_div(tmpdiv, blocks_per_row);
5169 last_row = tmpdiv;
5170 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5171 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5172 tmpdiv = first_row_offset;
5173 (void) do_div(tmpdiv, strip_size);
5174 first_column = tmpdiv;
5175 tmpdiv = last_row_offset;
5176 (void) do_div(tmpdiv, strip_size);
5177 last_column = tmpdiv;
5178 #else
5179 first_row = first_block / blocks_per_row;
5180 last_row = last_block / blocks_per_row;
5181 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5182 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5183 first_column = first_row_offset / strip_size;
5184 last_column = last_row_offset / strip_size;
5185 #endif
5187 /* if this isn't a single row/column then give to the controller */
5188 if ((first_row != last_row) || (first_column != last_column))
5189 return IO_ACCEL_INELIGIBLE;
5191 /* proceeding with driver mapping */
5192 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
5193 le16_to_cpu(map->metadata_disks_per_row);
5194 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5195 le16_to_cpu(map->row_cnt);
5196 map_index = (map_row * total_disks_per_row) + first_column;
5198 switch (dev->raid_level) {
5199 case HPSA_RAID_0:
5200 break; /* nothing special to do */
5201 case HPSA_RAID_1:
5202 /* Handles load balance across RAID 1 members.
5203 * (2-drive R1 and R10 with even # of drives.)
5204 * Appropriate for SSDs, not optimal for HDDs
5206 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
5207 if (dev->offload_to_mirror)
5208 map_index += le16_to_cpu(map->data_disks_per_row);
5209 dev->offload_to_mirror = !dev->offload_to_mirror;
5210 break;
5211 case HPSA_RAID_ADM:
5212 /* Handles N-way mirrors (R1-ADM)
5213 * and R10 with # of drives divisible by 3.)
5215 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
5217 offload_to_mirror = dev->offload_to_mirror;
5218 raid_map_helper(map, offload_to_mirror,
5219 &map_index, &current_group);
5220 /* set mirror group to use next time */
5221 offload_to_mirror =
5222 (offload_to_mirror >=
5223 le16_to_cpu(map->layout_map_count) - 1)
5224 ? 0 : offload_to_mirror + 1;
5225 dev->offload_to_mirror = offload_to_mirror;
5226 /* Avoid direct use of dev->offload_to_mirror within this
5227 * function since multiple threads might simultaneously
5228 * increment it beyond the range of dev->layout_map_count -1.
5230 break;
5231 case HPSA_RAID_5:
5232 case HPSA_RAID_6:
5233 if (le16_to_cpu(map->layout_map_count) <= 1)
5234 break;
5236 /* Verify first and last block are in same RAID group */
5237 r5or6_blocks_per_row =
5238 le16_to_cpu(map->strip_size) *
5239 le16_to_cpu(map->data_disks_per_row);
5240 BUG_ON(r5or6_blocks_per_row == 0);
5241 stripesize = r5or6_blocks_per_row *
5242 le16_to_cpu(map->layout_map_count);
5243 #if BITS_PER_LONG == 32
5244 tmpdiv = first_block;
5245 first_group = do_div(tmpdiv, stripesize);
5246 tmpdiv = first_group;
5247 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5248 first_group = tmpdiv;
5249 tmpdiv = last_block;
5250 last_group = do_div(tmpdiv, stripesize);
5251 tmpdiv = last_group;
5252 (void) do_div(tmpdiv, r5or6_blocks_per_row);
5253 last_group = tmpdiv;
5254 #else
5255 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
5256 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
5257 #endif
5258 if (first_group != last_group)
5259 return IO_ACCEL_INELIGIBLE;
5261 /* Verify request is in a single row of RAID 5/6 */
5262 #if BITS_PER_LONG == 32
5263 tmpdiv = first_block;
5264 (void) do_div(tmpdiv, stripesize);
5265 first_row = r5or6_first_row = r0_first_row = tmpdiv;
5266 tmpdiv = last_block;
5267 (void) do_div(tmpdiv, stripesize);
5268 r5or6_last_row = r0_last_row = tmpdiv;
5269 #else
5270 first_row = r5or6_first_row = r0_first_row =
5271 first_block / stripesize;
5272 r5or6_last_row = r0_last_row = last_block / stripesize;
5273 #endif
5274 if (r5or6_first_row != r5or6_last_row)
5275 return IO_ACCEL_INELIGIBLE;
5278 /* Verify request is in a single column */
5279 #if BITS_PER_LONG == 32
5280 tmpdiv = first_block;
5281 first_row_offset = do_div(tmpdiv, stripesize);
5282 tmpdiv = first_row_offset;
5283 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
5284 r5or6_first_row_offset = first_row_offset;
5285 tmpdiv = last_block;
5286 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
5287 tmpdiv = r5or6_last_row_offset;
5288 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
5289 tmpdiv = r5or6_first_row_offset;
5290 (void) do_div(tmpdiv, map->strip_size);
5291 first_column = r5or6_first_column = tmpdiv;
5292 tmpdiv = r5or6_last_row_offset;
5293 (void) do_div(tmpdiv, map->strip_size);
5294 r5or6_last_column = tmpdiv;
5295 #else
5296 first_row_offset = r5or6_first_row_offset =
5297 (u32)((first_block % stripesize) %
5298 r5or6_blocks_per_row);
5300 r5or6_last_row_offset =
5301 (u32)((last_block % stripesize) %
5302 r5or6_blocks_per_row);
5304 first_column = r5or6_first_column =
5305 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
5306 r5or6_last_column =
5307 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
5308 #endif
5309 if (r5or6_first_column != r5or6_last_column)
5310 return IO_ACCEL_INELIGIBLE;
5312 /* Request is eligible */
5313 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5314 le16_to_cpu(map->row_cnt);
5316 map_index = (first_group *
5317 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
5318 (map_row * total_disks_per_row) + first_column;
5319 break;
5320 default:
5321 return IO_ACCEL_INELIGIBLE;
5324 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
5325 return IO_ACCEL_INELIGIBLE;
5327 c->phys_disk = dev->phys_disk[map_index];
5328 if (!c->phys_disk)
5329 return IO_ACCEL_INELIGIBLE;
5331 disk_handle = dd[map_index].ioaccel_handle;
5332 disk_block = le64_to_cpu(map->disk_starting_blk) +
5333 first_row * le16_to_cpu(map->strip_size) +
5334 (first_row_offset - first_column *
5335 le16_to_cpu(map->strip_size));
5336 disk_block_cnt = block_cnt;
5338 /* handle differing logical/physical block sizes */
5339 if (map->phys_blk_shift) {
5340 disk_block <<= map->phys_blk_shift;
5341 disk_block_cnt <<= map->phys_blk_shift;
5343 BUG_ON(disk_block_cnt > 0xffff);
5345 /* build the new CDB for the physical disk I/O */
5346 if (disk_block > 0xffffffff) {
5347 cdb[0] = is_write ? WRITE_16 : READ_16;
5348 cdb[1] = 0;
5349 cdb[2] = (u8) (disk_block >> 56);
5350 cdb[3] = (u8) (disk_block >> 48);
5351 cdb[4] = (u8) (disk_block >> 40);
5352 cdb[5] = (u8) (disk_block >> 32);
5353 cdb[6] = (u8) (disk_block >> 24);
5354 cdb[7] = (u8) (disk_block >> 16);
5355 cdb[8] = (u8) (disk_block >> 8);
5356 cdb[9] = (u8) (disk_block);
5357 cdb[10] = (u8) (disk_block_cnt >> 24);
5358 cdb[11] = (u8) (disk_block_cnt >> 16);
5359 cdb[12] = (u8) (disk_block_cnt >> 8);
5360 cdb[13] = (u8) (disk_block_cnt);
5361 cdb[14] = 0;
5362 cdb[15] = 0;
5363 cdb_len = 16;
5364 } else {
5365 cdb[0] = is_write ? WRITE_10 : READ_10;
5366 cdb[1] = 0;
5367 cdb[2] = (u8) (disk_block >> 24);
5368 cdb[3] = (u8) (disk_block >> 16);
5369 cdb[4] = (u8) (disk_block >> 8);
5370 cdb[5] = (u8) (disk_block);
5371 cdb[6] = 0;
5372 cdb[7] = (u8) (disk_block_cnt >> 8);
5373 cdb[8] = (u8) (disk_block_cnt);
5374 cdb[9] = 0;
5375 cdb_len = 10;
5377 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
5378 dev->scsi3addr,
5379 dev->phys_disk[map_index]);
5383 * Submit commands down the "normal" RAID stack path
5384 * All callers to hpsa_ciss_submit must check lockup_detected
5385 * beforehand, before (opt.) and after calling cmd_alloc
5387 static int hpsa_ciss_submit(struct ctlr_info *h,
5388 struct CommandList *c, struct scsi_cmnd *cmd,
5389 unsigned char scsi3addr[])
5391 cmd->host_scribble = (unsigned char *) c;
5392 c->cmd_type = CMD_SCSI;
5393 c->scsi_cmd = cmd;
5394 c->Header.ReplyQueue = 0; /* unused in simple mode */
5395 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
5396 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
5398 /* Fill in the request block... */
5400 c->Request.Timeout = 0;
5401 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
5402 c->Request.CDBLen = cmd->cmd_len;
5403 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
5404 switch (cmd->sc_data_direction) {
5405 case DMA_TO_DEVICE:
5406 c->Request.type_attr_dir =
5407 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
5408 break;
5409 case DMA_FROM_DEVICE:
5410 c->Request.type_attr_dir =
5411 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
5412 break;
5413 case DMA_NONE:
5414 c->Request.type_attr_dir =
5415 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
5416 break;
5417 case DMA_BIDIRECTIONAL:
5418 /* This can happen if a buggy application does a scsi passthru
5419 * and sets both inlen and outlen to non-zero. ( see
5420 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
5423 c->Request.type_attr_dir =
5424 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
5425 /* This is technically wrong, and hpsa controllers should
5426 * reject it with CMD_INVALID, which is the most correct
5427 * response, but non-fibre backends appear to let it
5428 * slide by, and give the same results as if this field
5429 * were set correctly. Either way is acceptable for
5430 * our purposes here.
5433 break;
5435 default:
5436 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5437 cmd->sc_data_direction);
5438 BUG();
5439 break;
5442 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
5443 hpsa_cmd_resolve_and_free(h, c);
5444 return SCSI_MLQUEUE_HOST_BUSY;
5446 enqueue_cmd_and_start_io(h, c);
5447 /* the cmd'll come back via intr handler in complete_scsi_command() */
5448 return 0;
5451 static void hpsa_cmd_init(struct ctlr_info *h, int index,
5452 struct CommandList *c)
5454 dma_addr_t cmd_dma_handle, err_dma_handle;
5456 /* Zero out all of commandlist except the last field, refcount */
5457 memset(c, 0, offsetof(struct CommandList, refcount));
5458 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5459 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5460 c->err_info = h->errinfo_pool + index;
5461 memset(c->err_info, 0, sizeof(*c->err_info));
5462 err_dma_handle = h->errinfo_pool_dhandle
5463 + index * sizeof(*c->err_info);
5464 c->cmdindex = index;
5465 c->busaddr = (u32) cmd_dma_handle;
5466 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5467 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5468 c->h = h;
5469 c->scsi_cmd = SCSI_CMD_IDLE;
5472 static void hpsa_preinitialize_commands(struct ctlr_info *h)
5474 int i;
5476 for (i = 0; i < h->nr_cmds; i++) {
5477 struct CommandList *c = h->cmd_pool + i;
5479 hpsa_cmd_init(h, i, c);
5480 atomic_set(&c->refcount, 0);
5484 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5485 struct CommandList *c)
5487 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5489 BUG_ON(c->cmdindex != index);
5491 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5492 memset(c->err_info, 0, sizeof(*c->err_info));
5493 c->busaddr = (u32) cmd_dma_handle;
5496 static int hpsa_ioaccel_submit(struct ctlr_info *h,
5497 struct CommandList *c, struct scsi_cmnd *cmd,
5498 unsigned char *scsi3addr)
5500 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5501 int rc = IO_ACCEL_INELIGIBLE;
5503 if (!dev)
5504 return SCSI_MLQUEUE_HOST_BUSY;
5506 cmd->host_scribble = (unsigned char *) c;
5508 if (dev->offload_enabled) {
5509 hpsa_cmd_init(h, c->cmdindex, c);
5510 c->cmd_type = CMD_SCSI;
5511 c->scsi_cmd = cmd;
5512 rc = hpsa_scsi_ioaccel_raid_map(h, c);
5513 if (rc < 0) /* scsi_dma_map failed. */
5514 rc = SCSI_MLQUEUE_HOST_BUSY;
5515 } else if (dev->hba_ioaccel_enabled) {
5516 hpsa_cmd_init(h, c->cmdindex, c);
5517 c->cmd_type = CMD_SCSI;
5518 c->scsi_cmd = cmd;
5519 rc = hpsa_scsi_ioaccel_direct_map(h, c);
5520 if (rc < 0) /* scsi_dma_map failed. */
5521 rc = SCSI_MLQUEUE_HOST_BUSY;
5523 return rc;
5526 static void hpsa_command_resubmit_worker(struct work_struct *work)
5528 struct scsi_cmnd *cmd;
5529 struct hpsa_scsi_dev_t *dev;
5530 struct CommandList *c = container_of(work, struct CommandList, work);
5532 cmd = c->scsi_cmd;
5533 dev = cmd->device->hostdata;
5534 if (!dev) {
5535 cmd->result = DID_NO_CONNECT << 16;
5536 return hpsa_cmd_free_and_done(c->h, c, cmd);
5538 if (c->reset_pending)
5539 return hpsa_cmd_free_and_done(c->h, c, cmd);
5540 if (c->cmd_type == CMD_IOACCEL2) {
5541 struct ctlr_info *h = c->h;
5542 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5543 int rc;
5545 if (c2->error_data.serv_response ==
5546 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5547 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
5548 if (rc == 0)
5549 return;
5550 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5552 * If we get here, it means dma mapping failed.
5553 * Try again via scsi mid layer, which will
5554 * then get SCSI_MLQUEUE_HOST_BUSY.
5556 cmd->result = DID_IMM_RETRY << 16;
5557 return hpsa_cmd_free_and_done(h, c, cmd);
5559 /* else, fall thru and resubmit down CISS path */
5562 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
5563 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
5565 * If we get here, it means dma mapping failed. Try
5566 * again via scsi mid layer, which will then get
5567 * SCSI_MLQUEUE_HOST_BUSY.
5569 * hpsa_ciss_submit will have already freed c
5570 * if it encountered a dma mapping failure.
5572 cmd->result = DID_IMM_RETRY << 16;
5573 cmd->scsi_done(cmd);
5577 /* Running in struct Scsi_Host->host_lock less mode */
5578 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5580 struct ctlr_info *h;
5581 struct hpsa_scsi_dev_t *dev;
5582 unsigned char scsi3addr[8];
5583 struct CommandList *c;
5584 int rc = 0;
5586 /* Get the ptr to our adapter structure out of cmd->host. */
5587 h = sdev_to_hba(cmd->device);
5589 BUG_ON(cmd->request->tag < 0);
5591 dev = cmd->device->hostdata;
5592 if (!dev) {
5593 cmd->result = DID_NO_CONNECT << 16;
5594 cmd->scsi_done(cmd);
5595 return 0;
5598 if (dev->removed) {
5599 cmd->result = DID_NO_CONNECT << 16;
5600 cmd->scsi_done(cmd);
5601 return 0;
5604 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
5606 if (unlikely(lockup_detected(h))) {
5607 cmd->result = DID_NO_CONNECT << 16;
5608 cmd->scsi_done(cmd);
5609 return 0;
5611 c = cmd_tagged_alloc(h, cmd);
5614 * Call alternate submit routine for I/O accelerated commands.
5615 * Retries always go down the normal I/O path.
5617 if (likely(cmd->retries == 0 &&
5618 !blk_rq_is_passthrough(cmd->request) &&
5619 h->acciopath_status)) {
5620 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
5621 if (rc == 0)
5622 return 0;
5623 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5624 hpsa_cmd_resolve_and_free(h, c);
5625 return SCSI_MLQUEUE_HOST_BUSY;
5628 return hpsa_ciss_submit(h, c, cmd, scsi3addr);
5631 static void hpsa_scan_complete(struct ctlr_info *h)
5633 unsigned long flags;
5635 spin_lock_irqsave(&h->scan_lock, flags);
5636 h->scan_finished = 1;
5637 wake_up(&h->scan_wait_queue);
5638 spin_unlock_irqrestore(&h->scan_lock, flags);
5641 static void hpsa_scan_start(struct Scsi_Host *sh)
5643 struct ctlr_info *h = shost_to_hba(sh);
5644 unsigned long flags;
5647 * Don't let rescans be initiated on a controller known to be locked
5648 * up. If the controller locks up *during* a rescan, that thread is
5649 * probably hosed, but at least we can prevent new rescan threads from
5650 * piling up on a locked up controller.
5652 if (unlikely(lockup_detected(h)))
5653 return hpsa_scan_complete(h);
5656 * If a scan is already waiting to run, no need to add another
5658 spin_lock_irqsave(&h->scan_lock, flags);
5659 if (h->scan_waiting) {
5660 spin_unlock_irqrestore(&h->scan_lock, flags);
5661 return;
5664 spin_unlock_irqrestore(&h->scan_lock, flags);
5666 /* wait until any scan already in progress is finished. */
5667 while (1) {
5668 spin_lock_irqsave(&h->scan_lock, flags);
5669 if (h->scan_finished)
5670 break;
5671 h->scan_waiting = 1;
5672 spin_unlock_irqrestore(&h->scan_lock, flags);
5673 wait_event(h->scan_wait_queue, h->scan_finished);
5674 /* Note: We don't need to worry about a race between this
5675 * thread and driver unload because the midlayer will
5676 * have incremented the reference count, so unload won't
5677 * happen if we're in here.
5680 h->scan_finished = 0; /* mark scan as in progress */
5681 h->scan_waiting = 0;
5682 spin_unlock_irqrestore(&h->scan_lock, flags);
5684 if (unlikely(lockup_detected(h)))
5685 return hpsa_scan_complete(h);
5688 * Do the scan after a reset completion
5690 spin_lock_irqsave(&h->reset_lock, flags);
5691 if (h->reset_in_progress) {
5692 h->drv_req_rescan = 1;
5693 spin_unlock_irqrestore(&h->reset_lock, flags);
5694 hpsa_scan_complete(h);
5695 return;
5697 spin_unlock_irqrestore(&h->reset_lock, flags);
5699 hpsa_update_scsi_devices(h);
5701 hpsa_scan_complete(h);
5704 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5706 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5708 if (!logical_drive)
5709 return -ENODEV;
5711 if (qdepth < 1)
5712 qdepth = 1;
5713 else if (qdepth > logical_drive->queue_depth)
5714 qdepth = logical_drive->queue_depth;
5716 return scsi_change_queue_depth(sdev, qdepth);
5719 static int hpsa_scan_finished(struct Scsi_Host *sh,
5720 unsigned long elapsed_time)
5722 struct ctlr_info *h = shost_to_hba(sh);
5723 unsigned long flags;
5724 int finished;
5726 spin_lock_irqsave(&h->scan_lock, flags);
5727 finished = h->scan_finished;
5728 spin_unlock_irqrestore(&h->scan_lock, flags);
5729 return finished;
5732 static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5734 struct Scsi_Host *sh;
5736 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5737 if (sh == NULL) {
5738 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5739 return -ENOMEM;
5742 sh->io_port = 0;
5743 sh->n_io_port = 0;
5744 sh->this_id = -1;
5745 sh->max_channel = 3;
5746 sh->max_cmd_len = MAX_COMMAND_SIZE;
5747 sh->max_lun = HPSA_MAX_LUN;
5748 sh->max_id = HPSA_MAX_LUN;
5749 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5750 sh->cmd_per_lun = sh->can_queue;
5751 sh->sg_tablesize = h->maxsgentries;
5752 sh->transportt = hpsa_sas_transport_template;
5753 sh->hostdata[0] = (unsigned long) h;
5754 sh->irq = pci_irq_vector(h->pdev, 0);
5755 sh->unique_id = sh->irq;
5757 h->scsi_host = sh;
5758 return 0;
5761 static int hpsa_scsi_add_host(struct ctlr_info *h)
5763 int rv;
5765 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5766 if (rv) {
5767 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5768 return rv;
5770 scsi_scan_host(h->scsi_host);
5771 return 0;
5775 * The block layer has already gone to the trouble of picking out a unique,
5776 * small-integer tag for this request. We use an offset from that value as
5777 * an index to select our command block. (The offset allows us to reserve the
5778 * low-numbered entries for our own uses.)
5780 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5782 int idx = scmd->request->tag;
5784 if (idx < 0)
5785 return idx;
5787 /* Offset to leave space for internal cmds. */
5788 return idx += HPSA_NRESERVED_CMDS;
5792 * Send a TEST_UNIT_READY command to the specified LUN using the specified
5793 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
5795 static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5796 struct CommandList *c, unsigned char lunaddr[],
5797 int reply_queue)
5799 int rc;
5801 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
5802 (void) fill_cmd(c, TEST_UNIT_READY, h,
5803 NULL, 0, 0, lunaddr, TYPE_CMD);
5804 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
5805 if (rc)
5806 return rc;
5807 /* no unmap needed here because no data xfer. */
5809 /* Check if the unit is already ready. */
5810 if (c->err_info->CommandStatus == CMD_SUCCESS)
5811 return 0;
5814 * The first command sent after reset will receive "unit attention" to
5815 * indicate that the LUN has been reset...this is actually what we're
5816 * looking for (but, success is good too).
5818 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5819 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5820 (c->err_info->SenseInfo[2] == NO_SENSE ||
5821 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5822 return 0;
5824 return 1;
5828 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5829 * returns zero when the unit is ready, and non-zero when giving up.
5831 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5832 struct CommandList *c,
5833 unsigned char lunaddr[], int reply_queue)
5835 int rc;
5836 int count = 0;
5837 int waittime = 1; /* seconds */
5839 /* Send test unit ready until device ready, or give up. */
5840 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5843 * Wait for a bit. do this first, because if we send
5844 * the TUR right away, the reset will just abort it.
5846 msleep(1000 * waittime);
5848 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5849 if (!rc)
5850 break;
5852 /* Increase wait time with each try, up to a point. */
5853 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5854 waittime *= 2;
5856 dev_warn(&h->pdev->dev,
5857 "waiting %d secs for device to become ready.\n",
5858 waittime);
5861 return rc;
5864 static int wait_for_device_to_become_ready(struct ctlr_info *h,
5865 unsigned char lunaddr[],
5866 int reply_queue)
5868 int first_queue;
5869 int last_queue;
5870 int rq;
5871 int rc = 0;
5872 struct CommandList *c;
5874 c = cmd_alloc(h);
5877 * If no specific reply queue was requested, then send the TUR
5878 * repeatedly, requesting a reply on each reply queue; otherwise execute
5879 * the loop exactly once using only the specified queue.
5881 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5882 first_queue = 0;
5883 last_queue = h->nreply_queues - 1;
5884 } else {
5885 first_queue = reply_queue;
5886 last_queue = reply_queue;
5889 for (rq = first_queue; rq <= last_queue; rq++) {
5890 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5891 if (rc)
5892 break;
5895 if (rc)
5896 dev_warn(&h->pdev->dev, "giving up on device.\n");
5897 else
5898 dev_warn(&h->pdev->dev, "device is ready.\n");
5900 cmd_free(h, c);
5901 return rc;
5904 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
5905 * complaining. Doing a host- or bus-reset can't do anything good here.
5907 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5909 int rc = SUCCESS;
5910 struct ctlr_info *h;
5911 struct hpsa_scsi_dev_t *dev;
5912 u8 reset_type;
5913 char msg[48];
5914 unsigned long flags;
5916 /* find the controller to which the command to be aborted was sent */
5917 h = sdev_to_hba(scsicmd->device);
5918 if (h == NULL) /* paranoia */
5919 return FAILED;
5921 spin_lock_irqsave(&h->reset_lock, flags);
5922 h->reset_in_progress = 1;
5923 spin_unlock_irqrestore(&h->reset_lock, flags);
5925 if (lockup_detected(h)) {
5926 rc = FAILED;
5927 goto return_reset_status;
5930 dev = scsicmd->device->hostdata;
5931 if (!dev) {
5932 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
5933 rc = FAILED;
5934 goto return_reset_status;
5937 if (dev->devtype == TYPE_ENCLOSURE) {
5938 rc = SUCCESS;
5939 goto return_reset_status;
5942 /* if controller locked up, we can guarantee command won't complete */
5943 if (lockup_detected(h)) {
5944 snprintf(msg, sizeof(msg),
5945 "cmd %d RESET FAILED, lockup detected",
5946 hpsa_get_cmd_index(scsicmd));
5947 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5948 rc = FAILED;
5949 goto return_reset_status;
5952 /* this reset request might be the result of a lockup; check */
5953 if (detect_controller_lockup(h)) {
5954 snprintf(msg, sizeof(msg),
5955 "cmd %d RESET FAILED, new lockup detected",
5956 hpsa_get_cmd_index(scsicmd));
5957 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5958 rc = FAILED;
5959 goto return_reset_status;
5962 /* Do not attempt on controller */
5963 if (is_hba_lunid(dev->scsi3addr)) {
5964 rc = SUCCESS;
5965 goto return_reset_status;
5968 if (is_logical_dev_addr_mode(dev->scsi3addr))
5969 reset_type = HPSA_DEVICE_RESET_MSG;
5970 else
5971 reset_type = HPSA_PHYS_TARGET_RESET;
5973 sprintf(msg, "resetting %s",
5974 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
5975 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5977 /* send a reset to the SCSI LUN which the command was sent to */
5978 rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
5979 DEFAULT_REPLY_QUEUE);
5980 if (rc == 0)
5981 rc = SUCCESS;
5982 else
5983 rc = FAILED;
5985 sprintf(msg, "reset %s %s",
5986 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
5987 rc == SUCCESS ? "completed successfully" : "failed");
5988 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5990 return_reset_status:
5991 spin_lock_irqsave(&h->reset_lock, flags);
5992 h->reset_in_progress = 0;
5993 spin_unlock_irqrestore(&h->reset_lock, flags);
5994 return rc;
5998 * For operations with an associated SCSI command, a command block is allocated
5999 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
6000 * block request tag as an index into a table of entries. cmd_tagged_free() is
6001 * the complement, although cmd_free() may be called instead.
6003 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
6004 struct scsi_cmnd *scmd)
6006 int idx = hpsa_get_cmd_index(scmd);
6007 struct CommandList *c = h->cmd_pool + idx;
6009 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
6010 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
6011 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
6012 /* The index value comes from the block layer, so if it's out of
6013 * bounds, it's probably not our bug.
6015 BUG();
6018 atomic_inc(&c->refcount);
6019 if (unlikely(!hpsa_is_cmd_idle(c))) {
6021 * We expect that the SCSI layer will hand us a unique tag
6022 * value. Thus, there should never be a collision here between
6023 * two requests...because if the selected command isn't idle
6024 * then someone is going to be very disappointed.
6026 dev_err(&h->pdev->dev,
6027 "tag collision (tag=%d) in cmd_tagged_alloc().\n",
6028 idx);
6029 if (c->scsi_cmd != NULL)
6030 scsi_print_command(c->scsi_cmd);
6031 scsi_print_command(scmd);
6034 hpsa_cmd_partial_init(h, idx, c);
6035 return c;
6038 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
6041 * Release our reference to the block. We don't need to do anything
6042 * else to free it, because it is accessed by index.
6044 (void)atomic_dec(&c->refcount);
6048 * For operations that cannot sleep, a command block is allocated at init,
6049 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
6050 * which ones are free or in use. Lock must be held when calling this.
6051 * cmd_free() is the complement.
6052 * This function never gives up and returns NULL. If it hangs,
6053 * another thread must call cmd_free() to free some tags.
6056 static struct CommandList *cmd_alloc(struct ctlr_info *h)
6058 struct CommandList *c;
6059 int refcount, i;
6060 int offset = 0;
6063 * There is some *extremely* small but non-zero chance that that
6064 * multiple threads could get in here, and one thread could
6065 * be scanning through the list of bits looking for a free
6066 * one, but the free ones are always behind him, and other
6067 * threads sneak in behind him and eat them before he can
6068 * get to them, so that while there is always a free one, a
6069 * very unlucky thread might be starved anyway, never able to
6070 * beat the other threads. In reality, this happens so
6071 * infrequently as to be indistinguishable from never.
6073 * Note that we start allocating commands before the SCSI host structure
6074 * is initialized. Since the search starts at bit zero, this
6075 * all works, since we have at least one command structure available;
6076 * however, it means that the structures with the low indexes have to be
6077 * reserved for driver-initiated requests, while requests from the block
6078 * layer will use the higher indexes.
6081 for (;;) {
6082 i = find_next_zero_bit(h->cmd_pool_bits,
6083 HPSA_NRESERVED_CMDS,
6084 offset);
6085 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
6086 offset = 0;
6087 continue;
6089 c = h->cmd_pool + i;
6090 refcount = atomic_inc_return(&c->refcount);
6091 if (unlikely(refcount > 1)) {
6092 cmd_free(h, c); /* already in use */
6093 offset = (i + 1) % HPSA_NRESERVED_CMDS;
6094 continue;
6096 set_bit(i & (BITS_PER_LONG - 1),
6097 h->cmd_pool_bits + (i / BITS_PER_LONG));
6098 break; /* it's ours now. */
6100 hpsa_cmd_partial_init(h, i, c);
6101 return c;
6105 * This is the complementary operation to cmd_alloc(). Note, however, in some
6106 * corner cases it may also be used to free blocks allocated by
6107 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
6108 * the clear-bit is harmless.
6110 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
6112 if (atomic_dec_and_test(&c->refcount)) {
6113 int i;
6115 i = c - h->cmd_pool;
6116 clear_bit(i & (BITS_PER_LONG - 1),
6117 h->cmd_pool_bits + (i / BITS_PER_LONG));
6121 #ifdef CONFIG_COMPAT
6123 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
6124 void __user *arg)
6126 IOCTL32_Command_struct __user *arg32 =
6127 (IOCTL32_Command_struct __user *) arg;
6128 IOCTL_Command_struct arg64;
6129 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
6130 int err;
6131 u32 cp;
6133 memset(&arg64, 0, sizeof(arg64));
6134 err = 0;
6135 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6136 sizeof(arg64.LUN_info));
6137 err |= copy_from_user(&arg64.Request, &arg32->Request,
6138 sizeof(arg64.Request));
6139 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6140 sizeof(arg64.error_info));
6141 err |= get_user(arg64.buf_size, &arg32->buf_size);
6142 err |= get_user(cp, &arg32->buf);
6143 arg64.buf = compat_ptr(cp);
6144 err |= copy_to_user(p, &arg64, sizeof(arg64));
6146 if (err)
6147 return -EFAULT;
6149 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
6150 if (err)
6151 return err;
6152 err |= copy_in_user(&arg32->error_info, &p->error_info,
6153 sizeof(arg32->error_info));
6154 if (err)
6155 return -EFAULT;
6156 return err;
6159 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
6160 int cmd, void __user *arg)
6162 BIG_IOCTL32_Command_struct __user *arg32 =
6163 (BIG_IOCTL32_Command_struct __user *) arg;
6164 BIG_IOCTL_Command_struct arg64;
6165 BIG_IOCTL_Command_struct __user *p =
6166 compat_alloc_user_space(sizeof(arg64));
6167 int err;
6168 u32 cp;
6170 memset(&arg64, 0, sizeof(arg64));
6171 err = 0;
6172 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6173 sizeof(arg64.LUN_info));
6174 err |= copy_from_user(&arg64.Request, &arg32->Request,
6175 sizeof(arg64.Request));
6176 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6177 sizeof(arg64.error_info));
6178 err |= get_user(arg64.buf_size, &arg32->buf_size);
6179 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
6180 err |= get_user(cp, &arg32->buf);
6181 arg64.buf = compat_ptr(cp);
6182 err |= copy_to_user(p, &arg64, sizeof(arg64));
6184 if (err)
6185 return -EFAULT;
6187 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
6188 if (err)
6189 return err;
6190 err |= copy_in_user(&arg32->error_info, &p->error_info,
6191 sizeof(arg32->error_info));
6192 if (err)
6193 return -EFAULT;
6194 return err;
6197 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6199 switch (cmd) {
6200 case CCISS_GETPCIINFO:
6201 case CCISS_GETINTINFO:
6202 case CCISS_SETINTINFO:
6203 case CCISS_GETNODENAME:
6204 case CCISS_SETNODENAME:
6205 case CCISS_GETHEARTBEAT:
6206 case CCISS_GETBUSTYPES:
6207 case CCISS_GETFIRMVER:
6208 case CCISS_GETDRIVVER:
6209 case CCISS_REVALIDVOLS:
6210 case CCISS_DEREGDISK:
6211 case CCISS_REGNEWDISK:
6212 case CCISS_REGNEWD:
6213 case CCISS_RESCANDISK:
6214 case CCISS_GETLUNINFO:
6215 return hpsa_ioctl(dev, cmd, arg);
6217 case CCISS_PASSTHRU32:
6218 return hpsa_ioctl32_passthru(dev, cmd, arg);
6219 case CCISS_BIG_PASSTHRU32:
6220 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6222 default:
6223 return -ENOIOCTLCMD;
6226 #endif
6228 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6230 struct hpsa_pci_info pciinfo;
6232 if (!argp)
6233 return -EINVAL;
6234 pciinfo.domain = pci_domain_nr(h->pdev->bus);
6235 pciinfo.bus = h->pdev->bus->number;
6236 pciinfo.dev_fn = h->pdev->devfn;
6237 pciinfo.board_id = h->board_id;
6238 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
6239 return -EFAULT;
6240 return 0;
6243 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6245 DriverVer_type DriverVer;
6246 unsigned char vmaj, vmin, vsubmin;
6247 int rc;
6249 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6250 &vmaj, &vmin, &vsubmin);
6251 if (rc != 3) {
6252 dev_info(&h->pdev->dev, "driver version string '%s' "
6253 "unrecognized.", HPSA_DRIVER_VERSION);
6254 vmaj = 0;
6255 vmin = 0;
6256 vsubmin = 0;
6258 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6259 if (!argp)
6260 return -EINVAL;
6261 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
6262 return -EFAULT;
6263 return 0;
6266 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6268 IOCTL_Command_struct iocommand;
6269 struct CommandList *c;
6270 char *buff = NULL;
6271 u64 temp64;
6272 int rc = 0;
6274 if (!argp)
6275 return -EINVAL;
6276 if (!capable(CAP_SYS_RAWIO))
6277 return -EPERM;
6278 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6279 return -EFAULT;
6280 if ((iocommand.buf_size < 1) &&
6281 (iocommand.Request.Type.Direction != XFER_NONE)) {
6282 return -EINVAL;
6284 if (iocommand.buf_size > 0) {
6285 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
6286 if (buff == NULL)
6287 return -ENOMEM;
6288 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6289 /* Copy the data into the buffer we created */
6290 if (copy_from_user(buff, iocommand.buf,
6291 iocommand.buf_size)) {
6292 rc = -EFAULT;
6293 goto out_kfree;
6295 } else {
6296 memset(buff, 0, iocommand.buf_size);
6299 c = cmd_alloc(h);
6301 /* Fill in the command type */
6302 c->cmd_type = CMD_IOCTL_PEND;
6303 c->scsi_cmd = SCSI_CMD_BUSY;
6304 /* Fill in Command Header */
6305 c->Header.ReplyQueue = 0; /* unused in simple mode */
6306 if (iocommand.buf_size > 0) { /* buffer to fill */
6307 c->Header.SGList = 1;
6308 c->Header.SGTotal = cpu_to_le16(1);
6309 } else { /* no buffers to fill */
6310 c->Header.SGList = 0;
6311 c->Header.SGTotal = cpu_to_le16(0);
6313 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
6315 /* Fill in Request block */
6316 memcpy(&c->Request, &iocommand.Request,
6317 sizeof(c->Request));
6319 /* Fill in the scatter gather information */
6320 if (iocommand.buf_size > 0) {
6321 temp64 = pci_map_single(h->pdev, buff,
6322 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
6323 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6324 c->SG[0].Addr = cpu_to_le64(0);
6325 c->SG[0].Len = cpu_to_le32(0);
6326 rc = -ENOMEM;
6327 goto out;
6329 c->SG[0].Addr = cpu_to_le64(temp64);
6330 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
6331 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
6333 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6334 NO_TIMEOUT);
6335 if (iocommand.buf_size > 0)
6336 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
6337 check_ioctl_unit_attention(h, c);
6338 if (rc) {
6339 rc = -EIO;
6340 goto out;
6343 /* Copy the error information out */
6344 memcpy(&iocommand.error_info, c->err_info,
6345 sizeof(iocommand.error_info));
6346 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
6347 rc = -EFAULT;
6348 goto out;
6350 if ((iocommand.Request.Type.Direction & XFER_READ) &&
6351 iocommand.buf_size > 0) {
6352 /* Copy the data out of the buffer we created */
6353 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
6354 rc = -EFAULT;
6355 goto out;
6358 out:
6359 cmd_free(h, c);
6360 out_kfree:
6361 kfree(buff);
6362 return rc;
6365 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6367 BIG_IOCTL_Command_struct *ioc;
6368 struct CommandList *c;
6369 unsigned char **buff = NULL;
6370 int *buff_size = NULL;
6371 u64 temp64;
6372 BYTE sg_used = 0;
6373 int status = 0;
6374 u32 left;
6375 u32 sz;
6376 BYTE __user *data_ptr;
6378 if (!argp)
6379 return -EINVAL;
6380 if (!capable(CAP_SYS_RAWIO))
6381 return -EPERM;
6382 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
6383 if (!ioc) {
6384 status = -ENOMEM;
6385 goto cleanup1;
6387 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
6388 status = -EFAULT;
6389 goto cleanup1;
6391 if ((ioc->buf_size < 1) &&
6392 (ioc->Request.Type.Direction != XFER_NONE)) {
6393 status = -EINVAL;
6394 goto cleanup1;
6396 /* Check kmalloc limits using all SGs */
6397 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
6398 status = -EINVAL;
6399 goto cleanup1;
6401 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
6402 status = -EINVAL;
6403 goto cleanup1;
6405 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
6406 if (!buff) {
6407 status = -ENOMEM;
6408 goto cleanup1;
6410 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
6411 if (!buff_size) {
6412 status = -ENOMEM;
6413 goto cleanup1;
6415 left = ioc->buf_size;
6416 data_ptr = ioc->buf;
6417 while (left) {
6418 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6419 buff_size[sg_used] = sz;
6420 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6421 if (buff[sg_used] == NULL) {
6422 status = -ENOMEM;
6423 goto cleanup1;
6425 if (ioc->Request.Type.Direction & XFER_WRITE) {
6426 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6427 status = -EFAULT;
6428 goto cleanup1;
6430 } else
6431 memset(buff[sg_used], 0, sz);
6432 left -= sz;
6433 data_ptr += sz;
6434 sg_used++;
6436 c = cmd_alloc(h);
6438 c->cmd_type = CMD_IOCTL_PEND;
6439 c->scsi_cmd = SCSI_CMD_BUSY;
6440 c->Header.ReplyQueue = 0;
6441 c->Header.SGList = (u8) sg_used;
6442 c->Header.SGTotal = cpu_to_le16(sg_used);
6443 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6444 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6445 if (ioc->buf_size > 0) {
6446 int i;
6447 for (i = 0; i < sg_used; i++) {
6448 temp64 = pci_map_single(h->pdev, buff[i],
6449 buff_size[i], PCI_DMA_BIDIRECTIONAL);
6450 if (dma_mapping_error(&h->pdev->dev,
6451 (dma_addr_t) temp64)) {
6452 c->SG[i].Addr = cpu_to_le64(0);
6453 c->SG[i].Len = cpu_to_le32(0);
6454 hpsa_pci_unmap(h->pdev, c, i,
6455 PCI_DMA_BIDIRECTIONAL);
6456 status = -ENOMEM;
6457 goto cleanup0;
6459 c->SG[i].Addr = cpu_to_le64(temp64);
6460 c->SG[i].Len = cpu_to_le32(buff_size[i]);
6461 c->SG[i].Ext = cpu_to_le32(0);
6463 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6465 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6466 NO_TIMEOUT);
6467 if (sg_used)
6468 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
6469 check_ioctl_unit_attention(h, c);
6470 if (status) {
6471 status = -EIO;
6472 goto cleanup0;
6475 /* Copy the error information out */
6476 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6477 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
6478 status = -EFAULT;
6479 goto cleanup0;
6481 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6482 int i;
6484 /* Copy the data out of the buffer we created */
6485 BYTE __user *ptr = ioc->buf;
6486 for (i = 0; i < sg_used; i++) {
6487 if (copy_to_user(ptr, buff[i], buff_size[i])) {
6488 status = -EFAULT;
6489 goto cleanup0;
6491 ptr += buff_size[i];
6494 status = 0;
6495 cleanup0:
6496 cmd_free(h, c);
6497 cleanup1:
6498 if (buff) {
6499 int i;
6501 for (i = 0; i < sg_used; i++)
6502 kfree(buff[i]);
6503 kfree(buff);
6505 kfree(buff_size);
6506 kfree(ioc);
6507 return status;
6510 static void check_ioctl_unit_attention(struct ctlr_info *h,
6511 struct CommandList *c)
6513 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6514 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6515 (void) check_for_unit_attention(h, c);
6519 * ioctl
6521 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6523 struct ctlr_info *h;
6524 void __user *argp = (void __user *)arg;
6525 int rc;
6527 h = sdev_to_hba(dev);
6529 switch (cmd) {
6530 case CCISS_DEREGDISK:
6531 case CCISS_REGNEWDISK:
6532 case CCISS_REGNEWD:
6533 hpsa_scan_start(h->scsi_host);
6534 return 0;
6535 case CCISS_GETPCIINFO:
6536 return hpsa_getpciinfo_ioctl(h, argp);
6537 case CCISS_GETDRIVVER:
6538 return hpsa_getdrivver_ioctl(h, argp);
6539 case CCISS_PASSTHRU:
6540 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6541 return -EAGAIN;
6542 rc = hpsa_passthru_ioctl(h, argp);
6543 atomic_inc(&h->passthru_cmds_avail);
6544 return rc;
6545 case CCISS_BIG_PASSTHRU:
6546 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6547 return -EAGAIN;
6548 rc = hpsa_big_passthru_ioctl(h, argp);
6549 atomic_inc(&h->passthru_cmds_avail);
6550 return rc;
6551 default:
6552 return -ENOTTY;
6556 static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
6557 u8 reset_type)
6559 struct CommandList *c;
6561 c = cmd_alloc(h);
6563 /* fill_cmd can't fail here, no data buffer to map */
6564 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6565 RAID_CTLR_LUNID, TYPE_MSG);
6566 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6567 c->waiting = NULL;
6568 enqueue_cmd_and_start_io(h, c);
6569 /* Don't wait for completion, the reset won't complete. Don't free
6570 * the command either. This is the last command we will send before
6571 * re-initializing everything, so it doesn't matter and won't leak.
6573 return;
6576 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6577 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6578 int cmd_type)
6580 int pci_dir = XFER_NONE;
6582 c->cmd_type = CMD_IOCTL_PEND;
6583 c->scsi_cmd = SCSI_CMD_BUSY;
6584 c->Header.ReplyQueue = 0;
6585 if (buff != NULL && size > 0) {
6586 c->Header.SGList = 1;
6587 c->Header.SGTotal = cpu_to_le16(1);
6588 } else {
6589 c->Header.SGList = 0;
6590 c->Header.SGTotal = cpu_to_le16(0);
6592 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6594 if (cmd_type == TYPE_CMD) {
6595 switch (cmd) {
6596 case HPSA_INQUIRY:
6597 /* are we trying to read a vital product page */
6598 if (page_code & VPD_PAGE) {
6599 c->Request.CDB[1] = 0x01;
6600 c->Request.CDB[2] = (page_code & 0xff);
6602 c->Request.CDBLen = 6;
6603 c->Request.type_attr_dir =
6604 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6605 c->Request.Timeout = 0;
6606 c->Request.CDB[0] = HPSA_INQUIRY;
6607 c->Request.CDB[4] = size & 0xFF;
6608 break;
6609 case RECEIVE_DIAGNOSTIC:
6610 c->Request.CDBLen = 6;
6611 c->Request.type_attr_dir =
6612 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6613 c->Request.Timeout = 0;
6614 c->Request.CDB[0] = cmd;
6615 c->Request.CDB[1] = 1;
6616 c->Request.CDB[2] = 1;
6617 c->Request.CDB[3] = (size >> 8) & 0xFF;
6618 c->Request.CDB[4] = size & 0xFF;
6619 break;
6620 case HPSA_REPORT_LOG:
6621 case HPSA_REPORT_PHYS:
6622 /* Talking to controller so It's a physical command
6623 mode = 00 target = 0. Nothing to write.
6625 c->Request.CDBLen = 12;
6626 c->Request.type_attr_dir =
6627 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6628 c->Request.Timeout = 0;
6629 c->Request.CDB[0] = cmd;
6630 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6631 c->Request.CDB[7] = (size >> 16) & 0xFF;
6632 c->Request.CDB[8] = (size >> 8) & 0xFF;
6633 c->Request.CDB[9] = size & 0xFF;
6634 break;
6635 case BMIC_SENSE_DIAG_OPTIONS:
6636 c->Request.CDBLen = 16;
6637 c->Request.type_attr_dir =
6638 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6639 c->Request.Timeout = 0;
6640 /* Spec says this should be BMIC_WRITE */
6641 c->Request.CDB[0] = BMIC_READ;
6642 c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
6643 break;
6644 case BMIC_SET_DIAG_OPTIONS:
6645 c->Request.CDBLen = 16;
6646 c->Request.type_attr_dir =
6647 TYPE_ATTR_DIR(cmd_type,
6648 ATTR_SIMPLE, XFER_WRITE);
6649 c->Request.Timeout = 0;
6650 c->Request.CDB[0] = BMIC_WRITE;
6651 c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
6652 break;
6653 case HPSA_CACHE_FLUSH:
6654 c->Request.CDBLen = 12;
6655 c->Request.type_attr_dir =
6656 TYPE_ATTR_DIR(cmd_type,
6657 ATTR_SIMPLE, XFER_WRITE);
6658 c->Request.Timeout = 0;
6659 c->Request.CDB[0] = BMIC_WRITE;
6660 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6661 c->Request.CDB[7] = (size >> 8) & 0xFF;
6662 c->Request.CDB[8] = size & 0xFF;
6663 break;
6664 case TEST_UNIT_READY:
6665 c->Request.CDBLen = 6;
6666 c->Request.type_attr_dir =
6667 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6668 c->Request.Timeout = 0;
6669 break;
6670 case HPSA_GET_RAID_MAP:
6671 c->Request.CDBLen = 12;
6672 c->Request.type_attr_dir =
6673 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6674 c->Request.Timeout = 0;
6675 c->Request.CDB[0] = HPSA_CISS_READ;
6676 c->Request.CDB[1] = cmd;
6677 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6678 c->Request.CDB[7] = (size >> 16) & 0xFF;
6679 c->Request.CDB[8] = (size >> 8) & 0xFF;
6680 c->Request.CDB[9] = size & 0xFF;
6681 break;
6682 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6683 c->Request.CDBLen = 10;
6684 c->Request.type_attr_dir =
6685 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6686 c->Request.Timeout = 0;
6687 c->Request.CDB[0] = BMIC_READ;
6688 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6689 c->Request.CDB[7] = (size >> 16) & 0xFF;
6690 c->Request.CDB[8] = (size >> 8) & 0xFF;
6691 break;
6692 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6693 c->Request.CDBLen = 10;
6694 c->Request.type_attr_dir =
6695 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6696 c->Request.Timeout = 0;
6697 c->Request.CDB[0] = BMIC_READ;
6698 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6699 c->Request.CDB[7] = (size >> 16) & 0xFF;
6700 c->Request.CDB[8] = (size >> 8) & 0XFF;
6701 break;
6702 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
6703 c->Request.CDBLen = 10;
6704 c->Request.type_attr_dir =
6705 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6706 c->Request.Timeout = 0;
6707 c->Request.CDB[0] = BMIC_READ;
6708 c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
6709 c->Request.CDB[7] = (size >> 16) & 0xFF;
6710 c->Request.CDB[8] = (size >> 8) & 0XFF;
6711 break;
6712 case BMIC_SENSE_STORAGE_BOX_PARAMS:
6713 c->Request.CDBLen = 10;
6714 c->Request.type_attr_dir =
6715 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6716 c->Request.Timeout = 0;
6717 c->Request.CDB[0] = BMIC_READ;
6718 c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
6719 c->Request.CDB[7] = (size >> 16) & 0xFF;
6720 c->Request.CDB[8] = (size >> 8) & 0XFF;
6721 break;
6722 case BMIC_IDENTIFY_CONTROLLER:
6723 c->Request.CDBLen = 10;
6724 c->Request.type_attr_dir =
6725 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6726 c->Request.Timeout = 0;
6727 c->Request.CDB[0] = BMIC_READ;
6728 c->Request.CDB[1] = 0;
6729 c->Request.CDB[2] = 0;
6730 c->Request.CDB[3] = 0;
6731 c->Request.CDB[4] = 0;
6732 c->Request.CDB[5] = 0;
6733 c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
6734 c->Request.CDB[7] = (size >> 16) & 0xFF;
6735 c->Request.CDB[8] = (size >> 8) & 0XFF;
6736 c->Request.CDB[9] = 0;
6737 break;
6738 default:
6739 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
6740 BUG();
6742 } else if (cmd_type == TYPE_MSG) {
6743 switch (cmd) {
6745 case HPSA_PHYS_TARGET_RESET:
6746 c->Request.CDBLen = 16;
6747 c->Request.type_attr_dir =
6748 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6749 c->Request.Timeout = 0; /* Don't time out */
6750 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6751 c->Request.CDB[0] = HPSA_RESET;
6752 c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6753 /* Physical target reset needs no control bytes 4-7*/
6754 c->Request.CDB[4] = 0x00;
6755 c->Request.CDB[5] = 0x00;
6756 c->Request.CDB[6] = 0x00;
6757 c->Request.CDB[7] = 0x00;
6758 break;
6759 case HPSA_DEVICE_RESET_MSG:
6760 c->Request.CDBLen = 16;
6761 c->Request.type_attr_dir =
6762 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6763 c->Request.Timeout = 0; /* Don't time out */
6764 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6765 c->Request.CDB[0] = cmd;
6766 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6767 /* If bytes 4-7 are zero, it means reset the */
6768 /* LunID device */
6769 c->Request.CDB[4] = 0x00;
6770 c->Request.CDB[5] = 0x00;
6771 c->Request.CDB[6] = 0x00;
6772 c->Request.CDB[7] = 0x00;
6773 break;
6774 default:
6775 dev_warn(&h->pdev->dev, "unknown message type %d\n",
6776 cmd);
6777 BUG();
6779 } else {
6780 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6781 BUG();
6784 switch (GET_DIR(c->Request.type_attr_dir)) {
6785 case XFER_READ:
6786 pci_dir = PCI_DMA_FROMDEVICE;
6787 break;
6788 case XFER_WRITE:
6789 pci_dir = PCI_DMA_TODEVICE;
6790 break;
6791 case XFER_NONE:
6792 pci_dir = PCI_DMA_NONE;
6793 break;
6794 default:
6795 pci_dir = PCI_DMA_BIDIRECTIONAL;
6797 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
6798 return -1;
6799 return 0;
6803 * Map (physical) PCI mem into (virtual) kernel space
6805 static void __iomem *remap_pci_mem(ulong base, ulong size)
6807 ulong page_base = ((ulong) base) & PAGE_MASK;
6808 ulong page_offs = ((ulong) base) - page_base;
6809 void __iomem *page_remapped = ioremap_nocache(page_base,
6810 page_offs + size);
6812 return page_remapped ? (page_remapped + page_offs) : NULL;
6815 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6817 return h->access.command_completed(h, q);
6820 static inline bool interrupt_pending(struct ctlr_info *h)
6822 return h->access.intr_pending(h);
6825 static inline long interrupt_not_for_us(struct ctlr_info *h)
6827 return (h->access.intr_pending(h) == 0) ||
6828 (h->interrupts_enabled == 0);
6831 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6832 u32 raw_tag)
6834 if (unlikely(tag_index >= h->nr_cmds)) {
6835 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6836 return 1;
6838 return 0;
6841 static inline void finish_cmd(struct CommandList *c)
6843 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6844 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6845 || c->cmd_type == CMD_IOACCEL2))
6846 complete_scsi_command(c);
6847 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6848 complete(c->waiting);
6851 /* process completion of an indexed ("direct lookup") command */
6852 static inline void process_indexed_cmd(struct ctlr_info *h,
6853 u32 raw_tag)
6855 u32 tag_index;
6856 struct CommandList *c;
6858 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6859 if (!bad_tag(h, tag_index, raw_tag)) {
6860 c = h->cmd_pool + tag_index;
6861 finish_cmd(c);
6865 /* Some controllers, like p400, will give us one interrupt
6866 * after a soft reset, even if we turned interrupts off.
6867 * Only need to check for this in the hpsa_xxx_discard_completions
6868 * functions.
6870 static int ignore_bogus_interrupt(struct ctlr_info *h)
6872 if (likely(!reset_devices))
6873 return 0;
6875 if (likely(h->interrupts_enabled))
6876 return 0;
6878 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6879 "(known firmware bug.) Ignoring.\n");
6881 return 1;
6885 * Convert &h->q[x] (passed to interrupt handlers) back to h.
6886 * Relies on (h-q[x] == x) being true for x such that
6887 * 0 <= x < MAX_REPLY_QUEUES.
6889 static struct ctlr_info *queue_to_hba(u8 *queue)
6891 return container_of((queue - *queue), struct ctlr_info, q[0]);
6894 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6896 struct ctlr_info *h = queue_to_hba(queue);
6897 u8 q = *(u8 *) queue;
6898 u32 raw_tag;
6900 if (ignore_bogus_interrupt(h))
6901 return IRQ_NONE;
6903 if (interrupt_not_for_us(h))
6904 return IRQ_NONE;
6905 h->last_intr_timestamp = get_jiffies_64();
6906 while (interrupt_pending(h)) {
6907 raw_tag = get_next_completion(h, q);
6908 while (raw_tag != FIFO_EMPTY)
6909 raw_tag = next_command(h, q);
6911 return IRQ_HANDLED;
6914 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
6916 struct ctlr_info *h = queue_to_hba(queue);
6917 u32 raw_tag;
6918 u8 q = *(u8 *) queue;
6920 if (ignore_bogus_interrupt(h))
6921 return IRQ_NONE;
6923 h->last_intr_timestamp = get_jiffies_64();
6924 raw_tag = get_next_completion(h, q);
6925 while (raw_tag != FIFO_EMPTY)
6926 raw_tag = next_command(h, q);
6927 return IRQ_HANDLED;
6930 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
6932 struct ctlr_info *h = queue_to_hba((u8 *) queue);
6933 u32 raw_tag;
6934 u8 q = *(u8 *) queue;
6936 if (interrupt_not_for_us(h))
6937 return IRQ_NONE;
6938 h->last_intr_timestamp = get_jiffies_64();
6939 while (interrupt_pending(h)) {
6940 raw_tag = get_next_completion(h, q);
6941 while (raw_tag != FIFO_EMPTY) {
6942 process_indexed_cmd(h, raw_tag);
6943 raw_tag = next_command(h, q);
6946 return IRQ_HANDLED;
6949 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
6951 struct ctlr_info *h = queue_to_hba(queue);
6952 u32 raw_tag;
6953 u8 q = *(u8 *) queue;
6955 h->last_intr_timestamp = get_jiffies_64();
6956 raw_tag = get_next_completion(h, q);
6957 while (raw_tag != FIFO_EMPTY) {
6958 process_indexed_cmd(h, raw_tag);
6959 raw_tag = next_command(h, q);
6961 return IRQ_HANDLED;
6964 /* Send a message CDB to the firmware. Careful, this only works
6965 * in simple mode, not performant mode due to the tag lookup.
6966 * We only ever use this immediately after a controller reset.
6968 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6969 unsigned char type)
6971 struct Command {
6972 struct CommandListHeader CommandHeader;
6973 struct RequestBlock Request;
6974 struct ErrDescriptor ErrorDescriptor;
6976 struct Command *cmd;
6977 static const size_t cmd_sz = sizeof(*cmd) +
6978 sizeof(cmd->ErrorDescriptor);
6979 dma_addr_t paddr64;
6980 __le32 paddr32;
6981 u32 tag;
6982 void __iomem *vaddr;
6983 int i, err;
6985 vaddr = pci_ioremap_bar(pdev, 0);
6986 if (vaddr == NULL)
6987 return -ENOMEM;
6989 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
6990 * CCISS commands, so they must be allocated from the lower 4GiB of
6991 * memory.
6993 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6994 if (err) {
6995 iounmap(vaddr);
6996 return err;
6999 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
7000 if (cmd == NULL) {
7001 iounmap(vaddr);
7002 return -ENOMEM;
7005 /* This must fit, because of the 32-bit consistent DMA mask. Also,
7006 * although there's no guarantee, we assume that the address is at
7007 * least 4-byte aligned (most likely, it's page-aligned).
7009 paddr32 = cpu_to_le32(paddr64);
7011 cmd->CommandHeader.ReplyQueue = 0;
7012 cmd->CommandHeader.SGList = 0;
7013 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
7014 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
7015 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
7017 cmd->Request.CDBLen = 16;
7018 cmd->Request.type_attr_dir =
7019 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
7020 cmd->Request.Timeout = 0; /* Don't time out */
7021 cmd->Request.CDB[0] = opcode;
7022 cmd->Request.CDB[1] = type;
7023 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
7024 cmd->ErrorDescriptor.Addr =
7025 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
7026 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
7028 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
7030 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
7031 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
7032 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
7033 break;
7034 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
7037 iounmap(vaddr);
7039 /* we leak the DMA buffer here ... no choice since the controller could
7040 * still complete the command.
7042 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
7043 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
7044 opcode, type);
7045 return -ETIMEDOUT;
7048 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
7050 if (tag & HPSA_ERROR_BIT) {
7051 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
7052 opcode, type);
7053 return -EIO;
7056 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
7057 opcode, type);
7058 return 0;
7061 #define hpsa_noop(p) hpsa_message(p, 3, 0)
7063 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
7064 void __iomem *vaddr, u32 use_doorbell)
7067 if (use_doorbell) {
7068 /* For everything after the P600, the PCI power state method
7069 * of resetting the controller doesn't work, so we have this
7070 * other way using the doorbell register.
7072 dev_info(&pdev->dev, "using doorbell to reset controller\n");
7073 writel(use_doorbell, vaddr + SA5_DOORBELL);
7075 /* PMC hardware guys tell us we need a 10 second delay after
7076 * doorbell reset and before any attempt to talk to the board
7077 * at all to ensure that this actually works and doesn't fall
7078 * over in some weird corner cases.
7080 msleep(10000);
7081 } else { /* Try to do it the PCI power state way */
7083 /* Quoting from the Open CISS Specification: "The Power
7084 * Management Control/Status Register (CSR) controls the power
7085 * state of the device. The normal operating state is D0,
7086 * CSR=00h. The software off state is D3, CSR=03h. To reset
7087 * the controller, place the interface device in D3 then to D0,
7088 * this causes a secondary PCI reset which will reset the
7089 * controller." */
7091 int rc = 0;
7093 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
7095 /* enter the D3hot power management state */
7096 rc = pci_set_power_state(pdev, PCI_D3hot);
7097 if (rc)
7098 return rc;
7100 msleep(500);
7102 /* enter the D0 power management state */
7103 rc = pci_set_power_state(pdev, PCI_D0);
7104 if (rc)
7105 return rc;
7108 * The P600 requires a small delay when changing states.
7109 * Otherwise we may think the board did not reset and we bail.
7110 * This for kdump only and is particular to the P600.
7112 msleep(500);
7114 return 0;
7117 static void init_driver_version(char *driver_version, int len)
7119 memset(driver_version, 0, len);
7120 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
7123 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
7125 char *driver_version;
7126 int i, size = sizeof(cfgtable->driver_version);
7128 driver_version = kmalloc(size, GFP_KERNEL);
7129 if (!driver_version)
7130 return -ENOMEM;
7132 init_driver_version(driver_version, size);
7133 for (i = 0; i < size; i++)
7134 writeb(driver_version[i], &cfgtable->driver_version[i]);
7135 kfree(driver_version);
7136 return 0;
7139 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
7140 unsigned char *driver_ver)
7142 int i;
7144 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
7145 driver_ver[i] = readb(&cfgtable->driver_version[i]);
7148 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
7151 char *driver_ver, *old_driver_ver;
7152 int rc, size = sizeof(cfgtable->driver_version);
7154 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
7155 if (!old_driver_ver)
7156 return -ENOMEM;
7157 driver_ver = old_driver_ver + size;
7159 /* After a reset, the 32 bytes of "driver version" in the cfgtable
7160 * should have been changed, otherwise we know the reset failed.
7162 init_driver_version(old_driver_ver, size);
7163 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
7164 rc = !memcmp(driver_ver, old_driver_ver, size);
7165 kfree(old_driver_ver);
7166 return rc;
7168 /* This does a hard reset of the controller using PCI power management
7169 * states or the using the doorbell register.
7171 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
7173 u64 cfg_offset;
7174 u32 cfg_base_addr;
7175 u64 cfg_base_addr_index;
7176 void __iomem *vaddr;
7177 unsigned long paddr;
7178 u32 misc_fw_support;
7179 int rc;
7180 struct CfgTable __iomem *cfgtable;
7181 u32 use_doorbell;
7182 u16 command_register;
7184 /* For controllers as old as the P600, this is very nearly
7185 * the same thing as
7187 * pci_save_state(pci_dev);
7188 * pci_set_power_state(pci_dev, PCI_D3hot);
7189 * pci_set_power_state(pci_dev, PCI_D0);
7190 * pci_restore_state(pci_dev);
7192 * For controllers newer than the P600, the pci power state
7193 * method of resetting doesn't work so we have another way
7194 * using the doorbell register.
7197 if (!ctlr_is_resettable(board_id)) {
7198 dev_warn(&pdev->dev, "Controller not resettable\n");
7199 return -ENODEV;
7202 /* if controller is soft- but not hard resettable... */
7203 if (!ctlr_is_hard_resettable(board_id))
7204 return -ENOTSUPP; /* try soft reset later. */
7206 /* Save the PCI command register */
7207 pci_read_config_word(pdev, 4, &command_register);
7208 pci_save_state(pdev);
7210 /* find the first memory BAR, so we can find the cfg table */
7211 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7212 if (rc)
7213 return rc;
7214 vaddr = remap_pci_mem(paddr, 0x250);
7215 if (!vaddr)
7216 return -ENOMEM;
7218 /* find cfgtable in order to check if reset via doorbell is supported */
7219 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7220 &cfg_base_addr_index, &cfg_offset);
7221 if (rc)
7222 goto unmap_vaddr;
7223 cfgtable = remap_pci_mem(pci_resource_start(pdev,
7224 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7225 if (!cfgtable) {
7226 rc = -ENOMEM;
7227 goto unmap_vaddr;
7229 rc = write_driver_ver_to_cfgtable(cfgtable);
7230 if (rc)
7231 goto unmap_cfgtable;
7233 /* If reset via doorbell register is supported, use that.
7234 * There are two such methods. Favor the newest method.
7236 misc_fw_support = readl(&cfgtable->misc_fw_support);
7237 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7238 if (use_doorbell) {
7239 use_doorbell = DOORBELL_CTLR_RESET2;
7240 } else {
7241 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7242 if (use_doorbell) {
7243 dev_warn(&pdev->dev,
7244 "Soft reset not supported. Firmware update is required.\n");
7245 rc = -ENOTSUPP; /* try soft reset */
7246 goto unmap_cfgtable;
7250 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7251 if (rc)
7252 goto unmap_cfgtable;
7254 pci_restore_state(pdev);
7255 pci_write_config_word(pdev, 4, command_register);
7257 /* Some devices (notably the HP Smart Array 5i Controller)
7258 need a little pause here */
7259 msleep(HPSA_POST_RESET_PAUSE_MSECS);
7261 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7262 if (rc) {
7263 dev_warn(&pdev->dev,
7264 "Failed waiting for board to become ready after hard reset\n");
7265 goto unmap_cfgtable;
7268 rc = controller_reset_failed(vaddr);
7269 if (rc < 0)
7270 goto unmap_cfgtable;
7271 if (rc) {
7272 dev_warn(&pdev->dev, "Unable to successfully reset "
7273 "controller. Will try soft reset.\n");
7274 rc = -ENOTSUPP;
7275 } else {
7276 dev_info(&pdev->dev, "board ready after hard reset.\n");
7279 unmap_cfgtable:
7280 iounmap(cfgtable);
7282 unmap_vaddr:
7283 iounmap(vaddr);
7284 return rc;
7288 * We cannot read the structure directly, for portability we must use
7289 * the io functions.
7290 * This is for debug only.
7292 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
7294 #ifdef HPSA_DEBUG
7295 int i;
7296 char temp_name[17];
7298 dev_info(dev, "Controller Configuration information\n");
7299 dev_info(dev, "------------------------------------\n");
7300 for (i = 0; i < 4; i++)
7301 temp_name[i] = readb(&(tb->Signature[i]));
7302 temp_name[4] = '\0';
7303 dev_info(dev, " Signature = %s\n", temp_name);
7304 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
7305 dev_info(dev, " Transport methods supported = 0x%x\n",
7306 readl(&(tb->TransportSupport)));
7307 dev_info(dev, " Transport methods active = 0x%x\n",
7308 readl(&(tb->TransportActive)));
7309 dev_info(dev, " Requested transport Method = 0x%x\n",
7310 readl(&(tb->HostWrite.TransportRequest)));
7311 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
7312 readl(&(tb->HostWrite.CoalIntDelay)));
7313 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
7314 readl(&(tb->HostWrite.CoalIntCount)));
7315 dev_info(dev, " Max outstanding commands = %d\n",
7316 readl(&(tb->CmdsOutMax)));
7317 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7318 for (i = 0; i < 16; i++)
7319 temp_name[i] = readb(&(tb->ServerName[i]));
7320 temp_name[16] = '\0';
7321 dev_info(dev, " Server Name = %s\n", temp_name);
7322 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
7323 readl(&(tb->HeartBeat)));
7324 #endif /* HPSA_DEBUG */
7327 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7329 int i, offset, mem_type, bar_type;
7331 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
7332 return 0;
7333 offset = 0;
7334 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7335 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7336 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7337 offset += 4;
7338 else {
7339 mem_type = pci_resource_flags(pdev, i) &
7340 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7341 switch (mem_type) {
7342 case PCI_BASE_ADDRESS_MEM_TYPE_32:
7343 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7344 offset += 4; /* 32 bit */
7345 break;
7346 case PCI_BASE_ADDRESS_MEM_TYPE_64:
7347 offset += 8;
7348 break;
7349 default: /* reserved in PCI 2.2 */
7350 dev_warn(&pdev->dev,
7351 "base address is invalid\n");
7352 return -1;
7353 break;
7356 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7357 return i + 1;
7359 return -1;
7362 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7364 pci_free_irq_vectors(h->pdev);
7365 h->msix_vectors = 0;
7368 static void hpsa_setup_reply_map(struct ctlr_info *h)
7370 const struct cpumask *mask;
7371 unsigned int queue, cpu;
7373 for (queue = 0; queue < h->msix_vectors; queue++) {
7374 mask = pci_irq_get_affinity(h->pdev, queue);
7375 if (!mask)
7376 goto fallback;
7378 for_each_cpu(cpu, mask)
7379 h->reply_map[cpu] = queue;
7381 return;
7383 fallback:
7384 for_each_possible_cpu(cpu)
7385 h->reply_map[cpu] = 0;
7388 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
7389 * controllers that are capable. If not, we use legacy INTx mode.
7391 static int hpsa_interrupt_mode(struct ctlr_info *h)
7393 unsigned int flags = PCI_IRQ_LEGACY;
7394 int ret;
7396 /* Some boards advertise MSI but don't really support it */
7397 switch (h->board_id) {
7398 case 0x40700E11:
7399 case 0x40800E11:
7400 case 0x40820E11:
7401 case 0x40830E11:
7402 break;
7403 default:
7404 ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
7405 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
7406 if (ret > 0) {
7407 h->msix_vectors = ret;
7408 return 0;
7411 flags |= PCI_IRQ_MSI;
7412 break;
7415 ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
7416 if (ret < 0)
7417 return ret;
7418 return 0;
7421 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
7422 bool *legacy_board)
7424 int i;
7425 u32 subsystem_vendor_id, subsystem_device_id;
7427 subsystem_vendor_id = pdev->subsystem_vendor;
7428 subsystem_device_id = pdev->subsystem_device;
7429 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7430 subsystem_vendor_id;
7432 if (legacy_board)
7433 *legacy_board = false;
7434 for (i = 0; i < ARRAY_SIZE(products); i++)
7435 if (*board_id == products[i].board_id) {
7436 if (products[i].access != &SA5A_access &&
7437 products[i].access != &SA5B_access)
7438 return i;
7439 dev_warn(&pdev->dev,
7440 "legacy board ID: 0x%08x\n",
7441 *board_id);
7442 if (legacy_board)
7443 *legacy_board = true;
7444 return i;
7447 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id);
7448 if (legacy_board)
7449 *legacy_board = true;
7450 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
7453 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7454 unsigned long *memory_bar)
7456 int i;
7458 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7459 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7460 /* addressing mode bits already removed */
7461 *memory_bar = pci_resource_start(pdev, i);
7462 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7463 *memory_bar);
7464 return 0;
7466 dev_warn(&pdev->dev, "no memory BAR found\n");
7467 return -ENODEV;
7470 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7471 int wait_for_ready)
7473 int i, iterations;
7474 u32 scratchpad;
7475 if (wait_for_ready)
7476 iterations = HPSA_BOARD_READY_ITERATIONS;
7477 else
7478 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7480 for (i = 0; i < iterations; i++) {
7481 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7482 if (wait_for_ready) {
7483 if (scratchpad == HPSA_FIRMWARE_READY)
7484 return 0;
7485 } else {
7486 if (scratchpad != HPSA_FIRMWARE_READY)
7487 return 0;
7489 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7491 dev_warn(&pdev->dev, "board not ready, timed out.\n");
7492 return -ENODEV;
7495 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7496 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7497 u64 *cfg_offset)
7499 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7500 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7501 *cfg_base_addr &= (u32) 0x0000ffff;
7502 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7503 if (*cfg_base_addr_index == -1) {
7504 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7505 return -ENODEV;
7507 return 0;
7510 static void hpsa_free_cfgtables(struct ctlr_info *h)
7512 if (h->transtable) {
7513 iounmap(h->transtable);
7514 h->transtable = NULL;
7516 if (h->cfgtable) {
7517 iounmap(h->cfgtable);
7518 h->cfgtable = NULL;
7522 /* Find and map CISS config table and transfer table
7523 + * several items must be unmapped (freed) later
7524 + * */
7525 static int hpsa_find_cfgtables(struct ctlr_info *h)
7527 u64 cfg_offset;
7528 u32 cfg_base_addr;
7529 u64 cfg_base_addr_index;
7530 u32 trans_offset;
7531 int rc;
7533 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7534 &cfg_base_addr_index, &cfg_offset);
7535 if (rc)
7536 return rc;
7537 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7538 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7539 if (!h->cfgtable) {
7540 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7541 return -ENOMEM;
7543 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7544 if (rc)
7545 return rc;
7546 /* Find performant mode table. */
7547 trans_offset = readl(&h->cfgtable->TransMethodOffset);
7548 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7549 cfg_base_addr_index)+cfg_offset+trans_offset,
7550 sizeof(*h->transtable));
7551 if (!h->transtable) {
7552 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7553 hpsa_free_cfgtables(h);
7554 return -ENOMEM;
7556 return 0;
7559 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7561 #define MIN_MAX_COMMANDS 16
7562 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7564 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7566 /* Limit commands in memory limited kdump scenario. */
7567 if (reset_devices && h->max_commands > 32)
7568 h->max_commands = 32;
7570 if (h->max_commands < MIN_MAX_COMMANDS) {
7571 dev_warn(&h->pdev->dev,
7572 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
7573 h->max_commands,
7574 MIN_MAX_COMMANDS);
7575 h->max_commands = MIN_MAX_COMMANDS;
7579 /* If the controller reports that the total max sg entries is greater than 512,
7580 * then we know that chained SG blocks work. (Original smart arrays did not
7581 * support chained SG blocks and would return zero for max sg entries.)
7583 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7585 return h->maxsgentries > 512;
7588 /* Interrogate the hardware for some limits:
7589 * max commands, max SG elements without chaining, and with chaining,
7590 * SG chain block size, etc.
7592 static void hpsa_find_board_params(struct ctlr_info *h)
7594 hpsa_get_max_perf_mode_cmds(h);
7595 h->nr_cmds = h->max_commands;
7596 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7597 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7598 if (hpsa_supports_chained_sg_blocks(h)) {
7599 /* Limit in-command s/g elements to 32 save dma'able memory. */
7600 h->max_cmd_sg_entries = 32;
7601 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7602 h->maxsgentries--; /* save one for chain pointer */
7603 } else {
7605 * Original smart arrays supported at most 31 s/g entries
7606 * embedded inline in the command (trying to use more
7607 * would lock up the controller)
7609 h->max_cmd_sg_entries = 31;
7610 h->maxsgentries = 31; /* default to traditional values */
7611 h->chainsize = 0;
7614 /* Find out what task management functions are supported and cache */
7615 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7616 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7617 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7618 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7619 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7620 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7621 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7624 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7626 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7627 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7628 return false;
7630 return true;
7633 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7635 u32 driver_support;
7637 driver_support = readl(&(h->cfgtable->driver_support));
7638 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
7639 #ifdef CONFIG_X86
7640 driver_support |= ENABLE_SCSI_PREFETCH;
7641 #endif
7642 driver_support |= ENABLE_UNIT_ATTN;
7643 writel(driver_support, &(h->cfgtable->driver_support));
7646 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
7647 * in a prefetch beyond physical memory.
7649 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7651 u32 dma_prefetch;
7653 if (h->board_id != 0x3225103C)
7654 return;
7655 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7656 dma_prefetch |= 0x8000;
7657 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7660 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7662 int i;
7663 u32 doorbell_value;
7664 unsigned long flags;
7665 /* wait until the clear_event_notify bit 6 is cleared by controller. */
7666 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7667 spin_lock_irqsave(&h->lock, flags);
7668 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7669 spin_unlock_irqrestore(&h->lock, flags);
7670 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7671 goto done;
7672 /* delay and try again */
7673 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7675 return -ENODEV;
7676 done:
7677 return 0;
7680 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7682 int i;
7683 u32 doorbell_value;
7684 unsigned long flags;
7686 /* under certain very rare conditions, this can take awhile.
7687 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7688 * as we enter this code.)
7690 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7691 if (h->remove_in_progress)
7692 goto done;
7693 spin_lock_irqsave(&h->lock, flags);
7694 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7695 spin_unlock_irqrestore(&h->lock, flags);
7696 if (!(doorbell_value & CFGTBL_ChangeReq))
7697 goto done;
7698 /* delay and try again */
7699 msleep(MODE_CHANGE_WAIT_INTERVAL);
7701 return -ENODEV;
7702 done:
7703 return 0;
7706 /* return -ENODEV or other reason on error, 0 on success */
7707 static int hpsa_enter_simple_mode(struct ctlr_info *h)
7709 u32 trans_support;
7711 trans_support = readl(&(h->cfgtable->TransportSupport));
7712 if (!(trans_support & SIMPLE_MODE))
7713 return -ENOTSUPP;
7715 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7717 /* Update the field, and then ring the doorbell */
7718 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7719 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7720 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7721 if (hpsa_wait_for_mode_change_ack(h))
7722 goto error;
7723 print_cfg_table(&h->pdev->dev, h->cfgtable);
7724 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7725 goto error;
7726 h->transMethod = CFGTBL_Trans_Simple;
7727 return 0;
7728 error:
7729 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
7730 return -ENODEV;
7733 /* free items allocated or mapped by hpsa_pci_init */
7734 static void hpsa_free_pci_init(struct ctlr_info *h)
7736 hpsa_free_cfgtables(h); /* pci_init 4 */
7737 iounmap(h->vaddr); /* pci_init 3 */
7738 h->vaddr = NULL;
7739 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
7741 * call pci_disable_device before pci_release_regions per
7742 * Documentation/PCI/pci.txt
7744 pci_disable_device(h->pdev); /* pci_init 1 */
7745 pci_release_regions(h->pdev); /* pci_init 2 */
7748 /* several items must be freed later */
7749 static int hpsa_pci_init(struct ctlr_info *h)
7751 int prod_index, err;
7752 bool legacy_board;
7754 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
7755 if (prod_index < 0)
7756 return prod_index;
7757 h->product_name = products[prod_index].product_name;
7758 h->access = *(products[prod_index].access);
7759 h->legacy_board = legacy_board;
7760 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7761 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7763 err = pci_enable_device(h->pdev);
7764 if (err) {
7765 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7766 pci_disable_device(h->pdev);
7767 return err;
7770 err = pci_request_regions(h->pdev, HPSA);
7771 if (err) {
7772 dev_err(&h->pdev->dev,
7773 "failed to obtain PCI resources\n");
7774 pci_disable_device(h->pdev);
7775 return err;
7778 pci_set_master(h->pdev);
7780 err = hpsa_interrupt_mode(h);
7781 if (err)
7782 goto clean1;
7784 /* setup mapping between CPU and reply queue */
7785 hpsa_setup_reply_map(h);
7787 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7788 if (err)
7789 goto clean2; /* intmode+region, pci */
7790 h->vaddr = remap_pci_mem(h->paddr, 0x250);
7791 if (!h->vaddr) {
7792 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7793 err = -ENOMEM;
7794 goto clean2; /* intmode+region, pci */
7796 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7797 if (err)
7798 goto clean3; /* vaddr, intmode+region, pci */
7799 err = hpsa_find_cfgtables(h);
7800 if (err)
7801 goto clean3; /* vaddr, intmode+region, pci */
7802 hpsa_find_board_params(h);
7804 if (!hpsa_CISS_signature_present(h)) {
7805 err = -ENODEV;
7806 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
7808 hpsa_set_driver_support_bits(h);
7809 hpsa_p600_dma_prefetch_quirk(h);
7810 err = hpsa_enter_simple_mode(h);
7811 if (err)
7812 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
7813 return 0;
7815 clean4: /* cfgtables, vaddr, intmode+region, pci */
7816 hpsa_free_cfgtables(h);
7817 clean3: /* vaddr, intmode+region, pci */
7818 iounmap(h->vaddr);
7819 h->vaddr = NULL;
7820 clean2: /* intmode+region, pci */
7821 hpsa_disable_interrupt_mode(h);
7822 clean1:
7824 * call pci_disable_device before pci_release_regions per
7825 * Documentation/PCI/pci.txt
7827 pci_disable_device(h->pdev);
7828 pci_release_regions(h->pdev);
7829 return err;
7832 static void hpsa_hba_inquiry(struct ctlr_info *h)
7834 int rc;
7836 #define HBA_INQUIRY_BYTE_COUNT 64
7837 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7838 if (!h->hba_inquiry_data)
7839 return;
7840 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7841 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7842 if (rc != 0) {
7843 kfree(h->hba_inquiry_data);
7844 h->hba_inquiry_data = NULL;
7848 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7850 int rc, i;
7851 void __iomem *vaddr;
7853 if (!reset_devices)
7854 return 0;
7856 /* kdump kernel is loading, we don't know in which state is
7857 * the pci interface. The dev->enable_cnt is equal zero
7858 * so we call enable+disable, wait a while and switch it on.
7860 rc = pci_enable_device(pdev);
7861 if (rc) {
7862 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7863 return -ENODEV;
7865 pci_disable_device(pdev);
7866 msleep(260); /* a randomly chosen number */
7867 rc = pci_enable_device(pdev);
7868 if (rc) {
7869 dev_warn(&pdev->dev, "failed to enable device.\n");
7870 return -ENODEV;
7873 pci_set_master(pdev);
7875 vaddr = pci_ioremap_bar(pdev, 0);
7876 if (vaddr == NULL) {
7877 rc = -ENOMEM;
7878 goto out_disable;
7880 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7881 iounmap(vaddr);
7883 /* Reset the controller with a PCI power-cycle or via doorbell */
7884 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
7886 /* -ENOTSUPP here means we cannot reset the controller
7887 * but it's already (and still) up and running in
7888 * "performant mode". Or, it might be 640x, which can't reset
7889 * due to concerns about shared bbwc between 6402/6404 pair.
7891 if (rc)
7892 goto out_disable;
7894 /* Now try to get the controller to respond to a no-op */
7895 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
7896 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7897 if (hpsa_noop(pdev) == 0)
7898 break;
7899 else
7900 dev_warn(&pdev->dev, "no-op failed%s\n",
7901 (i < 11 ? "; re-trying" : ""));
7904 out_disable:
7906 pci_disable_device(pdev);
7907 return rc;
7910 static void hpsa_free_cmd_pool(struct ctlr_info *h)
7912 kfree(h->cmd_pool_bits);
7913 h->cmd_pool_bits = NULL;
7914 if (h->cmd_pool) {
7915 pci_free_consistent(h->pdev,
7916 h->nr_cmds * sizeof(struct CommandList),
7917 h->cmd_pool,
7918 h->cmd_pool_dhandle);
7919 h->cmd_pool = NULL;
7920 h->cmd_pool_dhandle = 0;
7922 if (h->errinfo_pool) {
7923 pci_free_consistent(h->pdev,
7924 h->nr_cmds * sizeof(struct ErrorInfo),
7925 h->errinfo_pool,
7926 h->errinfo_pool_dhandle);
7927 h->errinfo_pool = NULL;
7928 h->errinfo_pool_dhandle = 0;
7932 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
7934 h->cmd_pool_bits = kzalloc(
7935 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
7936 sizeof(unsigned long), GFP_KERNEL);
7937 h->cmd_pool = pci_alloc_consistent(h->pdev,
7938 h->nr_cmds * sizeof(*h->cmd_pool),
7939 &(h->cmd_pool_dhandle));
7940 h->errinfo_pool = pci_alloc_consistent(h->pdev,
7941 h->nr_cmds * sizeof(*h->errinfo_pool),
7942 &(h->errinfo_pool_dhandle));
7943 if ((h->cmd_pool_bits == NULL)
7944 || (h->cmd_pool == NULL)
7945 || (h->errinfo_pool == NULL)) {
7946 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
7947 goto clean_up;
7949 hpsa_preinitialize_commands(h);
7950 return 0;
7951 clean_up:
7952 hpsa_free_cmd_pool(h);
7953 return -ENOMEM;
7956 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
7957 static void hpsa_free_irqs(struct ctlr_info *h)
7959 int i;
7961 if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
7962 /* Single reply queue, only one irq to free */
7963 free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
7964 h->q[h->intr_mode] = 0;
7965 return;
7968 for (i = 0; i < h->msix_vectors; i++) {
7969 free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
7970 h->q[i] = 0;
7972 for (; i < MAX_REPLY_QUEUES; i++)
7973 h->q[i] = 0;
7976 /* returns 0 on success; cleans up and returns -Enn on error */
7977 static int hpsa_request_irqs(struct ctlr_info *h,
7978 irqreturn_t (*msixhandler)(int, void *),
7979 irqreturn_t (*intxhandler)(int, void *))
7981 int rc, i;
7984 * initialize h->q[x] = x so that interrupt handlers know which
7985 * queue to process.
7987 for (i = 0; i < MAX_REPLY_QUEUES; i++)
7988 h->q[i] = (u8) i;
7990 if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
7991 /* If performant mode and MSI-X, use multiple reply queues */
7992 for (i = 0; i < h->msix_vectors; i++) {
7993 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
7994 rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
7995 0, h->intrname[i],
7996 &h->q[i]);
7997 if (rc) {
7998 int j;
8000 dev_err(&h->pdev->dev,
8001 "failed to get irq %d for %s\n",
8002 pci_irq_vector(h->pdev, i), h->devname);
8003 for (j = 0; j < i; j++) {
8004 free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
8005 h->q[j] = 0;
8007 for (; j < MAX_REPLY_QUEUES; j++)
8008 h->q[j] = 0;
8009 return rc;
8012 } else {
8013 /* Use single reply pool */
8014 if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
8015 sprintf(h->intrname[0], "%s-msi%s", h->devname,
8016 h->msix_vectors ? "x" : "");
8017 rc = request_irq(pci_irq_vector(h->pdev, 0),
8018 msixhandler, 0,
8019 h->intrname[0],
8020 &h->q[h->intr_mode]);
8021 } else {
8022 sprintf(h->intrname[h->intr_mode],
8023 "%s-intx", h->devname);
8024 rc = request_irq(pci_irq_vector(h->pdev, 0),
8025 intxhandler, IRQF_SHARED,
8026 h->intrname[0],
8027 &h->q[h->intr_mode]);
8030 if (rc) {
8031 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
8032 pci_irq_vector(h->pdev, 0), h->devname);
8033 hpsa_free_irqs(h);
8034 return -ENODEV;
8036 return 0;
8039 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
8041 int rc;
8042 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
8044 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
8045 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
8046 if (rc) {
8047 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
8048 return rc;
8051 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
8052 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8053 if (rc) {
8054 dev_warn(&h->pdev->dev, "Board failed to become ready "
8055 "after soft reset.\n");
8056 return rc;
8059 return 0;
8062 static void hpsa_free_reply_queues(struct ctlr_info *h)
8064 int i;
8066 for (i = 0; i < h->nreply_queues; i++) {
8067 if (!h->reply_queue[i].head)
8068 continue;
8069 pci_free_consistent(h->pdev,
8070 h->reply_queue_size,
8071 h->reply_queue[i].head,
8072 h->reply_queue[i].busaddr);
8073 h->reply_queue[i].head = NULL;
8074 h->reply_queue[i].busaddr = 0;
8076 h->reply_queue_size = 0;
8079 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
8081 hpsa_free_performant_mode(h); /* init_one 7 */
8082 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
8083 hpsa_free_cmd_pool(h); /* init_one 5 */
8084 hpsa_free_irqs(h); /* init_one 4 */
8085 scsi_host_put(h->scsi_host); /* init_one 3 */
8086 h->scsi_host = NULL; /* init_one 3 */
8087 hpsa_free_pci_init(h); /* init_one 2_5 */
8088 free_percpu(h->lockup_detected); /* init_one 2 */
8089 h->lockup_detected = NULL; /* init_one 2 */
8090 if (h->resubmit_wq) {
8091 destroy_workqueue(h->resubmit_wq); /* init_one 1 */
8092 h->resubmit_wq = NULL;
8094 if (h->rescan_ctlr_wq) {
8095 destroy_workqueue(h->rescan_ctlr_wq);
8096 h->rescan_ctlr_wq = NULL;
8098 kfree(h); /* init_one 1 */
8101 /* Called when controller lockup detected. */
8102 static void fail_all_outstanding_cmds(struct ctlr_info *h)
8104 int i, refcount;
8105 struct CommandList *c;
8106 int failcount = 0;
8108 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
8109 for (i = 0; i < h->nr_cmds; i++) {
8110 c = h->cmd_pool + i;
8111 refcount = atomic_inc_return(&c->refcount);
8112 if (refcount > 1) {
8113 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
8114 finish_cmd(c);
8115 atomic_dec(&h->commands_outstanding);
8116 failcount++;
8118 cmd_free(h, c);
8120 dev_warn(&h->pdev->dev,
8121 "failed %d commands in fail_all\n", failcount);
8124 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
8126 int cpu;
8128 for_each_online_cpu(cpu) {
8129 u32 *lockup_detected;
8130 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
8131 *lockup_detected = value;
8133 wmb(); /* be sure the per-cpu variables are out to memory */
8136 static void controller_lockup_detected(struct ctlr_info *h)
8138 unsigned long flags;
8139 u32 lockup_detected;
8141 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8142 spin_lock_irqsave(&h->lock, flags);
8143 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
8144 if (!lockup_detected) {
8145 /* no heartbeat, but controller gave us a zero. */
8146 dev_warn(&h->pdev->dev,
8147 "lockup detected after %d but scratchpad register is zero\n",
8148 h->heartbeat_sample_interval / HZ);
8149 lockup_detected = 0xffffffff;
8151 set_lockup_detected_for_all_cpus(h, lockup_detected);
8152 spin_unlock_irqrestore(&h->lock, flags);
8153 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
8154 lockup_detected, h->heartbeat_sample_interval / HZ);
8155 if (lockup_detected == 0xffff0000) {
8156 dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
8157 writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
8159 pci_disable_device(h->pdev);
8160 fail_all_outstanding_cmds(h);
8163 static int detect_controller_lockup(struct ctlr_info *h)
8165 u64 now;
8166 u32 heartbeat;
8167 unsigned long flags;
8169 now = get_jiffies_64();
8170 /* If we've received an interrupt recently, we're ok. */
8171 if (time_after64(h->last_intr_timestamp +
8172 (h->heartbeat_sample_interval), now))
8173 return false;
8176 * If we've already checked the heartbeat recently, we're ok.
8177 * This could happen if someone sends us a signal. We
8178 * otherwise don't care about signals in this thread.
8180 if (time_after64(h->last_heartbeat_timestamp +
8181 (h->heartbeat_sample_interval), now))
8182 return false;
8184 /* If heartbeat has not changed since we last looked, we're not ok. */
8185 spin_lock_irqsave(&h->lock, flags);
8186 heartbeat = readl(&h->cfgtable->HeartBeat);
8187 spin_unlock_irqrestore(&h->lock, flags);
8188 if (h->last_heartbeat == heartbeat) {
8189 controller_lockup_detected(h);
8190 return true;
8193 /* We're ok. */
8194 h->last_heartbeat = heartbeat;
8195 h->last_heartbeat_timestamp = now;
8196 return false;
8200 * Set ioaccel status for all ioaccel volumes.
8202 * Called from monitor controller worker (hpsa_event_monitor_worker)
8204 * A Volume (or Volumes that comprise an Array set may be undergoing a
8205 * transformation, so we will be turning off ioaccel for all volumes that
8206 * make up the Array.
8208 static void hpsa_set_ioaccel_status(struct ctlr_info *h)
8210 int rc;
8211 int i;
8212 u8 ioaccel_status;
8213 unsigned char *buf;
8214 struct hpsa_scsi_dev_t *device;
8216 if (!h)
8217 return;
8219 buf = kmalloc(64, GFP_KERNEL);
8220 if (!buf)
8221 return;
8224 * Run through current device list used during I/O requests.
8226 for (i = 0; i < h->ndevices; i++) {
8227 device = h->dev[i];
8229 if (!device)
8230 continue;
8231 if (!hpsa_vpd_page_supported(h, device->scsi3addr,
8232 HPSA_VPD_LV_IOACCEL_STATUS))
8233 continue;
8235 memset(buf, 0, 64);
8237 rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
8238 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
8239 buf, 64);
8240 if (rc != 0)
8241 continue;
8243 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
8244 device->offload_config =
8245 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
8246 if (device->offload_config)
8247 device->offload_to_be_enabled =
8248 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
8251 * Immediately turn off ioaccel for any volume the
8252 * controller tells us to. Some of the reasons could be:
8253 * transformation - change to the LVs of an Array.
8254 * degraded volume - component failure
8256 * If ioaccel is to be re-enabled, re-enable later during the
8257 * scan operation so the driver can get a fresh raidmap
8258 * before turning ioaccel back on.
8261 if (!device->offload_to_be_enabled)
8262 device->offload_enabled = 0;
8265 kfree(buf);
8268 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8270 char *event_type;
8272 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8273 return;
8275 /* Ask the controller to clear the events we're handling. */
8276 if ((h->transMethod & (CFGTBL_Trans_io_accel1
8277 | CFGTBL_Trans_io_accel2)) &&
8278 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8279 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8281 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8282 event_type = "state change";
8283 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8284 event_type = "configuration change";
8285 /* Stop sending new RAID offload reqs via the IO accelerator */
8286 scsi_block_requests(h->scsi_host);
8287 hpsa_set_ioaccel_status(h);
8288 hpsa_drain_accel_commands(h);
8289 /* Set 'accelerator path config change' bit */
8290 dev_warn(&h->pdev->dev,
8291 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8292 h->events, event_type);
8293 writel(h->events, &(h->cfgtable->clear_event_notify));
8294 /* Set the "clear event notify field update" bit 6 */
8295 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8296 /* Wait until ctlr clears 'clear event notify field', bit 6 */
8297 hpsa_wait_for_clear_event_notify_ack(h);
8298 scsi_unblock_requests(h->scsi_host);
8299 } else {
8300 /* Acknowledge controller notification events. */
8301 writel(h->events, &(h->cfgtable->clear_event_notify));
8302 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8303 hpsa_wait_for_clear_event_notify_ack(h);
8305 return;
8308 /* Check a register on the controller to see if there are configuration
8309 * changes (added/changed/removed logical drives, etc.) which mean that
8310 * we should rescan the controller for devices.
8311 * Also check flag for driver-initiated rescan.
8313 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
8315 if (h->drv_req_rescan) {
8316 h->drv_req_rescan = 0;
8317 return 1;
8320 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8321 return 0;
8323 h->events = readl(&(h->cfgtable->event_notify));
8324 return h->events & RESCAN_REQUIRED_EVENT_BITS;
8328 * Check if any of the offline devices have become ready
8330 static int hpsa_offline_devices_ready(struct ctlr_info *h)
8332 unsigned long flags;
8333 struct offline_device_entry *d;
8334 struct list_head *this, *tmp;
8336 spin_lock_irqsave(&h->offline_device_lock, flags);
8337 list_for_each_safe(this, tmp, &h->offline_device_list) {
8338 d = list_entry(this, struct offline_device_entry,
8339 offline_list);
8340 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8341 if (!hpsa_volume_offline(h, d->scsi3addr)) {
8342 spin_lock_irqsave(&h->offline_device_lock, flags);
8343 list_del(&d->offline_list);
8344 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8345 return 1;
8347 spin_lock_irqsave(&h->offline_device_lock, flags);
8349 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8350 return 0;
8353 static int hpsa_luns_changed(struct ctlr_info *h)
8355 int rc = 1; /* assume there are changes */
8356 struct ReportLUNdata *logdev = NULL;
8358 /* if we can't find out if lun data has changed,
8359 * assume that it has.
8362 if (!h->lastlogicals)
8363 return rc;
8365 logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
8366 if (!logdev)
8367 return rc;
8369 if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8370 dev_warn(&h->pdev->dev,
8371 "report luns failed, can't track lun changes.\n");
8372 goto out;
8374 if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
8375 dev_info(&h->pdev->dev,
8376 "Lun changes detected.\n");
8377 memcpy(h->lastlogicals, logdev, sizeof(*logdev));
8378 goto out;
8379 } else
8380 rc = 0; /* no changes detected. */
8381 out:
8382 kfree(logdev);
8383 return rc;
8386 static void hpsa_perform_rescan(struct ctlr_info *h)
8388 struct Scsi_Host *sh = NULL;
8389 unsigned long flags;
8392 * Do the scan after the reset
8394 spin_lock_irqsave(&h->reset_lock, flags);
8395 if (h->reset_in_progress) {
8396 h->drv_req_rescan = 1;
8397 spin_unlock_irqrestore(&h->reset_lock, flags);
8398 return;
8400 spin_unlock_irqrestore(&h->reset_lock, flags);
8402 sh = scsi_host_get(h->scsi_host);
8403 if (sh != NULL) {
8404 hpsa_scan_start(sh);
8405 scsi_host_put(sh);
8406 h->drv_req_rescan = 0;
8411 * watch for controller events
8413 static void hpsa_event_monitor_worker(struct work_struct *work)
8415 struct ctlr_info *h = container_of(to_delayed_work(work),
8416 struct ctlr_info, event_monitor_work);
8417 unsigned long flags;
8419 spin_lock_irqsave(&h->lock, flags);
8420 if (h->remove_in_progress) {
8421 spin_unlock_irqrestore(&h->lock, flags);
8422 return;
8424 spin_unlock_irqrestore(&h->lock, flags);
8426 if (hpsa_ctlr_needs_rescan(h)) {
8427 hpsa_ack_ctlr_events(h);
8428 hpsa_perform_rescan(h);
8431 spin_lock_irqsave(&h->lock, flags);
8432 if (!h->remove_in_progress)
8433 schedule_delayed_work(&h->event_monitor_work,
8434 HPSA_EVENT_MONITOR_INTERVAL);
8435 spin_unlock_irqrestore(&h->lock, flags);
8438 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8440 unsigned long flags;
8441 struct ctlr_info *h = container_of(to_delayed_work(work),
8442 struct ctlr_info, rescan_ctlr_work);
8444 spin_lock_irqsave(&h->lock, flags);
8445 if (h->remove_in_progress) {
8446 spin_unlock_irqrestore(&h->lock, flags);
8447 return;
8449 spin_unlock_irqrestore(&h->lock, flags);
8451 if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
8452 hpsa_perform_rescan(h);
8453 } else if (h->discovery_polling) {
8454 if (hpsa_luns_changed(h)) {
8455 dev_info(&h->pdev->dev,
8456 "driver discovery polling rescan.\n");
8457 hpsa_perform_rescan(h);
8460 spin_lock_irqsave(&h->lock, flags);
8461 if (!h->remove_in_progress)
8462 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8463 h->heartbeat_sample_interval);
8464 spin_unlock_irqrestore(&h->lock, flags);
8467 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8469 unsigned long flags;
8470 struct ctlr_info *h = container_of(to_delayed_work(work),
8471 struct ctlr_info, monitor_ctlr_work);
8473 detect_controller_lockup(h);
8474 if (lockup_detected(h))
8475 return;
8477 spin_lock_irqsave(&h->lock, flags);
8478 if (!h->remove_in_progress)
8479 schedule_delayed_work(&h->monitor_ctlr_work,
8480 h->heartbeat_sample_interval);
8481 spin_unlock_irqrestore(&h->lock, flags);
8484 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8485 char *name)
8487 struct workqueue_struct *wq = NULL;
8489 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8490 if (!wq)
8491 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8493 return wq;
8496 static void hpda_free_ctlr_info(struct ctlr_info *h)
8498 kfree(h->reply_map);
8499 kfree(h);
8502 static struct ctlr_info *hpda_alloc_ctlr_info(void)
8504 struct ctlr_info *h;
8506 h = kzalloc(sizeof(*h), GFP_KERNEL);
8507 if (!h)
8508 return NULL;
8510 h->reply_map = kzalloc(sizeof(*h->reply_map) * nr_cpu_ids, GFP_KERNEL);
8511 if (!h->reply_map) {
8512 kfree(h);
8513 return NULL;
8515 return h;
8518 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8520 int dac, rc;
8521 struct ctlr_info *h;
8522 int try_soft_reset = 0;
8523 unsigned long flags;
8524 u32 board_id;
8526 if (number_of_controllers == 0)
8527 printk(KERN_INFO DRIVER_NAME "\n");
8529 rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
8530 if (rc < 0) {
8531 dev_warn(&pdev->dev, "Board ID not found\n");
8532 return rc;
8535 rc = hpsa_init_reset_devices(pdev, board_id);
8536 if (rc) {
8537 if (rc != -ENOTSUPP)
8538 return rc;
8539 /* If the reset fails in a particular way (it has no way to do
8540 * a proper hard reset, so returns -ENOTSUPP) we can try to do
8541 * a soft reset once we get the controller configured up to the
8542 * point that it can accept a command.
8544 try_soft_reset = 1;
8545 rc = 0;
8548 reinit_after_soft_reset:
8550 /* Command structures must be aligned on a 32-byte boundary because
8551 * the 5 lower bits of the address are used by the hardware. and by
8552 * the driver. See comments in hpsa.h for more info.
8554 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8555 h = hpda_alloc_ctlr_info();
8556 if (!h) {
8557 dev_err(&pdev->dev, "Failed to allocate controller head\n");
8558 return -ENOMEM;
8561 h->pdev = pdev;
8563 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8564 INIT_LIST_HEAD(&h->offline_device_list);
8565 spin_lock_init(&h->lock);
8566 spin_lock_init(&h->offline_device_lock);
8567 spin_lock_init(&h->scan_lock);
8568 spin_lock_init(&h->reset_lock);
8569 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8571 /* Allocate and clear per-cpu variable lockup_detected */
8572 h->lockup_detected = alloc_percpu(u32);
8573 if (!h->lockup_detected) {
8574 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8575 rc = -ENOMEM;
8576 goto clean1; /* aer/h */
8578 set_lockup_detected_for_all_cpus(h, 0);
8580 rc = hpsa_pci_init(h);
8581 if (rc)
8582 goto clean2; /* lu, aer/h */
8584 /* relies on h-> settings made by hpsa_pci_init, including
8585 * interrupt_mode h->intr */
8586 rc = hpsa_scsi_host_alloc(h);
8587 if (rc)
8588 goto clean2_5; /* pci, lu, aer/h */
8590 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8591 h->ctlr = number_of_controllers;
8592 number_of_controllers++;
8594 /* configure PCI DMA stuff */
8595 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8596 if (rc == 0) {
8597 dac = 1;
8598 } else {
8599 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8600 if (rc == 0) {
8601 dac = 0;
8602 } else {
8603 dev_err(&pdev->dev, "no suitable DMA available\n");
8604 goto clean3; /* shost, pci, lu, aer/h */
8608 /* make sure the board interrupts are off */
8609 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8611 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8612 if (rc)
8613 goto clean3; /* shost, pci, lu, aer/h */
8614 rc = hpsa_alloc_cmd_pool(h);
8615 if (rc)
8616 goto clean4; /* irq, shost, pci, lu, aer/h */
8617 rc = hpsa_alloc_sg_chain_blocks(h);
8618 if (rc)
8619 goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
8620 init_waitqueue_head(&h->scan_wait_queue);
8621 init_waitqueue_head(&h->event_sync_wait_queue);
8622 mutex_init(&h->reset_mutex);
8623 h->scan_finished = 1; /* no scan currently in progress */
8624 h->scan_waiting = 0;
8626 pci_set_drvdata(pdev, h);
8627 h->ndevices = 0;
8629 spin_lock_init(&h->devlock);
8630 rc = hpsa_put_ctlr_into_performant_mode(h);
8631 if (rc)
8632 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
8634 /* create the resubmit workqueue */
8635 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8636 if (!h->rescan_ctlr_wq) {
8637 rc = -ENOMEM;
8638 goto clean7;
8641 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8642 if (!h->resubmit_wq) {
8643 rc = -ENOMEM;
8644 goto clean7; /* aer/h */
8648 * At this point, the controller is ready to take commands.
8649 * Now, if reset_devices and the hard reset didn't work, try
8650 * the soft reset and see if that works.
8652 if (try_soft_reset) {
8654 /* This is kind of gross. We may or may not get a completion
8655 * from the soft reset command, and if we do, then the value
8656 * from the fifo may or may not be valid. So, we wait 10 secs
8657 * after the reset throwing away any completions we get during
8658 * that time. Unregister the interrupt handler and register
8659 * fake ones to scoop up any residual completions.
8661 spin_lock_irqsave(&h->lock, flags);
8662 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8663 spin_unlock_irqrestore(&h->lock, flags);
8664 hpsa_free_irqs(h);
8665 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8666 hpsa_intx_discard_completions);
8667 if (rc) {
8668 dev_warn(&h->pdev->dev,
8669 "Failed to request_irq after soft reset.\n");
8671 * cannot goto clean7 or free_irqs will be called
8672 * again. Instead, do its work
8674 hpsa_free_performant_mode(h); /* clean7 */
8675 hpsa_free_sg_chain_blocks(h); /* clean6 */
8676 hpsa_free_cmd_pool(h); /* clean5 */
8678 * skip hpsa_free_irqs(h) clean4 since that
8679 * was just called before request_irqs failed
8681 goto clean3;
8684 rc = hpsa_kdump_soft_reset(h);
8685 if (rc)
8686 /* Neither hard nor soft reset worked, we're hosed. */
8687 goto clean7;
8689 dev_info(&h->pdev->dev, "Board READY.\n");
8690 dev_info(&h->pdev->dev,
8691 "Waiting for stale completions to drain.\n");
8692 h->access.set_intr_mask(h, HPSA_INTR_ON);
8693 msleep(10000);
8694 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8696 rc = controller_reset_failed(h->cfgtable);
8697 if (rc)
8698 dev_info(&h->pdev->dev,
8699 "Soft reset appears to have failed.\n");
8701 /* since the controller's reset, we have to go back and re-init
8702 * everything. Easiest to just forget what we've done and do it
8703 * all over again.
8705 hpsa_undo_allocations_after_kdump_soft_reset(h);
8706 try_soft_reset = 0;
8707 if (rc)
8708 /* don't goto clean, we already unallocated */
8709 return -ENODEV;
8711 goto reinit_after_soft_reset;
8714 /* Enable Accelerated IO path at driver layer */
8715 h->acciopath_status = 1;
8716 /* Disable discovery polling.*/
8717 h->discovery_polling = 0;
8720 /* Turn the interrupts on so we can service requests */
8721 h->access.set_intr_mask(h, HPSA_INTR_ON);
8723 hpsa_hba_inquiry(h);
8725 h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
8726 if (!h->lastlogicals)
8727 dev_info(&h->pdev->dev,
8728 "Can't track change to report lun data\n");
8730 /* hook into SCSI subsystem */
8731 rc = hpsa_scsi_add_host(h);
8732 if (rc)
8733 goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8735 /* Monitor the controller for firmware lockups */
8736 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8737 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8738 schedule_delayed_work(&h->monitor_ctlr_work,
8739 h->heartbeat_sample_interval);
8740 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8741 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8742 h->heartbeat_sample_interval);
8743 INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
8744 schedule_delayed_work(&h->event_monitor_work,
8745 HPSA_EVENT_MONITOR_INTERVAL);
8746 return 0;
8748 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8749 hpsa_free_performant_mode(h);
8750 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8751 clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */
8752 hpsa_free_sg_chain_blocks(h);
8753 clean5: /* cmd, irq, shost, pci, lu, aer/h */
8754 hpsa_free_cmd_pool(h);
8755 clean4: /* irq, shost, pci, lu, aer/h */
8756 hpsa_free_irqs(h);
8757 clean3: /* shost, pci, lu, aer/h */
8758 scsi_host_put(h->scsi_host);
8759 h->scsi_host = NULL;
8760 clean2_5: /* pci, lu, aer/h */
8761 hpsa_free_pci_init(h);
8762 clean2: /* lu, aer/h */
8763 if (h->lockup_detected) {
8764 free_percpu(h->lockup_detected);
8765 h->lockup_detected = NULL;
8767 clean1: /* wq/aer/h */
8768 if (h->resubmit_wq) {
8769 destroy_workqueue(h->resubmit_wq);
8770 h->resubmit_wq = NULL;
8772 if (h->rescan_ctlr_wq) {
8773 destroy_workqueue(h->rescan_ctlr_wq);
8774 h->rescan_ctlr_wq = NULL;
8776 kfree(h);
8777 return rc;
8780 static void hpsa_flush_cache(struct ctlr_info *h)
8782 char *flush_buf;
8783 struct CommandList *c;
8784 int rc;
8786 if (unlikely(lockup_detected(h)))
8787 return;
8788 flush_buf = kzalloc(4, GFP_KERNEL);
8789 if (!flush_buf)
8790 return;
8792 c = cmd_alloc(h);
8794 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8795 RAID_CTLR_LUNID, TYPE_CMD)) {
8796 goto out;
8798 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8799 PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
8800 if (rc)
8801 goto out;
8802 if (c->err_info->CommandStatus != 0)
8803 out:
8804 dev_warn(&h->pdev->dev,
8805 "error flushing cache on controller\n");
8806 cmd_free(h, c);
8807 kfree(flush_buf);
8810 /* Make controller gather fresh report lun data each time we
8811 * send down a report luns request
8813 static void hpsa_disable_rld_caching(struct ctlr_info *h)
8815 u32 *options;
8816 struct CommandList *c;
8817 int rc;
8819 /* Don't bother trying to set diag options if locked up */
8820 if (unlikely(h->lockup_detected))
8821 return;
8823 options = kzalloc(sizeof(*options), GFP_KERNEL);
8824 if (!options)
8825 return;
8827 c = cmd_alloc(h);
8829 /* first, get the current diag options settings */
8830 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8831 RAID_CTLR_LUNID, TYPE_CMD))
8832 goto errout;
8834 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8835 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
8836 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8837 goto errout;
8839 /* Now, set the bit for disabling the RLD caching */
8840 *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
8842 if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
8843 RAID_CTLR_LUNID, TYPE_CMD))
8844 goto errout;
8846 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8847 PCI_DMA_TODEVICE, NO_TIMEOUT);
8848 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8849 goto errout;
8851 /* Now verify that it got set: */
8852 if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8853 RAID_CTLR_LUNID, TYPE_CMD))
8854 goto errout;
8856 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8857 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
8858 if ((rc != 0) || (c->err_info->CommandStatus != 0))
8859 goto errout;
8861 if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
8862 goto out;
8864 errout:
8865 dev_err(&h->pdev->dev,
8866 "Error: failed to disable report lun data caching.\n");
8867 out:
8868 cmd_free(h, c);
8869 kfree(options);
8872 static void hpsa_shutdown(struct pci_dev *pdev)
8874 struct ctlr_info *h;
8876 h = pci_get_drvdata(pdev);
8877 /* Turn board interrupts off and send the flush cache command
8878 * sendcmd will turn off interrupt, and send the flush...
8879 * To write all data in the battery backed cache to disks
8881 hpsa_flush_cache(h);
8882 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8883 hpsa_free_irqs(h); /* init_one 4 */
8884 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
8887 static void hpsa_free_device_info(struct ctlr_info *h)
8889 int i;
8891 for (i = 0; i < h->ndevices; i++) {
8892 kfree(h->dev[i]);
8893 h->dev[i] = NULL;
8897 static void hpsa_remove_one(struct pci_dev *pdev)
8899 struct ctlr_info *h;
8900 unsigned long flags;
8902 if (pci_get_drvdata(pdev) == NULL) {
8903 dev_err(&pdev->dev, "unable to remove device\n");
8904 return;
8906 h = pci_get_drvdata(pdev);
8908 /* Get rid of any controller monitoring work items */
8909 spin_lock_irqsave(&h->lock, flags);
8910 h->remove_in_progress = 1;
8911 spin_unlock_irqrestore(&h->lock, flags);
8912 cancel_delayed_work_sync(&h->monitor_ctlr_work);
8913 cancel_delayed_work_sync(&h->rescan_ctlr_work);
8914 cancel_delayed_work_sync(&h->event_monitor_work);
8915 destroy_workqueue(h->rescan_ctlr_wq);
8916 destroy_workqueue(h->resubmit_wq);
8918 hpsa_delete_sas_host(h);
8921 * Call before disabling interrupts.
8922 * scsi_remove_host can trigger I/O operations especially
8923 * when multipath is enabled. There can be SYNCHRONIZE CACHE
8924 * operations which cannot complete and will hang the system.
8926 if (h->scsi_host)
8927 scsi_remove_host(h->scsi_host); /* init_one 8 */
8928 /* includes hpsa_free_irqs - init_one 4 */
8929 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
8930 hpsa_shutdown(pdev);
8932 hpsa_free_device_info(h); /* scan */
8934 kfree(h->hba_inquiry_data); /* init_one 10 */
8935 h->hba_inquiry_data = NULL; /* init_one 10 */
8936 hpsa_free_ioaccel2_sg_chain_blocks(h);
8937 hpsa_free_performant_mode(h); /* init_one 7 */
8938 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
8939 hpsa_free_cmd_pool(h); /* init_one 5 */
8940 kfree(h->lastlogicals);
8942 /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
8944 scsi_host_put(h->scsi_host); /* init_one 3 */
8945 h->scsi_host = NULL; /* init_one 3 */
8947 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
8948 hpsa_free_pci_init(h); /* init_one 2.5 */
8950 free_percpu(h->lockup_detected); /* init_one 2 */
8951 h->lockup_detected = NULL; /* init_one 2 */
8952 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
8954 hpda_free_ctlr_info(h); /* init_one 1 */
8957 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
8958 __attribute__((unused)) pm_message_t state)
8960 return -ENOSYS;
8963 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
8965 return -ENOSYS;
8968 static struct pci_driver hpsa_pci_driver = {
8969 .name = HPSA,
8970 .probe = hpsa_init_one,
8971 .remove = hpsa_remove_one,
8972 .id_table = hpsa_pci_device_id, /* id_table */
8973 .shutdown = hpsa_shutdown,
8974 .suspend = hpsa_suspend,
8975 .resume = hpsa_resume,
8978 /* Fill in bucket_map[], given nsgs (the max number of
8979 * scatter gather elements supported) and bucket[],
8980 * which is an array of 8 integers. The bucket[] array
8981 * contains 8 different DMA transfer sizes (in 16
8982 * byte increments) which the controller uses to fetch
8983 * commands. This function fills in bucket_map[], which
8984 * maps a given number of scatter gather elements to one of
8985 * the 8 DMA transfer sizes. The point of it is to allow the
8986 * controller to only do as much DMA as needed to fetch the
8987 * command, with the DMA transfer size encoded in the lower
8988 * bits of the command address.
8990 static void calc_bucket_map(int bucket[], int num_buckets,
8991 int nsgs, int min_blocks, u32 *bucket_map)
8993 int i, j, b, size;
8995 /* Note, bucket_map must have nsgs+1 entries. */
8996 for (i = 0; i <= nsgs; i++) {
8997 /* Compute size of a command with i SG entries */
8998 size = i + min_blocks;
8999 b = num_buckets; /* Assume the biggest bucket */
9000 /* Find the bucket that is just big enough */
9001 for (j = 0; j < num_buckets; j++) {
9002 if (bucket[j] >= size) {
9003 b = j;
9004 break;
9007 /* for a command with i SG entries, use bucket b. */
9008 bucket_map[i] = b;
9013 * return -ENODEV on err, 0 on success (or no action)
9014 * allocates numerous items that must be freed later
9016 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
9018 int i;
9019 unsigned long register_value;
9020 unsigned long transMethod = CFGTBL_Trans_Performant |
9021 (trans_support & CFGTBL_Trans_use_short_tags) |
9022 CFGTBL_Trans_enable_directed_msix |
9023 (trans_support & (CFGTBL_Trans_io_accel1 |
9024 CFGTBL_Trans_io_accel2));
9025 struct access_method access = SA5_performant_access;
9027 /* This is a bit complicated. There are 8 registers on
9028 * the controller which we write to to tell it 8 different
9029 * sizes of commands which there may be. It's a way of
9030 * reducing the DMA done to fetch each command. Encoded into
9031 * each command's tag are 3 bits which communicate to the controller
9032 * which of the eight sizes that command fits within. The size of
9033 * each command depends on how many scatter gather entries there are.
9034 * Each SG entry requires 16 bytes. The eight registers are programmed
9035 * with the number of 16-byte blocks a command of that size requires.
9036 * The smallest command possible requires 5 such 16 byte blocks.
9037 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
9038 * blocks. Note, this only extends to the SG entries contained
9039 * within the command block, and does not extend to chained blocks
9040 * of SG elements. bft[] contains the eight values we write to
9041 * the registers. They are not evenly distributed, but have more
9042 * sizes for small commands, and fewer sizes for larger commands.
9044 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
9045 #define MIN_IOACCEL2_BFT_ENTRY 5
9046 #define HPSA_IOACCEL2_HEADER_SZ 4
9047 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
9048 13, 14, 15, 16, 17, 18, 19,
9049 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
9050 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
9051 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
9052 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
9053 16 * MIN_IOACCEL2_BFT_ENTRY);
9054 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
9055 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
9056 /* 5 = 1 s/g entry or 4k
9057 * 6 = 2 s/g entry or 8k
9058 * 8 = 4 s/g entry or 16k
9059 * 10 = 6 s/g entry or 24k
9062 /* If the controller supports either ioaccel method then
9063 * we can also use the RAID stack submit path that does not
9064 * perform the superfluous readl() after each command submission.
9066 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
9067 access = SA5_performant_access_no_read;
9069 /* Controller spec: zero out this buffer. */
9070 for (i = 0; i < h->nreply_queues; i++)
9071 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
9073 bft[7] = SG_ENTRIES_IN_CMD + 4;
9074 calc_bucket_map(bft, ARRAY_SIZE(bft),
9075 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
9076 for (i = 0; i < 8; i++)
9077 writel(bft[i], &h->transtable->BlockFetch[i]);
9079 /* size of controller ring buffer */
9080 writel(h->max_commands, &h->transtable->RepQSize);
9081 writel(h->nreply_queues, &h->transtable->RepQCount);
9082 writel(0, &h->transtable->RepQCtrAddrLow32);
9083 writel(0, &h->transtable->RepQCtrAddrHigh32);
9085 for (i = 0; i < h->nreply_queues; i++) {
9086 writel(0, &h->transtable->RepQAddr[i].upper);
9087 writel(h->reply_queue[i].busaddr,
9088 &h->transtable->RepQAddr[i].lower);
9091 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
9092 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
9094 * enable outbound interrupt coalescing in accelerator mode;
9096 if (trans_support & CFGTBL_Trans_io_accel1) {
9097 access = SA5_ioaccel_mode1_access;
9098 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
9099 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
9100 } else
9101 if (trans_support & CFGTBL_Trans_io_accel2)
9102 access = SA5_ioaccel_mode2_access;
9103 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9104 if (hpsa_wait_for_mode_change_ack(h)) {
9105 dev_err(&h->pdev->dev,
9106 "performant mode problem - doorbell timeout\n");
9107 return -ENODEV;
9109 register_value = readl(&(h->cfgtable->TransportActive));
9110 if (!(register_value & CFGTBL_Trans_Performant)) {
9111 dev_err(&h->pdev->dev,
9112 "performant mode problem - transport not active\n");
9113 return -ENODEV;
9115 /* Change the access methods to the performant access methods */
9116 h->access = access;
9117 h->transMethod = transMethod;
9119 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
9120 (trans_support & CFGTBL_Trans_io_accel2)))
9121 return 0;
9123 if (trans_support & CFGTBL_Trans_io_accel1) {
9124 /* Set up I/O accelerator mode */
9125 for (i = 0; i < h->nreply_queues; i++) {
9126 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
9127 h->reply_queue[i].current_entry =
9128 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
9130 bft[7] = h->ioaccel_maxsg + 8;
9131 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
9132 h->ioaccel1_blockFetchTable);
9134 /* initialize all reply queue entries to unused */
9135 for (i = 0; i < h->nreply_queues; i++)
9136 memset(h->reply_queue[i].head,
9137 (u8) IOACCEL_MODE1_REPLY_UNUSED,
9138 h->reply_queue_size);
9140 /* set all the constant fields in the accelerator command
9141 * frames once at init time to save CPU cycles later.
9143 for (i = 0; i < h->nr_cmds; i++) {
9144 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
9146 cp->function = IOACCEL1_FUNCTION_SCSIIO;
9147 cp->err_info = (u32) (h->errinfo_pool_dhandle +
9148 (i * sizeof(struct ErrorInfo)));
9149 cp->err_info_len = sizeof(struct ErrorInfo);
9150 cp->sgl_offset = IOACCEL1_SGLOFFSET;
9151 cp->host_context_flags =
9152 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
9153 cp->timeout_sec = 0;
9154 cp->ReplyQueue = 0;
9155 cp->tag =
9156 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
9157 cp->host_addr =
9158 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
9159 (i * sizeof(struct io_accel1_cmd)));
9161 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9162 u64 cfg_offset, cfg_base_addr_index;
9163 u32 bft2_offset, cfg_base_addr;
9164 int rc;
9166 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
9167 &cfg_base_addr_index, &cfg_offset);
9168 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
9169 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
9170 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
9171 4, h->ioaccel2_blockFetchTable);
9172 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
9173 BUILD_BUG_ON(offsetof(struct CfgTable,
9174 io_accel_request_size_offset) != 0xb8);
9175 h->ioaccel2_bft2_regs =
9176 remap_pci_mem(pci_resource_start(h->pdev,
9177 cfg_base_addr_index) +
9178 cfg_offset + bft2_offset,
9179 ARRAY_SIZE(bft2) *
9180 sizeof(*h->ioaccel2_bft2_regs));
9181 for (i = 0; i < ARRAY_SIZE(bft2); i++)
9182 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
9184 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9185 if (hpsa_wait_for_mode_change_ack(h)) {
9186 dev_err(&h->pdev->dev,
9187 "performant mode problem - enabling ioaccel mode\n");
9188 return -ENODEV;
9190 return 0;
9193 /* Free ioaccel1 mode command blocks and block fetch table */
9194 static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9196 if (h->ioaccel_cmd_pool) {
9197 pci_free_consistent(h->pdev,
9198 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9199 h->ioaccel_cmd_pool,
9200 h->ioaccel_cmd_pool_dhandle);
9201 h->ioaccel_cmd_pool = NULL;
9202 h->ioaccel_cmd_pool_dhandle = 0;
9204 kfree(h->ioaccel1_blockFetchTable);
9205 h->ioaccel1_blockFetchTable = NULL;
9208 /* Allocate ioaccel1 mode command blocks and block fetch table */
9209 static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9211 h->ioaccel_maxsg =
9212 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9213 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
9214 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
9216 /* Command structures must be aligned on a 128-byte boundary
9217 * because the 7 lower bits of the address are used by the
9218 * hardware.
9220 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
9221 IOACCEL1_COMMANDLIST_ALIGNMENT);
9222 h->ioaccel_cmd_pool =
9223 pci_alloc_consistent(h->pdev,
9224 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9225 &(h->ioaccel_cmd_pool_dhandle));
9227 h->ioaccel1_blockFetchTable =
9228 kmalloc(((h->ioaccel_maxsg + 1) *
9229 sizeof(u32)), GFP_KERNEL);
9231 if ((h->ioaccel_cmd_pool == NULL) ||
9232 (h->ioaccel1_blockFetchTable == NULL))
9233 goto clean_up;
9235 memset(h->ioaccel_cmd_pool, 0,
9236 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
9237 return 0;
9239 clean_up:
9240 hpsa_free_ioaccel1_cmd_and_bft(h);
9241 return -ENOMEM;
9244 /* Free ioaccel2 mode command blocks and block fetch table */
9245 static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9247 hpsa_free_ioaccel2_sg_chain_blocks(h);
9249 if (h->ioaccel2_cmd_pool) {
9250 pci_free_consistent(h->pdev,
9251 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9252 h->ioaccel2_cmd_pool,
9253 h->ioaccel2_cmd_pool_dhandle);
9254 h->ioaccel2_cmd_pool = NULL;
9255 h->ioaccel2_cmd_pool_dhandle = 0;
9257 kfree(h->ioaccel2_blockFetchTable);
9258 h->ioaccel2_blockFetchTable = NULL;
9261 /* Allocate ioaccel2 mode command blocks and block fetch table */
9262 static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9264 int rc;
9266 /* Allocate ioaccel2 mode command blocks and block fetch table */
9268 h->ioaccel_maxsg =
9269 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9270 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
9271 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
9273 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
9274 IOACCEL2_COMMANDLIST_ALIGNMENT);
9275 h->ioaccel2_cmd_pool =
9276 pci_alloc_consistent(h->pdev,
9277 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9278 &(h->ioaccel2_cmd_pool_dhandle));
9280 h->ioaccel2_blockFetchTable =
9281 kmalloc(((h->ioaccel_maxsg + 1) *
9282 sizeof(u32)), GFP_KERNEL);
9284 if ((h->ioaccel2_cmd_pool == NULL) ||
9285 (h->ioaccel2_blockFetchTable == NULL)) {
9286 rc = -ENOMEM;
9287 goto clean_up;
9290 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
9291 if (rc)
9292 goto clean_up;
9294 memset(h->ioaccel2_cmd_pool, 0,
9295 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
9296 return 0;
9298 clean_up:
9299 hpsa_free_ioaccel2_cmd_and_bft(h);
9300 return rc;
9303 /* Free items allocated by hpsa_put_ctlr_into_performant_mode */
9304 static void hpsa_free_performant_mode(struct ctlr_info *h)
9306 kfree(h->blockFetchTable);
9307 h->blockFetchTable = NULL;
9308 hpsa_free_reply_queues(h);
9309 hpsa_free_ioaccel1_cmd_and_bft(h);
9310 hpsa_free_ioaccel2_cmd_and_bft(h);
9313 /* return -ENODEV on error, 0 on success (or no action)
9314 * allocates numerous items that must be freed later
9316 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
9318 u32 trans_support;
9319 unsigned long transMethod = CFGTBL_Trans_Performant |
9320 CFGTBL_Trans_use_short_tags;
9321 int i, rc;
9323 if (hpsa_simple_mode)
9324 return 0;
9326 trans_support = readl(&(h->cfgtable->TransportSupport));
9327 if (!(trans_support & PERFORMANT_MODE))
9328 return 0;
9330 /* Check for I/O accelerator mode support */
9331 if (trans_support & CFGTBL_Trans_io_accel1) {
9332 transMethod |= CFGTBL_Trans_io_accel1 |
9333 CFGTBL_Trans_enable_directed_msix;
9334 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
9335 if (rc)
9336 return rc;
9337 } else if (trans_support & CFGTBL_Trans_io_accel2) {
9338 transMethod |= CFGTBL_Trans_io_accel2 |
9339 CFGTBL_Trans_enable_directed_msix;
9340 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
9341 if (rc)
9342 return rc;
9345 h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
9346 hpsa_get_max_perf_mode_cmds(h);
9347 /* Performant mode ring buffer and supporting data structures */
9348 h->reply_queue_size = h->max_commands * sizeof(u64);
9350 for (i = 0; i < h->nreply_queues; i++) {
9351 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
9352 h->reply_queue_size,
9353 &(h->reply_queue[i].busaddr));
9354 if (!h->reply_queue[i].head) {
9355 rc = -ENOMEM;
9356 goto clean1; /* rq, ioaccel */
9358 h->reply_queue[i].size = h->max_commands;
9359 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
9360 h->reply_queue[i].current_entry = 0;
9363 /* Need a block fetch table for performant mode */
9364 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
9365 sizeof(u32)), GFP_KERNEL);
9366 if (!h->blockFetchTable) {
9367 rc = -ENOMEM;
9368 goto clean1; /* rq, ioaccel */
9371 rc = hpsa_enter_performant_mode(h, trans_support);
9372 if (rc)
9373 goto clean2; /* bft, rq, ioaccel */
9374 return 0;
9376 clean2: /* bft, rq, ioaccel */
9377 kfree(h->blockFetchTable);
9378 h->blockFetchTable = NULL;
9379 clean1: /* rq, ioaccel */
9380 hpsa_free_reply_queues(h);
9381 hpsa_free_ioaccel1_cmd_and_bft(h);
9382 hpsa_free_ioaccel2_cmd_and_bft(h);
9383 return rc;
9386 static int is_accelerated_cmd(struct CommandList *c)
9388 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
9391 static void hpsa_drain_accel_commands(struct ctlr_info *h)
9393 struct CommandList *c = NULL;
9394 int i, accel_cmds_out;
9395 int refcount;
9397 do { /* wait for all outstanding ioaccel commands to drain out */
9398 accel_cmds_out = 0;
9399 for (i = 0; i < h->nr_cmds; i++) {
9400 c = h->cmd_pool + i;
9401 refcount = atomic_inc_return(&c->refcount);
9402 if (refcount > 1) /* Command is allocated */
9403 accel_cmds_out += is_accelerated_cmd(c);
9404 cmd_free(h, c);
9406 if (accel_cmds_out <= 0)
9407 break;
9408 msleep(100);
9409 } while (1);
9412 static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
9413 struct hpsa_sas_port *hpsa_sas_port)
9415 struct hpsa_sas_phy *hpsa_sas_phy;
9416 struct sas_phy *phy;
9418 hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
9419 if (!hpsa_sas_phy)
9420 return NULL;
9422 phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
9423 hpsa_sas_port->next_phy_index);
9424 if (!phy) {
9425 kfree(hpsa_sas_phy);
9426 return NULL;
9429 hpsa_sas_port->next_phy_index++;
9430 hpsa_sas_phy->phy = phy;
9431 hpsa_sas_phy->parent_port = hpsa_sas_port;
9433 return hpsa_sas_phy;
9436 static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9438 struct sas_phy *phy = hpsa_sas_phy->phy;
9440 sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
9441 if (hpsa_sas_phy->added_to_port)
9442 list_del(&hpsa_sas_phy->phy_list_entry);
9443 sas_phy_delete(phy);
9444 kfree(hpsa_sas_phy);
9447 static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9449 int rc;
9450 struct hpsa_sas_port *hpsa_sas_port;
9451 struct sas_phy *phy;
9452 struct sas_identify *identify;
9454 hpsa_sas_port = hpsa_sas_phy->parent_port;
9455 phy = hpsa_sas_phy->phy;
9457 identify = &phy->identify;
9458 memset(identify, 0, sizeof(*identify));
9459 identify->sas_address = hpsa_sas_port->sas_address;
9460 identify->device_type = SAS_END_DEVICE;
9461 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9462 identify->target_port_protocols = SAS_PROTOCOL_STP;
9463 phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9464 phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9465 phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
9466 phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
9467 phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
9469 rc = sas_phy_add(hpsa_sas_phy->phy);
9470 if (rc)
9471 return rc;
9473 sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
9474 list_add_tail(&hpsa_sas_phy->phy_list_entry,
9475 &hpsa_sas_port->phy_list_head);
9476 hpsa_sas_phy->added_to_port = true;
9478 return 0;
9481 static int
9482 hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
9483 struct sas_rphy *rphy)
9485 struct sas_identify *identify;
9487 identify = &rphy->identify;
9488 identify->sas_address = hpsa_sas_port->sas_address;
9489 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9490 identify->target_port_protocols = SAS_PROTOCOL_STP;
9492 return sas_rphy_add(rphy);
9495 static struct hpsa_sas_port
9496 *hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
9497 u64 sas_address)
9499 int rc;
9500 struct hpsa_sas_port *hpsa_sas_port;
9501 struct sas_port *port;
9503 hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
9504 if (!hpsa_sas_port)
9505 return NULL;
9507 INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
9508 hpsa_sas_port->parent_node = hpsa_sas_node;
9510 port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
9511 if (!port)
9512 goto free_hpsa_port;
9514 rc = sas_port_add(port);
9515 if (rc)
9516 goto free_sas_port;
9518 hpsa_sas_port->port = port;
9519 hpsa_sas_port->sas_address = sas_address;
9520 list_add_tail(&hpsa_sas_port->port_list_entry,
9521 &hpsa_sas_node->port_list_head);
9523 return hpsa_sas_port;
9525 free_sas_port:
9526 sas_port_free(port);
9527 free_hpsa_port:
9528 kfree(hpsa_sas_port);
9530 return NULL;
9533 static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
9535 struct hpsa_sas_phy *hpsa_sas_phy;
9536 struct hpsa_sas_phy *next;
9538 list_for_each_entry_safe(hpsa_sas_phy, next,
9539 &hpsa_sas_port->phy_list_head, phy_list_entry)
9540 hpsa_free_sas_phy(hpsa_sas_phy);
9542 sas_port_delete(hpsa_sas_port->port);
9543 list_del(&hpsa_sas_port->port_list_entry);
9544 kfree(hpsa_sas_port);
9547 static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
9549 struct hpsa_sas_node *hpsa_sas_node;
9551 hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
9552 if (hpsa_sas_node) {
9553 hpsa_sas_node->parent_dev = parent_dev;
9554 INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
9557 return hpsa_sas_node;
9560 static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
9562 struct hpsa_sas_port *hpsa_sas_port;
9563 struct hpsa_sas_port *next;
9565 if (!hpsa_sas_node)
9566 return;
9568 list_for_each_entry_safe(hpsa_sas_port, next,
9569 &hpsa_sas_node->port_list_head, port_list_entry)
9570 hpsa_free_sas_port(hpsa_sas_port);
9572 kfree(hpsa_sas_node);
9575 static struct hpsa_scsi_dev_t
9576 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
9577 struct sas_rphy *rphy)
9579 int i;
9580 struct hpsa_scsi_dev_t *device;
9582 for (i = 0; i < h->ndevices; i++) {
9583 device = h->dev[i];
9584 if (!device->sas_port)
9585 continue;
9586 if (device->sas_port->rphy == rphy)
9587 return device;
9590 return NULL;
9593 static int hpsa_add_sas_host(struct ctlr_info *h)
9595 int rc;
9596 struct device *parent_dev;
9597 struct hpsa_sas_node *hpsa_sas_node;
9598 struct hpsa_sas_port *hpsa_sas_port;
9599 struct hpsa_sas_phy *hpsa_sas_phy;
9601 parent_dev = &h->scsi_host->shost_dev;
9603 hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
9604 if (!hpsa_sas_node)
9605 return -ENOMEM;
9607 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
9608 if (!hpsa_sas_port) {
9609 rc = -ENODEV;
9610 goto free_sas_node;
9613 hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
9614 if (!hpsa_sas_phy) {
9615 rc = -ENODEV;
9616 goto free_sas_port;
9619 rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
9620 if (rc)
9621 goto free_sas_phy;
9623 h->sas_host = hpsa_sas_node;
9625 return 0;
9627 free_sas_phy:
9628 hpsa_free_sas_phy(hpsa_sas_phy);
9629 free_sas_port:
9630 hpsa_free_sas_port(hpsa_sas_port);
9631 free_sas_node:
9632 hpsa_free_sas_node(hpsa_sas_node);
9634 return rc;
9637 static void hpsa_delete_sas_host(struct ctlr_info *h)
9639 hpsa_free_sas_node(h->sas_host);
9642 static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
9643 struct hpsa_scsi_dev_t *device)
9645 int rc;
9646 struct hpsa_sas_port *hpsa_sas_port;
9647 struct sas_rphy *rphy;
9649 hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
9650 if (!hpsa_sas_port)
9651 return -ENOMEM;
9653 rphy = sas_end_device_alloc(hpsa_sas_port->port);
9654 if (!rphy) {
9655 rc = -ENODEV;
9656 goto free_sas_port;
9659 hpsa_sas_port->rphy = rphy;
9660 device->sas_port = hpsa_sas_port;
9662 rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
9663 if (rc)
9664 goto free_sas_port;
9666 return 0;
9668 free_sas_port:
9669 hpsa_free_sas_port(hpsa_sas_port);
9670 device->sas_port = NULL;
9672 return rc;
9675 static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
9677 if (device->sas_port) {
9678 hpsa_free_sas_port(device->sas_port);
9679 device->sas_port = NULL;
9683 static int
9684 hpsa_sas_get_linkerrors(struct sas_phy *phy)
9686 return 0;
9689 static int
9690 hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
9692 *identifier = rphy->identify.sas_address;
9693 return 0;
9696 static int
9697 hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
9699 return -ENXIO;
9702 static int
9703 hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
9705 return 0;
9708 static int
9709 hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
9711 return 0;
9714 static int
9715 hpsa_sas_phy_setup(struct sas_phy *phy)
9717 return 0;
9720 static void
9721 hpsa_sas_phy_release(struct sas_phy *phy)
9725 static int
9726 hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
9728 return -EINVAL;
9731 static struct sas_function_template hpsa_sas_transport_functions = {
9732 .get_linkerrors = hpsa_sas_get_linkerrors,
9733 .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
9734 .get_bay_identifier = hpsa_sas_get_bay_identifier,
9735 .phy_reset = hpsa_sas_phy_reset,
9736 .phy_enable = hpsa_sas_phy_enable,
9737 .phy_setup = hpsa_sas_phy_setup,
9738 .phy_release = hpsa_sas_phy_release,
9739 .set_phy_speed = hpsa_sas_phy_speed,
9743 * This is it. Register the PCI driver information for the cards we control
9744 * the OS will call our registered routines when it finds one of our cards.
9746 static int __init hpsa_init(void)
9748 int rc;
9750 hpsa_sas_transport_template =
9751 sas_attach_transport(&hpsa_sas_transport_functions);
9752 if (!hpsa_sas_transport_template)
9753 return -ENODEV;
9755 rc = pci_register_driver(&hpsa_pci_driver);
9757 if (rc)
9758 sas_release_transport(hpsa_sas_transport_template);
9760 return rc;
9763 static void __exit hpsa_cleanup(void)
9765 pci_unregister_driver(&hpsa_pci_driver);
9766 sas_release_transport(hpsa_sas_transport_template);
9769 static void __attribute__((unused)) verify_offsets(void)
9771 #define VERIFY_OFFSET(member, offset) \
9772 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
9774 VERIFY_OFFSET(structure_size, 0);
9775 VERIFY_OFFSET(volume_blk_size, 4);
9776 VERIFY_OFFSET(volume_blk_cnt, 8);
9777 VERIFY_OFFSET(phys_blk_shift, 16);
9778 VERIFY_OFFSET(parity_rotation_shift, 17);
9779 VERIFY_OFFSET(strip_size, 18);
9780 VERIFY_OFFSET(disk_starting_blk, 20);
9781 VERIFY_OFFSET(disk_blk_cnt, 28);
9782 VERIFY_OFFSET(data_disks_per_row, 36);
9783 VERIFY_OFFSET(metadata_disks_per_row, 38);
9784 VERIFY_OFFSET(row_cnt, 40);
9785 VERIFY_OFFSET(layout_map_count, 42);
9786 VERIFY_OFFSET(flags, 44);
9787 VERIFY_OFFSET(dekindex, 46);
9788 /* VERIFY_OFFSET(reserved, 48 */
9789 VERIFY_OFFSET(data, 64);
9791 #undef VERIFY_OFFSET
9793 #define VERIFY_OFFSET(member, offset) \
9794 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
9796 VERIFY_OFFSET(IU_type, 0);
9797 VERIFY_OFFSET(direction, 1);
9798 VERIFY_OFFSET(reply_queue, 2);
9799 /* VERIFY_OFFSET(reserved1, 3); */
9800 VERIFY_OFFSET(scsi_nexus, 4);
9801 VERIFY_OFFSET(Tag, 8);
9802 VERIFY_OFFSET(cdb, 16);
9803 VERIFY_OFFSET(cciss_lun, 32);
9804 VERIFY_OFFSET(data_len, 40);
9805 VERIFY_OFFSET(cmd_priority_task_attr, 44);
9806 VERIFY_OFFSET(sg_count, 45);
9807 /* VERIFY_OFFSET(reserved3 */
9808 VERIFY_OFFSET(err_ptr, 48);
9809 VERIFY_OFFSET(err_len, 56);
9810 /* VERIFY_OFFSET(reserved4 */
9811 VERIFY_OFFSET(sg, 64);
9813 #undef VERIFY_OFFSET
9815 #define VERIFY_OFFSET(member, offset) \
9816 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
9818 VERIFY_OFFSET(dev_handle, 0x00);
9819 VERIFY_OFFSET(reserved1, 0x02);
9820 VERIFY_OFFSET(function, 0x03);
9821 VERIFY_OFFSET(reserved2, 0x04);
9822 VERIFY_OFFSET(err_info, 0x0C);
9823 VERIFY_OFFSET(reserved3, 0x10);
9824 VERIFY_OFFSET(err_info_len, 0x12);
9825 VERIFY_OFFSET(reserved4, 0x13);
9826 VERIFY_OFFSET(sgl_offset, 0x14);
9827 VERIFY_OFFSET(reserved5, 0x15);
9828 VERIFY_OFFSET(transfer_len, 0x1C);
9829 VERIFY_OFFSET(reserved6, 0x20);
9830 VERIFY_OFFSET(io_flags, 0x24);
9831 VERIFY_OFFSET(reserved7, 0x26);
9832 VERIFY_OFFSET(LUN, 0x34);
9833 VERIFY_OFFSET(control, 0x3C);
9834 VERIFY_OFFSET(CDB, 0x40);
9835 VERIFY_OFFSET(reserved8, 0x50);
9836 VERIFY_OFFSET(host_context_flags, 0x60);
9837 VERIFY_OFFSET(timeout_sec, 0x62);
9838 VERIFY_OFFSET(ReplyQueue, 0x64);
9839 VERIFY_OFFSET(reserved9, 0x65);
9840 VERIFY_OFFSET(tag, 0x68);
9841 VERIFY_OFFSET(host_addr, 0x70);
9842 VERIFY_OFFSET(CISS_LUN, 0x78);
9843 VERIFY_OFFSET(SG, 0x78 + 8);
9844 #undef VERIFY_OFFSET
9847 module_init(hpsa_init);
9848 module_exit(hpsa_cleanup);