drivers/scsi/myrs.c (Linux 5.1.15)
// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * This driver supports the newer, SCSI-based firmware interface only.
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
 *
 * Based on the original DAC960 driver, which has
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include "myrs.h"

static struct raid_template *myrs_raid_template;
static struct myrs_devstate_name_entry {
	enum myrs_devstate state;
	char *name;
} myrs_devstate_name_list[] = {
	{ MYRS_DEVICE_UNCONFIGURED, "Unconfigured" },
	{ MYRS_DEVICE_ONLINE, "Online" },
	{ MYRS_DEVICE_REBUILD, "Rebuild" },
	{ MYRS_DEVICE_MISSING, "Missing" },
	{ MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" },
	{ MYRS_DEVICE_OFFLINE, "Offline" },
	{ MYRS_DEVICE_CRITICAL, "Critical" },
	{ MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" },
	{ MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" },
	{ MYRS_DEVICE_STANDBY, "Standby" },
	{ MYRS_DEVICE_INVALID_STATE, "Invalid" },
};

static char *myrs_devstate_name(enum myrs_devstate state)
{
	struct myrs_devstate_name_entry *entry = myrs_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return NULL;
}
static struct myrs_raid_level_name_entry {
	enum myrs_raid_level level;
	char *name;
} myrs_raid_level_name_list[] = {
	{ MYRS_RAID_LEVEL0, "RAID0" },
	{ MYRS_RAID_LEVEL1, "RAID1" },
	{ MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" },
	{ MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" },
	{ MYRS_RAID_LEVEL6, "RAID6" },
	{ MYRS_RAID_JBOD, "JBOD" },
	{ MYRS_RAID_NEWSPAN, "New Mylex SPAN" },
	{ MYRS_RAID_LEVEL3F, "RAID3 fixed parity" },
	{ MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" },
	{ MYRS_RAID_SPAN, "Mylex SPAN" },
	{ MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" },
	{ MYRS_RAID_LEVELE, "RAIDE (concatenation)" },
	{ MYRS_RAID_PHYSICAL, "Physical device" },
};

static char *myrs_raid_level_name(enum myrs_raid_level level)
{
	struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}
/**
 * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk
 */
static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
{
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrs_cmd_mbox));
	cmd_blk->status = 0;
}
/**
 * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
 */
static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
{
	void __iomem *base = cs->io_base;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;

	cs->write_cmd_mbox(next_mbox, mbox);

	if (cs->prev_cmd_mbox1->words[0] == 0 ||
	    cs->prev_cmd_mbox2->words[0] == 0)
		cs->get_cmd_mbox(base);

	cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
	cs->prev_cmd_mbox1 = next_mbox;

	if (++next_mbox > cs->last_cmd_mbox)
		next_mbox = cs->first_cmd_mbox;

	cs->next_cmd_mbox = next_mbox;
}
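/*
 * Note on the queueing protocol (descriptive, not from the original
 * source): commands are posted into a ring of memory mailboxes, and the
 * firmware clears words[0] of a slot once it has fetched the command.
 * The check on the two most recently posted slots above therefore
 * appears to detect a controller that may have gone idle and stopped
 * scanning the ring, in which case get_cmd_mbox() rings the hardware
 * doorbell to prompt it to fetch the new command.
 */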
/**
 * myrs_exec_cmd - executes V2 Command and waits for completion.
 */
static void myrs_exec_cmd(struct myrs_hba *cs,
		struct myrs_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(complete);
	unsigned long flags;

	cmd_blk->complete = &complete;
	spin_lock_irqsave(&cs->queue_lock, flags);
	myrs_qcmd(cs, cmd_blk);
	spin_unlock_irqrestore(&cs->queue_lock, flags);

	WARN_ON(in_interrupt());
	wait_for_completion(&complete);
}
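/*
 * All of the direct-command helpers below follow the same pattern
 * (sketch, paraphrasing the code that follows; not itself part of the
 * original file):
 *
 *	mutex_lock(&cs->dcmd_mutex);
 *	myrs_reset_cmd(cmd_blk);
 *	mbox->...id = MYRS_DCMD_TAG;		// fill in the mailbox
 *	mbox->...opcode = MYRS_CMD_OP_IOCTL;
 *	myrs_exec_cmd(cs, cmd_blk);		// post and sleep on completion
 *	status = cmd_blk->status;
 *	mutex_unlock(&cs->dcmd_mutex);
 *
 * myrs_exec_cmd() sleeps, hence the WARN_ON(in_interrupt()) above;
 * none of these helpers may be called from interrupt context.
 */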
/**
 * myrs_report_progress - prints progress message
 */
static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
		unsigned char *msg, unsigned long blocks,
		unsigned long size)
{
	/* Scale both operands down (>> 7) so that the multiply by 100
	 * cannot overflow an int for large logical devices.
	 */
	shost_printk(KERN_INFO, cs->host,
		     "Logical Drive %d: %s in Progress: %d%% completed\n",
		     ldev_num, msg,
		     (100 * (int)(blocks >> 7)) / (int)(size >> 7));
}
/**
 * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
 */
static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ctlr_info_addr;
	union myrs_sgl *sgl;
	unsigned char status;
	unsigned short ldev_present, ldev_critical, ldev_offline;

	ldev_present = cs->ctlr_info->ldev_present;
	ldev_critical = cs->ctlr_info->ldev_critical;
	ldev_offline = cs->ctlr_info->ldev_offline;

	ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
					sizeof(struct myrs_ctlr_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ctlr_info.id = MYRS_DCMD_TAG;
	mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ctlr_info.control.dma_ctrl_to_host = true;
	mbox->ctlr_info.control.no_autosense = true;
	mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
	mbox->ctlr_info.ctlr_num = 0;
	mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
	sgl = &mbox->ctlr_info.dma_addr;
	sgl->sge[0].sge_addr = ctlr_info_addr;
	sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
	dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n");
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, ctlr_info_addr,
			 sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		if (cs->ctlr_info->bg_init_active +
		    cs->ctlr_info->ldev_init_active +
		    cs->ctlr_info->pdev_init_active +
		    cs->ctlr_info->cc_active +
		    cs->ctlr_info->rbld_active +
		    cs->ctlr_info->exp_active != 0)
			cs->needs_update = true;
		if (cs->ctlr_info->ldev_present != ldev_present ||
		    cs->ctlr_info->ldev_critical != ldev_critical ||
		    cs->ctlr_info->ldev_offline != ldev_offline)
			shost_printk(KERN_INFO, cs->host,
				     "Logical drive count changes (%d/%d/%d)\n",
				     cs->ctlr_info->ldev_critical,
				     cs->ctlr_info->ldev_offline,
				     cs->ctlr_info->ldev_present);
	}

	return status;
}
/**
 * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
 */
static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
		unsigned short ldev_num, struct myrs_ldev_info *ldev_info)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ldev_info_addr;
	struct myrs_ldev_info ldev_info_orig;
	union myrs_sgl *sgl;
	unsigned char status;

	memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info));
	ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info,
					sizeof(struct myrs_ldev_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ldev_info.id = MYRS_DCMD_TAG;
	mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ldev_info.control.dma_ctrl_to_host = true;
	mbox->ldev_info.control.no_autosense = true;
	mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info);
	mbox->ldev_info.ldev.ldev_num = ldev_num;
	mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID;
	sgl = &mbox->ldev_info.dma_addr;
	sgl->sge[0].sge_addr = ldev_info_addr;
	sgl->sge[0].sge_count = mbox->ldev_info.dma_size;
	dev_dbg(&cs->host->shost_gendev,
		"Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, ldev_info_addr,
			 sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		unsigned short ldev_num = ldev_info->ldev_num;
		struct myrs_ldev_info *new = ldev_info;
		struct myrs_ldev_info *old = &ldev_info_orig;
		unsigned long ldev_size = new->cfg_devsize;

		if (new->dev_state != old->dev_state) {
			const char *name;

			name = myrs_devstate_name(new->dev_state);
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d is now %s\n",
				     ldev_num, name ? name : "Invalid");
		}
		if ((new->soft_errs != old->soft_errs) ||
		    (new->cmds_failed != old->cmds_failed) ||
		    (new->deferred_write_errs != old->deferred_write_errs))
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n",
				     ldev_num, new->soft_errs,
				     new->cmds_failed,
				     new->deferred_write_errs);
		if (new->bg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Background Initialization",
					     new->bg_init_lba, ldev_size);
		else if (new->fg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Foreground Initialization",
					     new->fg_init_lba, ldev_size);
		else if (new->migration_active)
			myrs_report_progress(cs, ldev_num,
					     "Data Migration",
					     new->migration_lba, ldev_size);
		else if (new->patrol_active)
			myrs_report_progress(cs, ldev_num,
					     "Patrol Operation",
					     new->patrol_lba, ldev_size);
		if (old->bg_init_active && !new->bg_init_active)
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d: Background Initialization %s\n",
				     ldev_num,
				     (new->ldev_control.ldev_init_done ?
				      "Completed" : "Failed"));
	}
	return status;
}
/**
 * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
 */
static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
		unsigned char channel, unsigned char target, unsigned char lun,
		struct myrs_pdev_info *pdev_info)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t pdev_info_addr;
	union myrs_sgl *sgl;
	unsigned char status;

	pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info,
					sizeof(struct myrs_pdev_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->pdev_info.id = MYRS_DCMD_TAG;
	mbox->pdev_info.control.dma_ctrl_to_host = true;
	mbox->pdev_info.control.no_autosense = true;
	mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info);
	mbox->pdev_info.pdev.lun = lun;
	mbox->pdev_info.pdev.target = target;
	mbox->pdev_info.pdev.channel = channel;
	mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID;
	sgl = &mbox->pdev_info.dma_addr;
	sgl->sge[0].sge_addr = pdev_info_addr;
	sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
	dev_dbg(&cs->host->shost_gendev,
		"Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
		channel, target, lun);
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, pdev_info_addr,
			 sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE);
	return status;
}
/**
 * myrs_dev_op - executes a "Device Operation" Command
 */
static unsigned char myrs_dev_op(struct myrs_hba *cs,
		enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned char status;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL;
	mbox->dev_op.id = MYRS_DCMD_TAG;
	mbox->dev_op.control.dma_ctrl_to_host = true;
	mbox->dev_op.control.no_autosense = true;
	mbox->dev_op.ioctl_opcode = opcode;
	mbox->dev_op.opdev = opdev;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	return status;
}
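/*
 * Note (not from the original source): myrs_dev_op() carries no data
 * buffer; the IOCTL opcode and the operation device are the whole
 * request. flush_cache_store() further down, for example, invokes it as
 * myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER).
 */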
/**
 * myrs_translate_pdev - translates a Physical Device Channel and
 * TargetID into a Logical Device.
 */
static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
		unsigned char channel, unsigned char target, unsigned char lun,
		struct myrs_devmap *devmap)
{
	struct pci_dev *pdev = cs->pdev;
	dma_addr_t devmap_addr;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	union myrs_sgl *sgl;
	unsigned char status;

	memset(devmap, 0x0, sizeof(struct myrs_devmap));
	devmap_addr = dma_map_single(&pdev->dev, devmap,
				     sizeof(struct myrs_devmap),
				     DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, devmap_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	mbox = &cmd_blk->mbox;
	mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->pdev_info.control.dma_ctrl_to_host = true;
	mbox->pdev_info.control.no_autosense = true;
	mbox->pdev_info.dma_size = sizeof(struct myrs_devmap);
	mbox->pdev_info.pdev.target = target;
	mbox->pdev_info.pdev.channel = channel;
	mbox->pdev_info.pdev.lun = lun;
	mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV;
	sgl = &mbox->pdev_info.dma_addr;
	sgl->sge[0].sge_addr = devmap_addr;
	sgl->sge[0].sge_count = mbox->pdev_info.dma_size;

	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&pdev->dev, devmap_addr,
			 sizeof(struct myrs_devmap), DMA_FROM_DEVICE);
	return status;
}
/**
 * myrs_get_event - executes a Get Event Command
 */
static unsigned char myrs_get_event(struct myrs_hba *cs,
		unsigned int event_num, struct myrs_event *event_buf)
{
	struct pci_dev *pdev = cs->pdev;
	dma_addr_t event_addr;
	struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_sgl *sgl;
	unsigned char status;

	event_addr = dma_map_single(&pdev->dev, event_buf,
				    sizeof(struct myrs_event), DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, event_addr))
		return MYRS_STATUS_FAILED;

	mbox->get_event.opcode = MYRS_CMD_OP_IOCTL;
	mbox->get_event.dma_size = sizeof(struct myrs_event);
	mbox->get_event.evnum_upper = event_num >> 16;
	mbox->get_event.ctlr_num = 0;
	mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT;
	mbox->get_event.evnum_lower = event_num & 0xFFFF;
	sgl = &mbox->get_event.dma_addr;
	sgl->sge[0].sge_addr = event_addr;
	sgl->sge[0].sge_count = mbox->get_event.dma_size;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	dma_unmap_single(&pdev->dev, event_addr,
			 sizeof(struct myrs_event), DMA_FROM_DEVICE);

	return status;
}
/**
 * myrs_get_fwstatus - executes a Get Health Status Command
 */
static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
{
	struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_sgl *sgl;
	unsigned char status = cmd_blk->status;

	myrs_reset_cmd(cmd_blk);
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_MCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	mbox->common.dma_size = sizeof(struct myrs_fwstat);
	mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS;
	sgl = &mbox->common.dma_addr;
	sgl->sge[0].sge_addr = cs->fwstat_addr;
	/* ctlr_info and common overlay each other in the mailbox union,
	 * so reading dma_size through ctlr_info yields the value written
	 * through common above.
	 */
	sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
	dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n");
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;

	return status;
}
/**
 * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
 */
static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
		enable_mbox_t enable_mbox_fn)
{
	void __iomem *base = cs->io_base;
	struct pci_dev *pdev = cs->pdev;
	union myrs_cmd_mbox *cmd_mbox;
	struct myrs_stat_mbox *stat_mbox;
	union myrs_cmd_mbox *mbox;
	dma_addr_t mbox_addr;
	unsigned char status = MYRS_STATUS_FAILED;

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
			dev_err(&pdev->dev, "DMA mask out of range\n");
			return false;
		}

	/* Temporary dma mapping, used only in the scope of this function */
	mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
				  &mbox_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, mbox_addr))
		return false;

	/* These are the base addresses for the command memory mailbox array */
	cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
	cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
				      &cs->cmd_mbox_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) {
		dev_err(&pdev->dev, "Failed to map command mailbox\n");
		goto out_free;
	}
	cs->first_cmd_mbox = cmd_mbox;
	cmd_mbox += MYRS_MAX_CMD_MBOX - 1;
	cs->last_cmd_mbox = cmd_mbox;
	cs->next_cmd_mbox = cs->first_cmd_mbox;
	cs->prev_cmd_mbox1 = cs->last_cmd_mbox;
	cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1;

	/* These are the base addresses for the status memory mailbox array */
	cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
	stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
				       &cs->stat_mbox_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) {
		dev_err(&pdev->dev, "Failed to map status mailbox\n");
		goto out_free;
	}

	cs->first_stat_mbox = stat_mbox;
	stat_mbox += MYRS_MAX_STAT_MBOX - 1;
	cs->last_stat_mbox = stat_mbox;
	cs->next_stat_mbox = cs->first_stat_mbox;

	cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
					    sizeof(struct myrs_fwstat),
					    &cs->fwstat_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) {
		dev_err(&pdev->dev, "Failed to map firmware health buffer\n");
		cs->fwstat_buf = NULL;
		goto out_free;
	}
	cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
				GFP_KERNEL | GFP_DMA);
	if (!cs->ctlr_info)
		goto out_free;

	cs->event_buf = kzalloc(sizeof(struct myrs_event),
				GFP_KERNEL | GFP_DMA);
	if (!cs->event_buf)
		goto out_free;

	/* Enable the Memory Mailbox Interface. */
	memset(mbox, 0, sizeof(union myrs_cmd_mbox));
	mbox->set_mbox.id = 1;
	mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL;
	mbox->set_mbox.control.no_autosense = true;
	mbox->set_mbox.first_cmd_mbox_size_kb =
		(MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10;
	mbox->set_mbox.first_stat_mbox_size_kb =
		(MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10;
	mbox->set_mbox.second_cmd_mbox_size_kb = 0;
	mbox->set_mbox.second_stat_mbox_size_kb = 0;
	mbox->set_mbox.sense_len = 0;
	mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX;
	mbox->set_mbox.fwstat_buf_size_kb = 1;
	mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr;
	mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr;
	mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr;
	status = enable_mbox_fn(base, mbox_addr);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
			  mbox, mbox_addr);
	if (status != MYRS_STATUS_SUCCESS)
		dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
			status);
	return (status == MYRS_STATUS_SUCCESS);
}
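/*
 * Note (not from the original source): enable_mbox_fn is the
 * hardware-flavor hook supplied by the probe code. It is expected to
 * hand mbox_addr (the one-off SET_MEM_MBOX command built above) to the
 * controller through its register interface and return the resulting
 * MYRS_STATUS_* value once the firmware has acknowledged the new
 * command and status mailbox arrays.
 */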
/**
 * myrs_get_config - reads the Configuration Information
 */
static int myrs_get_config(struct myrs_hba *cs)
{
	struct myrs_ctlr_info *info = cs->ctlr_info;
	struct Scsi_Host *shost = cs->host;
	unsigned char status;
	unsigned char model[20];
	unsigned char fw_version[12];
	int i, model_len;

	/* Get data into dma-able area, then copy into permanent location */
	mutex_lock(&cs->cinfo_mutex);
	status = myrs_get_ctlr_info(cs);
	mutex_unlock(&cs->cinfo_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		shost_printk(KERN_ERR, shost,
			     "Failed to get controller information\n");
		return -ENODEV;
	}

	/* Initialize the Controller Model Name and Full Model Name fields. */
	model_len = sizeof(info->ctlr_name);
	if (model_len > sizeof(model)-1)
		model_len = sizeof(model)-1;
	memcpy(model, info->ctlr_name, model_len);
	model_len--;
	while (model[model_len] == ' ' || model[model_len] == '\0')
		model_len--;
	model[++model_len] = '\0';
	strcpy(cs->model_name, "DAC960 ");
	strcat(cs->model_name, model);
	/* Initialize the Controller Firmware Version field. */
	sprintf(fw_version, "%d.%02d-%02d",
		info->fw_major_version, info->fw_minor_version,
		info->fw_turn_number);
	if (info->fw_major_version == 6 &&
	    info->fw_minor_version == 0 &&
	    info->fw_turn_number < 1) {
		shost_printk(KERN_WARNING, shost,
			"FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
			"STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
			"PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
			fw_version);
		return -ENODEV;
	}
	/* Initialize the Controller Channels and Targets. */
	shost->max_channel = info->physchan_present + info->virtchan_present;
	shost->max_id = info->max_targets[0];
	for (i = 1; i < 16; i++) {
		if (!info->max_targets[i])
			continue;
		if (shost->max_id < info->max_targets[i])
			shost->max_id = info->max_targets[i];
	}

	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most three less than
	 * the Controller Queue Depth; tag '1' is reserved for
	 * direct commands, and tag '2' for monitoring commands.
	 */
	shost->can_queue = info->max_tcq - 3;
	if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3)
		shost->can_queue = MYRS_MAX_CMD_MBOX - 3;
	shost->max_sectors = info->max_transfer_size;
	shost->sg_tablesize = info->max_sge;
	if (shost->sg_tablesize > MYRS_SG_LIMIT)
		shost->sg_tablesize = MYRS_SG_LIMIT;

	shost_printk(KERN_INFO, shost,
		     "Configuring %s PCI RAID Controller\n", model);
	shost_printk(KERN_INFO, shost,
		     " Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
		     fw_version, info->physchan_present, info->mem_size_mb);

	shost_printk(KERN_INFO, shost,
		     " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		     shost->can_queue, shost->max_sectors);

	shost_printk(KERN_INFO, shost,
		     " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT);
	for (i = 0; i < info->physchan_max; i++) {
		if (!info->max_targets[i])
			continue;
		shost_printk(KERN_INFO, shost,
			     " Device Channel %d: max %d devices\n",
			     i, info->max_targets[i]);
	}
	shost_printk(KERN_INFO, shost,
		     " Physical: %d/%d channels, %d disks, %d devices\n",
		     info->physchan_present, info->physchan_max,
		     info->pdisk_present, info->pdev_present);

	shost_printk(KERN_INFO, shost,
		     " Logical: %d/%d channels, %d disks\n",
		     info->virtchan_present, info->virtchan_max,
		     info->ldev_present);
	return 0;
}
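/*
 * Worked example (hypothetical numbers, not from the original source):
 * if the firmware reported max_tcq = 256, the driver queue depth would
 * be 256 - 3 = 253, then capped at MYRS_MAX_CMD_MBOX - 3 so that the
 * command mailbox ring can never be overrun even with the reserved
 * tags '1' and '2' in flight alongside normal I/O.
 */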
/**
 * myrs_log_event - prints a Controller Event message
 */
static struct {
	int ev_code;
	unsigned char *ev_msg;
} myrs_ev_list[] = {
	/* Physical Device Events (0x0000 - 0x007F) */
	{ 0x0001, "P Online" },
	{ 0x0002, "P Standby" },
	{ 0x0005, "P Automatic Rebuild Started" },
	{ 0x0006, "P Manual Rebuild Started" },
	{ 0x0007, "P Rebuild Completed" },
	{ 0x0008, "P Rebuild Cancelled" },
	{ 0x0009, "P Rebuild Failed for Unknown Reasons" },
	{ 0x000A, "P Rebuild Failed due to New Physical Device" },
	{ 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
	{ 0x000C, "S Offline" },
	{ 0x000D, "P Found" },
	{ 0x000E, "P Removed" },
	{ 0x000F, "P Unconfigured" },
	{ 0x0010, "P Expand Capacity Started" },
	{ 0x0011, "P Expand Capacity Completed" },
	{ 0x0012, "P Expand Capacity Failed" },
	{ 0x0013, "P Command Timed Out" },
	{ 0x0014, "P Command Aborted" },
	{ 0x0015, "P Command Retried" },
	{ 0x0016, "P Parity Error" },
	{ 0x0017, "P Soft Error" },
	{ 0x0018, "P Miscellaneous Error" },
	{ 0x0019, "P Reset" },
	{ 0x001A, "P Active Spare Found" },
	{ 0x001B, "P Warm Spare Found" },
	{ 0x001C, "S Sense Data Received" },
	{ 0x001D, "P Initialization Started" },
	{ 0x001E, "P Initialization Completed" },
	{ 0x001F, "P Initialization Failed" },
	{ 0x0020, "P Initialization Cancelled" },
	{ 0x0021, "P Failed because Write Recovery Failed" },
	{ 0x0022, "P Failed because SCSI Bus Reset Failed" },
	{ 0x0023, "P Failed because of Double Check Condition" },
	{ 0x0024, "P Failed because Device Cannot Be Accessed" },
	{ 0x0025, "P Failed because of Gross Error on SCSI Processor" },
	{ 0x0026, "P Failed because of Bad Tag from Device" },
	{ 0x0027, "P Failed because of Command Timeout" },
	{ 0x0028, "P Failed because of System Reset" },
	{ 0x0029, "P Failed because of Busy Status or Parity Error" },
	{ 0x002A, "P Failed because Host Set Device to Failed State" },
	{ 0x002B, "P Failed because of Selection Timeout" },
	{ 0x002C, "P Failed because of SCSI Bus Phase Error" },
	{ 0x002D, "P Failed because Device Returned Unknown Status" },
	{ 0x002E, "P Failed because Device Not Ready" },
	{ 0x002F, "P Failed because Device Not Found at Startup" },
	{ 0x0030, "P Failed because COD Write Operation Failed" },
	{ 0x0031, "P Failed because BDT Write Operation Failed" },
	{ 0x0039, "P Missing at Startup" },
	{ 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
	{ 0x003C, "P Temporarily Offline Device Automatically Made Online" },
	{ 0x003D, "P Standby Rebuild Started" },
	/* Logical Device Events (0x0080 - 0x00FF) */
	{ 0x0080, "M Consistency Check Started" },
	{ 0x0081, "M Consistency Check Completed" },
	{ 0x0082, "M Consistency Check Cancelled" },
	{ 0x0083, "M Consistency Check Completed With Errors" },
	{ 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
	{ 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
	{ 0x0086, "L Offline" },
	{ 0x0087, "L Critical" },
	{ 0x0088, "L Online" },
	{ 0x0089, "M Automatic Rebuild Started" },
	{ 0x008A, "M Manual Rebuild Started" },
	{ 0x008B, "M Rebuild Completed" },
	{ 0x008C, "M Rebuild Cancelled" },
	{ 0x008D, "M Rebuild Failed for Unknown Reasons" },
	{ 0x008E, "M Rebuild Failed due to New Physical Device" },
	{ 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
	{ 0x0090, "M Initialization Started" },
	{ 0x0091, "M Initialization Completed" },
	{ 0x0092, "M Initialization Cancelled" },
	{ 0x0093, "M Initialization Failed" },
	{ 0x0094, "L Found" },
	{ 0x0095, "L Deleted" },
	{ 0x0096, "M Expand Capacity Started" },
	{ 0x0097, "M Expand Capacity Completed" },
	{ 0x0098, "M Expand Capacity Failed" },
	{ 0x0099, "L Bad Block Found" },
	{ 0x009A, "L Size Changed" },
	{ 0x009B, "L Type Changed" },
	{ 0x009C, "L Bad Data Block Found" },
	{ 0x009E, "L Read of Data Block in BDT" },
	{ 0x009F, "L Write Back Data for Disk Block Lost" },
	{ 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
	{ 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
	{ 0x00A2, "L Standby Rebuild Started" },
	/* Fault Management Events (0x0100 - 0x017F) */
	{ 0x0140, "E Fan %d Failed" },
	{ 0x0141, "E Fan %d OK" },
	{ 0x0142, "E Fan %d Not Present" },
	{ 0x0143, "E Power Supply %d Failed" },
	{ 0x0144, "E Power Supply %d OK" },
	{ 0x0145, "E Power Supply %d Not Present" },
	{ 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
	{ 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
	{ 0x0148, "E Temperature Sensor %d Temperature Normal" },
	{ 0x0149, "E Temperature Sensor %d Not Present" },
	{ 0x014A, "E Enclosure Management Unit %d Access Critical" },
	{ 0x014B, "E Enclosure Management Unit %d Access OK" },
	{ 0x014C, "E Enclosure Management Unit %d Access Offline" },
	/* Controller Events (0x0180 - 0x01FF) */
	{ 0x0181, "C Cache Write Back Error" },
	{ 0x0188, "C Battery Backup Unit Found" },
	{ 0x0189, "C Battery Backup Unit Charge Level Low" },
	{ 0x018A, "C Battery Backup Unit Charge Level OK" },
	{ 0x0193, "C Installation Aborted" },
	{ 0x0195, "C Battery Backup Unit Physically Removed" },
	{ 0x0196, "C Memory Error During Warm Boot" },
	{ 0x019E, "C Memory Soft ECC Error Corrected" },
	{ 0x019F, "C Memory Hard ECC Error Corrected" },
	{ 0x01A2, "C Battery Backup Unit Failed" },
	{ 0x01AB, "C Mirror Race Recovery Failed" },
	{ 0x01AC, "C Mirror Race on Critical Drive" },
	/* Controller Internal Processor Events */
	{ 0x0380, "C Internal Controller Hung" },
	{ 0x0381, "C Internal Controller Firmware Breakpoint" },
	{ 0x0390, "C Internal Controller i960 Processor Specific Error" },
	{ 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
	{ 0, "" }
};
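/*
 * Note on the table encoding (descriptive, not from the original
 * source): each message is prefixed with a one-letter event class and a
 * space. myrs_log_event() below reads ev_msg[0] as the class ('P'
 * physical device, 'L' logical drive, 'M' logical-drive maintenance,
 * handled like 'L', 'S' sense data, 'E' enclosure, 'C' controller) and
 * prints the text starting at ev_msg[2]. The { 0, "" } entry is the
 * end-of-table sentinel.
 */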
static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
{
	unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE];
	int ev_idx = 0, ev_code;
	unsigned char ev_type, *ev_msg;
	struct Scsi_Host *shost = cs->host;
	struct scsi_device *sdev;
	struct scsi_sense_hdr sshdr = {0};
	unsigned char sense_info[4];
	unsigned char cmd_specific[4];

	if (ev->ev_code == 0x1C) {
		if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) {
			memset(&sshdr, 0x0, sizeof(sshdr));
			memset(sense_info, 0x0, sizeof(sense_info));
			memset(cmd_specific, 0x0, sizeof(cmd_specific));
		} else {
			memcpy(sense_info, &ev->sense_data[3], 4);
			memcpy(cmd_specific, &ev->sense_data[7], 4);
		}
	}
	if (sshdr.sense_key == VENDOR_SPECIFIC &&
	    (sshdr.asc == 0x80 || sshdr.asc == 0x81))
		ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq);
	while (true) {
		ev_code = myrs_ev_list[ev_idx].ev_code;
		if (ev_code == ev->ev_code || ev_code == 0)
			break;
		ev_idx++;
	}
	ev_type = myrs_ev_list[ev_idx].ev_msg[0];
	ev_msg = &myrs_ev_list[ev_idx].ev_msg[2];
	if (ev_code == 0) {
		shost_printk(KERN_WARNING, shost,
			     "Unknown Controller Event Code %04X\n",
			     ev->ev_code);
		return;
	}
	switch (ev_type) {
	case 'P':
		sdev = scsi_device_lookup(shost, ev->channel,
					  ev->target, 0);
		if (!sdev) {
			/* scsi_device_lookup() may return NULL if the
			 * device is not (or no longer) attached; report
			 * at host level instead of dereferencing sdev.
			 */
			shost_printk(KERN_INFO, shost,
				     "event %d: Physical Device %d:%d %s\n",
				     ev->ev_seq, ev->channel, ev->target,
				     ev_msg);
			break;
		}
		sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n",
			    ev->ev_seq, ev_msg);
		if (sdev->hostdata &&
		    sdev->channel < cs->ctlr_info->physchan_present) {
			struct myrs_pdev_info *pdev_info = sdev->hostdata;

			switch (ev->ev_code) {
			case 0x0001:
			case 0x0007:
				pdev_info->dev_state = MYRS_DEVICE_ONLINE;
				break;
			case 0x0002:
				pdev_info->dev_state = MYRS_DEVICE_STANDBY;
				break;
			case 0x000C:
				pdev_info->dev_state = MYRS_DEVICE_OFFLINE;
				break;
			case 0x000E:
				pdev_info->dev_state = MYRS_DEVICE_MISSING;
				break;
			case 0x000F:
				pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED;
				break;
			}
		}
		break;
	case 'L':
		shost_printk(KERN_INFO, shost,
			     "event %d: Logical Drive %d %s\n",
			     ev->ev_seq, ev->lun, ev_msg);
		cs->needs_update = true;
		break;
	case 'M':
		shost_printk(KERN_INFO, shost,
			     "event %d: Logical Drive %d %s\n",
			     ev->ev_seq, ev->lun, ev_msg);
		cs->needs_update = true;
		break;
	case 'S':
		if (sshdr.sense_key == NO_SENSE ||
		    (sshdr.sense_key == NOT_READY &&
		     sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
					   sshdr.ascq == 0x02)))
			break;
		shost_printk(KERN_INFO, shost,
			     "event %d: Physical Device %d:%d %s\n",
			     ev->ev_seq, ev->channel, ev->target, ev_msg);
		shost_printk(KERN_INFO, shost,
			     "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
			     ev->channel, ev->target,
			     sshdr.sense_key, sshdr.asc, sshdr.ascq);
		shost_printk(KERN_INFO, shost,
			     "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n",
			     ev->channel, ev->target,
			     sense_info[0], sense_info[1],
			     sense_info[2], sense_info[3],
			     cmd_specific[0], cmd_specific[1],
			     cmd_specific[2], cmd_specific[3]);
		break;
	case 'E':
		if (cs->disable_enc_msg)
			break;
		sprintf(msg_buf, ev_msg, ev->lun);
		shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n",
			     ev->ev_seq, ev->target, msg_buf);
		break;
	case 'C':
		shost_printk(KERN_INFO, shost, "event %d: Controller %s\n",
			     ev->ev_seq, ev_msg);
		break;
	default:
		shost_printk(KERN_INFO, shost,
			     "event %d: Unknown Event Code %04X\n",
			     ev->ev_seq, ev->ev_code);
		break;
	}
}
/*
 * SCSI sysfs interface functions
 */
static ssize_t raid_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	int ret;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel >= cs->ctlr_info->physchan_present) {
		struct myrs_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		name = myrs_devstate_name(ldev_info->dev_state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       ldev_info->dev_state);
	} else {
		struct myrs_pdev_info *pdev_info;
		const char *name;

		pdev_info = sdev->hostdata;
		name = myrs_devstate_name(pdev_info->dev_state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       pdev_info->dev_state);
	}
	return ret;
}
static ssize_t raid_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	enum myrs_devstate new_state;
	unsigned short ldev_num;
	unsigned char status;

	if (!strncmp(buf, "offline", 7) ||
	    !strncmp(buf, "kill", 4))
		new_state = MYRS_DEVICE_OFFLINE;
	else if (!strncmp(buf, "online", 6))
		new_state = MYRS_DEVICE_ONLINE;
	else if (!strncmp(buf, "standby", 7))
		new_state = MYRS_DEVICE_STANDBY;
	else
		return -EINVAL;

	if (sdev->channel < cs->ctlr_info->physchan_present) {
		struct myrs_pdev_info *pdev_info = sdev->hostdata;
		struct myrs_devmap *pdev_devmap =
			(struct myrs_devmap *)&pdev_info->rsvd13;

		if (pdev_info->dev_state == new_state) {
			sdev_printk(KERN_INFO, sdev,
				    "Device already in %s\n",
				    myrs_devstate_name(new_state));
			return count;
		}
		status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
					     sdev->lun, pdev_devmap);
		if (status != MYRS_STATUS_SUCCESS)
			return -ENXIO;
		ldev_num = pdev_devmap->ldev_num;
	} else {
		struct myrs_ldev_info *ldev_info = sdev->hostdata;

		if (ldev_info->dev_state == new_state) {
			sdev_printk(KERN_INFO, sdev,
				    "Device already in %s\n",
				    myrs_devstate_name(new_state));
			return count;
		}
		ldev_num = ldev_info->ldev_num;
	}
	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE;
	mbox->set_devstate.state = new_state;
	mbox->set_devstate.ldev.ldev_num = ldev_num;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status == MYRS_STATUS_SUCCESS) {
		if (sdev->channel < cs->ctlr_info->physchan_present) {
			struct myrs_pdev_info *pdev_info = sdev->hostdata;

			pdev_info->dev_state = new_state;
		} else {
			struct myrs_ldev_info *ldev_info = sdev->hostdata;

			ldev_info->dev_state = new_state;
		}
		sdev_printk(KERN_INFO, sdev,
			    "Set device state to %s\n",
			    myrs_devstate_name(new_state));
		return count;
	}
	sdev_printk(KERN_INFO, sdev,
		    "Failed to set device state to %s, status 0x%02x\n",
		    myrs_devstate_name(new_state), status);
	return -EINVAL;
}
static DEVICE_ATTR_RW(raid_state);
static ssize_t raid_level_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	const char *name = NULL;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel >= cs->ctlr_info->physchan_present) {
		struct myrs_ldev_info *ldev_info;

		ldev_info = sdev->hostdata;
		name = myrs_raid_level_name(ldev_info->raid_level);
		if (!name)
			return snprintf(buf, 32, "Invalid (%02X)\n",
					ldev_info->raid_level);
	} else
		name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);

	return snprintf(buf, 32, "%s\n", name);
}
static DEVICE_ATTR_RO(raid_level);
static ssize_t rebuild_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	unsigned short ldev_num;
	unsigned char status;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return snprintf(buf, 32, "physical device - not rebuilding\n");

	ldev_info = sdev->hostdata;
	ldev_num = ldev_info->ldev_num;
	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}
	if (ldev_info->rbld_active) {
		return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
				(size_t)ldev_info->rbld_lba,
				(size_t)ldev_info->cfg_devsize);
	} else
		return snprintf(buf, 32, "not rebuilding\n");
}
static ssize_t rebuild_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	unsigned short ldev_num;
	unsigned char status;
	int rebuild, ret;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return -EINVAL;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	ldev_num = ldev_info->ldev_num;

	ret = kstrtoint(buf, 0, &rebuild);
	if (ret)
		return ret;

	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}

	if (rebuild && ldev_info->rbld_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Initiated; already in progress\n");
		return -EALREADY;
	}
	if (!rebuild && !ldev_info->rbld_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Cancelled; no rebuild in progress\n");
		return count;
	}

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	if (rebuild) {
		mbox->ldev_info.ldev.ldev_num = ldev_num;
		mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START;
	} else {
		mbox->ldev_info.ldev.ldev_num = ldev_num;
		mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP;
	}
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not %s, status 0x%02x\n",
			    rebuild ? "Initiated" : "Cancelled", status);
		ret = -EIO;
	} else {
		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
			    rebuild ? "Initiated" : "Cancelled");
		ret = count;
	}

	return ret;
}
static DEVICE_ATTR_RW(rebuild);
static ssize_t consistency_check_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	unsigned short ldev_num;
	unsigned char status;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return snprintf(buf, 32, "physical device - not checking\n");

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	ldev_num = ldev_info->ldev_num;
	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}
	if (ldev_info->cc_active)
		return snprintf(buf, 32, "checking block %zu of %zu\n",
				(size_t)ldev_info->cc_lba,
				(size_t)ldev_info->cfg_devsize);
	else
		return snprintf(buf, 32, "not checking\n");
}
static ssize_t consistency_check_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	unsigned short ldev_num;
	unsigned char status;
	int check, ret;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return -EINVAL;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	ldev_num = ldev_info->ldev_num;

	ret = kstrtoint(buf, 0, &check);
	if (ret)
		return ret;

	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}
	if (check && ldev_info->cc_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Consistency Check Not Initiated; already in progress\n");
		return -EALREADY;
	}
	if (!check && !ldev_info->cc_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Consistency Check Not Cancelled; check not in progress\n");
		return count;
	}

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	if (check) {
		mbox->cc.ldev.ldev_num = ldev_num;
		mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START;
		mbox->cc.restore_consistency = true;
		mbox->cc.initialized_area_only = false;
	} else {
		mbox->cc.ldev.ldev_num = ldev_num;
		mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP;
	}
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Consistency Check Not %s, status 0x%02x\n",
			    check ? "Initiated" : "Cancelled", status);
		ret = -EIO;
	} else {
		sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
			    check ? "Initiated" : "Cancelled");
		ret = count;
	}

	return ret;
}
static DEVICE_ATTR_RW(consistency_check);
static struct device_attribute *myrs_sdev_attrs[] = {
	&dev_attr_consistency_check,
	&dev_attr_rebuild,
	&dev_attr_raid_state,
	&dev_attr_raid_level,
	NULL,
};
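/*
 * Illustration (not in the original source; the H:C:T:L address is an
 * example only): the attributes above appear in the scsi_device sysfs
 * directory, e.g.:
 *
 *	cat /sys/class/scsi_device/2:2:0:0/device/raid_state
 *	echo standby > /sys/class/scsi_device/2:2:0:0/device/raid_state
 *	echo 1 > /sys/class/scsi_device/2:2:0:0/device/rebuild
 */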
static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	char serial[17];

	memcpy(serial, cs->ctlr_info->serial_number, 16);
	serial[16] = '\0';
	/* 16 serial characters, a newline and the terminating NUL */
	return snprintf(buf, 18, "%s\n", serial);
}
static DEVICE_ATTR_RO(serial);
static ssize_t ctlr_num_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 20, "%d\n", cs->host->host_no);
}
static DEVICE_ATTR_RO(ctlr_num);
static struct myrs_cpu_type_tbl {
	enum myrs_cpu_type type;
	char *name;
} myrs_cpu_type_names[] = {
	{ MYRS_CPUTYPE_i960CA, "i960CA" },
	{ MYRS_CPUTYPE_i960RD, "i960RD" },
	{ MYRS_CPUTYPE_i960RN, "i960RN" },
	{ MYRS_CPUTYPE_i960RP, "i960RP" },
	{ MYRS_CPUTYPE_NorthBay, "NorthBay" },
	{ MYRS_CPUTYPE_StrongArm, "StrongARM" },
	{ MYRS_CPUTYPE_i960RM, "i960RM" },
};
static ssize_t processor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	struct myrs_cpu_type_tbl *tbl;
	const char *first_processor = NULL;
	const char *second_processor = NULL;
	struct myrs_ctlr_info *info = cs->ctlr_info;
	ssize_t ret;
	int i;

	if (info->cpu[0].cpu_count) {
		tbl = myrs_cpu_type_names;
		for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
			if (tbl[i].type == info->cpu[0].cpu_type) {
				first_processor = tbl[i].name;
				break;
			}
		}
	}
	if (info->cpu[1].cpu_count) {
		tbl = myrs_cpu_type_names;
		for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
			if (tbl[i].type == info->cpu[1].cpu_type) {
				second_processor = tbl[i].name;
				break;
			}
		}
	}
	if (first_processor && second_processor)
		ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
			       "2: %s (%s, %d cpus)\n",
			       info->cpu[0].cpu_name,
			       first_processor, info->cpu[0].cpu_count,
			       info->cpu[1].cpu_name,
			       second_processor, info->cpu[1].cpu_count);
	else if (first_processor && !second_processor)
		ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
			       info->cpu[0].cpu_name,
			       first_processor, info->cpu[0].cpu_count);
	else if (!first_processor && second_processor)
		ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
			       info->cpu[1].cpu_name,
			       second_processor, info->cpu[1].cpu_count);
	else
		ret = snprintf(buf, 64, "1: absent\n2: absent\n");

	return ret;
}
static DEVICE_ATTR_RO(processor);
static ssize_t model_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 28, "%s\n", cs->model_name);
}
static DEVICE_ATTR_RO(model);

static ssize_t ctlr_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
}
static DEVICE_ATTR_RO(ctlr_type);

static ssize_t cache_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb);
}
static DEVICE_ATTR_RO(cache_size);

static ssize_t firmware_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 16, "%d.%02d-%02d\n",
			cs->ctlr_info->fw_major_version,
			cs->ctlr_info->fw_minor_version,
			cs->ctlr_info->fw_turn_number);
}
static DEVICE_ATTR_RO(firmware);
static ssize_t discovery_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	unsigned char status;

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		shost_printk(KERN_INFO, shost,
			     "Discovery Not Initiated, status %02X\n",
			     status);
		return -EINVAL;
	}
	shost_printk(KERN_INFO, shost, "Discovery Initiated\n");
	cs->next_evseq = 0;
	cs->needs_update = true;
	queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
	flush_delayed_work(&cs->monitor_work);
	shost_printk(KERN_INFO, shost, "Discovery Completed\n");

	return count;
}
static DEVICE_ATTR_WO(discovery);
static ssize_t flush_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	unsigned char status;

	status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
			     MYRS_RAID_CONTROLLER);
	if (status == MYRS_STATUS_SUCCESS) {
		shost_printk(KERN_INFO, shost, "Cache Flush Completed\n");
		return count;
	}
	shost_printk(KERN_INFO, shost,
		     "Cache Flush failed, status 0x%02x\n", status);
	return -EIO;
}
static DEVICE_ATTR_WO(flush_cache);
static ssize_t disable_enclosure_messages_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
}

static ssize_t disable_enclosure_messages_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	int value, ret;

	ret = kstrtoint(buf, 0, &value);
	if (ret)
		return ret;

	if (value > 2)
		return -EINVAL;

	cs->disable_enc_msg = value;
	return count;
}
static DEVICE_ATTR_RW(disable_enclosure_messages);
static struct device_attribute *myrs_shost_attrs[] = {
	&dev_attr_serial,
	&dev_attr_ctlr_num,
	&dev_attr_processor,
	&dev_attr_model,
	&dev_attr_ctlr_type,
	&dev_attr_cache_size,
	&dev_attr_firmware,
	&dev_attr_discovery,
	&dev_attr_flush_cache,
	&dev_attr_disable_enclosure_messages,
	NULL,
};
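/*
 * Illustration (not in the original source; the host number is an
 * example only): these host attributes appear in the Scsi_Host sysfs
 * directory, e.g.:
 *
 *	cat /sys/class/scsi_host/host2/firmware
 *	echo 1 > /sys/class/scsi_host/host2/discovery
 *	echo 1 > /sys/class/scsi_host/host2/flush_cache
 */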
/*
 * SCSI midlayer interface
 */
int myrs_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	struct myrs_hba *cs = shost_priv(shost);

	cs->reset(cs->io_base);
	return SUCCESS;
}
static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
		struct myrs_ldev_info *ldev_info)
{
	unsigned char modes[32], *mode_pg;
	bool dbd;
	size_t mode_len;

	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
	if (dbd) {
		mode_len = 24;
		mode_pg = &modes[4];
	} else {
		mode_len = 32;
		mode_pg = &modes[12];
	}
	memset(modes, 0, sizeof(modes));
	modes[0] = mode_len - 1;
	modes[2] = 0x10; /* Enable FUA */
	if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO)
		modes[2] |= 0x80;
	if (!dbd) {
		unsigned char *block_desc = &modes[4];

		modes[3] = 8;
		put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]);
		put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]);
	}
	mode_pg[0] = 0x08;
	mode_pg[1] = 0x12;
	if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED)
		mode_pg[2] |= 0x01;
	if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
	    ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
		mode_pg[2] |= 0x04;
	if (ldev_info->cacheline_size) {
		mode_pg[2] |= 0x08;
		put_unaligned_be16(1 << ldev_info->cacheline_size,
				   &mode_pg[14]);
	}

	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}
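/*
 * Layout note (descriptive, not from the original source): the buffer
 * built above is a MODE SENSE(6) response for the caching mode page
 * (0x08): a 4-byte parameter header, an optional 8-byte block
 * descriptor when DBD is clear (carrying the configured device size
 * and block length), then the 0x12-byte mode page with the RCD and WCE
 * bits and the cache segment size taken from the logical device's
 * cache settings.
 */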
1584 static int myrs_queuecommand(struct Scsi_Host *shost,
1585 struct scsi_cmnd *scmd)
1587 struct myrs_hba *cs = shost_priv(shost);
1588 struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1589 union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
1590 struct scsi_device *sdev = scmd->device;
1591 union myrs_sgl *hw_sge;
1592 dma_addr_t sense_addr;
1593 struct scatterlist *sgl;
1594 unsigned long flags, timeout;
1595 int nsge;
1597 if (!scmd->device->hostdata) {
1598 scmd->result = (DID_NO_CONNECT << 16);
1599 scmd->scsi_done(scmd);
1600 return 0;
1603 switch (scmd->cmnd[0]) {
1604 case REPORT_LUNS:
1605 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
1606 0x20, 0x0);
1607 scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1608 scmd->scsi_done(scmd);
1609 return 0;
1610 case MODE_SENSE:
1611 if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
1612 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1614 if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1615 (scmd->cmnd[2] & 0x3F) != 0x08) {
1616 /* Illegal request, invalid field in CDB */
1617 scsi_build_sense_buffer(0, scmd->sense_buffer,
1618 ILLEGAL_REQUEST, 0x24, 0);
1619 scmd->result = (DRIVER_SENSE << 24) |
1620 SAM_STAT_CHECK_CONDITION;
1621 } else {
1622 myrs_mode_sense(cs, scmd, ldev_info);
1623 scmd->result = (DID_OK << 16);
1625 scmd->scsi_done(scmd);
1626 return 0;
1628 break;
1631 myrs_reset_cmd(cmd_blk);
1632 cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
1633 &sense_addr);
1634 if (!cmd_blk->sense)
1635 return SCSI_MLQUEUE_HOST_BUSY;
1636 cmd_blk->sense_addr = sense_addr;
1638 timeout = scmd->request->timeout;
1639 if (scmd->cmd_len <= 10) {
1640 if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
1641 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1643 mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10;
1644 mbox->SCSI_10.pdev.lun = ldev_info->lun;
1645 mbox->SCSI_10.pdev.target = ldev_info->target;
1646 mbox->SCSI_10.pdev.channel = ldev_info->channel;
1647 mbox->SCSI_10.pdev.ctlr = 0;
1648 } else {
1649 mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU;
1650 mbox->SCSI_10.pdev.lun = sdev->lun;
1651 mbox->SCSI_10.pdev.target = sdev->id;
1652 mbox->SCSI_10.pdev.channel = sdev->channel;
1654 mbox->SCSI_10.id = scmd->request->tag + 3;
1655 mbox->SCSI_10.control.dma_ctrl_to_host =
1656 (scmd->sc_data_direction == DMA_FROM_DEVICE);
1657 if (scmd->request->cmd_flags & REQ_FUA)
1658 mbox->SCSI_10.control.fua = true;
1659 mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
1660 mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
1661 mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE;
1662 mbox->SCSI_10.cdb_len = scmd->cmd_len;
1663 if (timeout > 60) {
1664 mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
1665 mbox->SCSI_10.tmo.tmo_val = timeout / 60;
1666 } else {
1667 mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
1668 mbox->SCSI_10.tmo.tmo_val = timeout;
1670 memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len);
1671 hw_sge = &mbox->SCSI_10.dma_addr;
1672 cmd_blk->dcdb = NULL;
1673 } else {
1674 dma_addr_t dcdb_dma;
1676 cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
1677 &dcdb_dma);
1678 if (!cmd_blk->dcdb) {
1679 dma_pool_free(cs->sense_pool, cmd_blk->sense,
1680 cmd_blk->sense_addr);
1681 cmd_blk->sense = NULL;
1682 cmd_blk->sense_addr = 0;
1683 return SCSI_MLQUEUE_HOST_BUSY;
1685 cmd_blk->dcdb_dma = dcdb_dma;
1686 if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
1687 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1689 mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256;
1690 mbox->SCSI_255.pdev.lun = ldev_info->lun;
1691 mbox->SCSI_255.pdev.target = ldev_info->target;
1692 mbox->SCSI_255.pdev.channel = ldev_info->channel;
1693 mbox->SCSI_255.pdev.ctlr = 0;
1694 } else {
1695 mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU;
1696 mbox->SCSI_255.pdev.lun = sdev->lun;
1697 mbox->SCSI_255.pdev.target = sdev->id;
1698 mbox->SCSI_255.pdev.channel = sdev->channel;
1700 mbox->SCSI_255.id = scmd->request->tag + 3;
1701 mbox->SCSI_255.control.dma_ctrl_to_host =
1702 (scmd->sc_data_direction == DMA_FROM_DEVICE);
1703 if (scmd->request->cmd_flags & REQ_FUA)
1704 mbox->SCSI_255.control.fua = true;
1705 mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
1706 mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
1707 mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE;
1708 mbox->SCSI_255.cdb_len = scmd->cmd_len;
1709 mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma;
1710 if (timeout > 60) {
1711 mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
1712 mbox->SCSI_255.tmo.tmo_val = timeout / 60;
1713 } else {
1714 mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
1715 mbox->SCSI_255.tmo.tmo_val = timeout;
1716 }
1717 memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len);
1718 hw_sge = &mbox->SCSI_255.dma_addr;
1719 }
1720 if (scmd->sc_data_direction == DMA_NONE)
1721 goto submit;
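/*
 * Scatter/gather strategy: up to two mapped segments fit into the
 * mailbox's inline SG slots; longer lists are allocated from
 * sg_pool and linked in through the add_sge_mem control bit.
 */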
1722 nsge = scsi_dma_map(scmd);
1723 if (nsge == 1) {
1724 sgl = scsi_sglist(scmd);
1725 hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl);
1726 hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl);
1727 } else {
1728 struct myrs_sge *hw_sgl;
1729 dma_addr_t hw_sgl_addr;
1730 int i;
1732 if (nsge > 2) {
1733 hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
1734 &hw_sgl_addr);
1735 if (WARN_ON(!hw_sgl)) {
1736 if (cmd_blk->dcdb) {
1737 dma_pool_free(cs->dcdb_pool,
1738 cmd_blk->dcdb,
1739 cmd_blk->dcdb_dma);
1740 cmd_blk->dcdb = NULL;
1741 cmd_blk->dcdb_dma = 0;
1742 }
1743 dma_pool_free(cs->sense_pool,
1744 cmd_blk->sense,
1745 cmd_blk->sense_addr);
1746 cmd_blk->sense = NULL;
1747 cmd_blk->sense_addr = 0;
1748 return SCSI_MLQUEUE_HOST_BUSY;
1749 }
1750 cmd_blk->sgl = hw_sgl;
1751 cmd_blk->sgl_addr = hw_sgl_addr;
1752 if (scmd->cmd_len <= 10)
1753 mbox->SCSI_10.control.add_sge_mem = true;
1754 else
1755 mbox->SCSI_255.control.add_sge_mem = true;
1756 hw_sge->ext.sge0_len = nsge;
1757 hw_sge->ext.sge0_addr = cmd_blk->sgl_addr;
1758 } else
1759 hw_sgl = hw_sge->sge;
1761 scsi_for_each_sg(scmd, sgl, nsge, i) {
1762 if (WARN_ON(!hw_sgl)) {
1763 scsi_dma_unmap(scmd);
1764 scmd->result = (DID_ERROR << 16);
1765 scmd->scsi_done(scmd);
1766 return 0;
1767 }
1768 hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
1769 hw_sgl->sge_count = (u64)sg_dma_len(sgl);
1770 hw_sgl++;
1771 }
1772 }
1773 submit:
1774 spin_lock_irqsave(&cs->queue_lock, flags);
1775 myrs_qcmd(cs, cmd_blk);
1776 spin_unlock_irqrestore(&cs->queue_lock, flags);
1778 return 0;
1779 }
1781 static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
1782 struct scsi_device *sdev)
1783 {
1784 unsigned short ldev_num;
1785 unsigned int chan_offset =
1786 sdev->channel - cs->ctlr_info->physchan_present;
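/*
 * Logical volumes live on the virtual channels after the physical
 * ones, one target per volume: e.g. with physchan_present == 2 and
 * max_id == 16 (illustrative values), device 3:5:0 maps to logical
 * device number (3 - 2) * 16 + 5 = 21.
 */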
1788 ldev_num = sdev->id + chan_offset * sdev->host->max_id;
1790 return ldev_num;
1791 }
1793 static int myrs_slave_alloc(struct scsi_device *sdev)
1794 {
1795 struct myrs_hba *cs = shost_priv(sdev->host);
1796 unsigned char status;
1798 if (sdev->channel > sdev->host->max_channel)
1799 return 0;
1801 if (sdev->channel >= cs->ctlr_info->physchan_present) {
1802 struct myrs_ldev_info *ldev_info;
1803 unsigned short ldev_num;
1805 if (sdev->lun > 0)
1806 return -ENXIO;
1808 ldev_num = myrs_translate_ldev(cs, sdev);
1810 ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
1811 if (!ldev_info)
1812 return -ENOMEM;
1814 status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
1815 if (status != MYRS_STATUS_SUCCESS) {
1816 sdev->hostdata = NULL;
1817 kfree(ldev_info);
1818 } else {
1819 enum raid_level level;
1821 dev_dbg(&sdev->sdev_gendev,
1822 "Logical device mapping %d:%d:%d -> %d\n",
1823 ldev_info->channel, ldev_info->target,
1824 ldev_info->lun, ldev_info->ldev_num);
1826 sdev->hostdata = ldev_info;
1827 switch (ldev_info->raid_level) {
1828 case MYRS_RAID_LEVEL0:
1829 level = RAID_LEVEL_LINEAR;
1830 break;
1831 case MYRS_RAID_LEVEL1:
1832 level = RAID_LEVEL_1;
1833 break;
1834 case MYRS_RAID_LEVEL3:
1835 case MYRS_RAID_LEVEL3F:
1836 case MYRS_RAID_LEVEL3L:
1837 level = RAID_LEVEL_3;
1838 break;
1839 case MYRS_RAID_LEVEL5:
1840 case MYRS_RAID_LEVEL5L:
1841 level = RAID_LEVEL_5;
1842 break;
1843 case MYRS_RAID_LEVEL6:
1844 level = RAID_LEVEL_6;
1845 break;
1846 case MYRS_RAID_LEVELE:
1847 case MYRS_RAID_NEWSPAN:
1848 case MYRS_RAID_SPAN:
1849 level = RAID_LEVEL_LINEAR;
1850 break;
1851 case MYRS_RAID_JBOD:
1852 level = RAID_LEVEL_JBOD;
1853 break;
1854 default:
1855 level = RAID_LEVEL_UNKNOWN;
1856 break;
1857 }
1858 raid_set_level(myrs_raid_template,
1859 &sdev->sdev_gendev, level);
1860 if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) {
1861 const char *name;
1863 name = myrs_devstate_name(ldev_info->dev_state);
1864 sdev_printk(KERN_DEBUG, sdev,
1865 "logical device in state %s\n",
1866 name ? name : "Invalid");
1869 } else {
1870 struct myrs_pdev_info *pdev_info;
1872 pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1873 if (!pdev_info)
1874 return -ENOMEM;
1876 status = myrs_get_pdev_info(cs, sdev->channel,
1877 sdev->id, sdev->lun,
1878 pdev_info);
1879 if (status != MYRS_STATUS_SUCCESS) {
1880 sdev->hostdata = NULL;
1881 kfree(pdev_info);
1882 return -ENXIO;
1883 }
1884 sdev->hostdata = pdev_info;
1885 }
1886 return 0;
1887 }
1889 static int myrs_slave_configure(struct scsi_device *sdev)
1890 {
1891 struct myrs_hba *cs = shost_priv(sdev->host);
1892 struct myrs_ldev_info *ldev_info;
1894 if (sdev->channel > sdev->host->max_channel)
1895 return -ENXIO;
1897 if (sdev->channel < cs->ctlr_info->physchan_present) {
1898 /* Skip HBA device */
1899 if (sdev->type == TYPE_RAID)
1900 return -ENXIO;
1901 sdev->no_uld_attach = 1;
1902 return 0;
1903 }
1904 if (sdev->lun != 0)
1905 return -ENXIO;
1907 ldev_info = sdev->hostdata;
1908 if (!ldev_info)
1909 return -ENXIO;
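/*
 * Propagating the controller's write-back setting via
 * wce_default_on lets the SCSI disk driver assume write-back
 * caching for this volume without relying on the emulated caching
 * mode page.
 */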
1910 if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
1911 ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
1912 sdev->wce_default_on = 1;
1913 sdev->tagged_supported = 1;
1914 return 0;
1915 }
1917 static void myrs_slave_destroy(struct scsi_device *sdev)
1918 {
1919 kfree(sdev->hostdata);
1920 }
1922 struct scsi_host_template myrs_template = {
1923 .module = THIS_MODULE,
1924 .name = "DAC960",
1925 .proc_name = "myrs",
1926 .queuecommand = myrs_queuecommand,
1927 .eh_host_reset_handler = myrs_host_reset,
1928 .slave_alloc = myrs_slave_alloc,
1929 .slave_configure = myrs_slave_configure,
1930 .slave_destroy = myrs_slave_destroy,
1931 .cmd_size = sizeof(struct myrs_cmdblk),
1932 .shost_attrs = myrs_shost_attrs,
1933 .sdev_attrs = myrs_sdev_attrs,
1934 .this_id = -1,
1935 };
1937 static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
1938 const struct pci_device_id *entry)
1939 {
1940 struct Scsi_Host *shost;
1941 struct myrs_hba *cs;
1943 shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba));
1944 if (!shost)
1945 return NULL;
1947 shost->max_cmd_len = 16;
1948 shost->max_lun = 256;
1949 cs = shost_priv(shost);
1950 mutex_init(&cs->dcmd_mutex);
1951 mutex_init(&cs->cinfo_mutex);
1952 cs->host = shost;
1954 return cs;
1955 }
1957 /*
1958 * RAID template functions
1959 */
1961 /**
1962 * myrs_is_raid - return boolean indicating device is raid volume
1963 * @dev: the device struct object
1964 */
1965 static int
1966 myrs_is_raid(struct device *dev)
1967 {
1968 struct scsi_device *sdev = to_scsi_device(dev);
1969 struct myrs_hba *cs = shost_priv(sdev->host);
1971 return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0;
1972 }
1974 /**
1975 * myrs_get_resync - get raid volume resync percent complete
1976 * @dev: the device struct object
1977 */
1978 static void
1979 myrs_get_resync(struct device *dev)
1980 {
1981 struct scsi_device *sdev = to_scsi_device(dev);
1982 struct myrs_hba *cs = shost_priv(sdev->host);
1983 struct myrs_ldev_info *ldev_info = sdev->hostdata;
1984 u64 percent_complete = 0;
1985 u8 status;
1987 if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
1988 return;
1989 if (ldev_info->rbld_active) {
1990 unsigned short ldev_num = ldev_info->ldev_num;
1992 status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
1993 percent_complete = ldev_info->rbld_lba * 100;
1994 do_div(percent_complete, ldev_info->cfg_devsize);
1995 }
1996 raid_set_resync(myrs_raid_template, dev, percent_complete);
1997 }
1999 /**
2000 * myrs_get_state - get raid volume status
2001 * @dev: the device struct object
2002 */
2003 static void
2004 myrs_get_state(struct device *dev)
2005 {
2006 struct scsi_device *sdev = to_scsi_device(dev);
2007 struct myrs_hba *cs = shost_priv(sdev->host);
2008 struct myrs_ldev_info *ldev_info = sdev->hostdata;
2009 enum raid_state state = RAID_STATE_UNKNOWN;
2011 if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
2012 state = RAID_STATE_UNKNOWN;
2013 else {
2014 switch (ldev_info->dev_state) {
2015 case MYRS_DEVICE_ONLINE:
2016 state = RAID_STATE_ACTIVE;
2017 break;
2018 case MYRS_DEVICE_SUSPECTED_CRITICAL:
2019 case MYRS_DEVICE_CRITICAL:
2020 state = RAID_STATE_DEGRADED;
2021 break;
2022 case MYRS_DEVICE_REBUILD:
2023 state = RAID_STATE_RESYNCING;
2024 break;
2025 case MYRS_DEVICE_UNCONFIGURED:
2026 case MYRS_DEVICE_INVALID_STATE:
2027 state = RAID_STATE_UNKNOWN;
2028 break;
2029 default:
2030 state = RAID_STATE_OFFLINE;
2031 }
2032 }
2033 raid_set_state(myrs_raid_template, dev, state);
2034 }
2036 struct raid_function_template myrs_raid_functions = {
2037 .cookie = &myrs_template,
2038 .is_raid = myrs_is_raid,
2039 .get_resync = myrs_get_resync,
2040 .get_state = myrs_get_state,
2041 };
2043 /*
2044 * PCI interface functions
2045 */
2046 void myrs_flush_cache(struct myrs_hba *cs)
2047 {
2048 myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
2049 }
2051 static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
2052 struct scsi_cmnd *scmd)
2053 {
2054 unsigned char status;
2056 if (!cmd_blk)
2057 return;
2059 scsi_dma_unmap(scmd);
2060 status = cmd_blk->status;
2061 if (cmd_blk->sense) {
2062 if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) {
2063 unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;
2065 if (sense_len > cmd_blk->sense_len)
2066 sense_len = cmd_blk->sense_len;
2067 memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
2068 }
2069 dma_pool_free(cs->sense_pool, cmd_blk->sense,
2070 cmd_blk->sense_addr);
2071 cmd_blk->sense = NULL;
2072 cmd_blk->sense_addr = 0;
2073 }
2074 if (cmd_blk->dcdb) {
2075 dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
2076 cmd_blk->dcdb_dma);
2077 cmd_blk->dcdb = NULL;
2078 cmd_blk->dcdb_dma = 0;
2079 }
2080 if (cmd_blk->sgl) {
2081 dma_pool_free(cs->sg_pool, cmd_blk->sgl,
2082 cmd_blk->sgl_addr);
2083 cmd_blk->sgl = NULL;
2084 cmd_blk->sgl_addr = 0;
2085 }
2086 if (cmd_blk->residual)
2087 scsi_set_resid(scmd, cmd_blk->residual);
2088 if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE ||
2089 status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2)
2090 scmd->result = (DID_BAD_TARGET << 16);
2091 else
2092 scmd->result = (DID_OK << 16) | status;
2093 scmd->scsi_done(scmd);
2094 }
2096 static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
2097 {
2098 if (!cmd_blk)
2099 return;
2101 if (cmd_blk->complete) {
2102 complete(cmd_blk->complete);
2103 cmd_blk->complete = NULL;
2104 }
2105 }
2107 static void myrs_monitor(struct work_struct *work)
2108 {
2109 struct myrs_hba *cs = container_of(work, struct myrs_hba,
2110 monitor_work.work);
2111 struct Scsi_Host *shost = cs->host;
2112 struct myrs_ctlr_info *info = cs->ctlr_info;
2113 unsigned int epoch = cs->fwstat_buf->epoch;
2114 unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL;
2115 unsigned char status;
2117 dev_dbg(&shost->shost_gendev, "monitor tick\n");
2119 status = myrs_get_fwstatus(cs);
2121 if (cs->needs_update) {
2122 cs->needs_update = false;
2123 mutex_lock(&cs->cinfo_mutex);
2124 status = myrs_get_ctlr_info(cs);
2125 mutex_unlock(&cs->cinfo_mutex);
2126 }
2127 if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
2128 status = myrs_get_event(cs, cs->next_evseq,
2129 cs->event_buf);
2130 if (status == MYRS_STATUS_SUCCESS) {
2131 myrs_log_event(cs, cs->event_buf);
2132 cs->next_evseq++;
2133 interval = 1;
2134 }
2135 }
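/*
 * An event was consumed above, so the monitor is re-armed with a
 * one-jiffy delay to drain any further queued events quickly
 * instead of waiting out a full monitor interval per event.
 */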
2137 if (time_after(jiffies, cs->secondary_monitor_time
2138 + MYRS_SECONDARY_MONITOR_INTERVAL))
2139 cs->secondary_monitor_time = jiffies;
2141 if (info->bg_init_active +
2142 info->ldev_init_active +
2143 info->pdev_init_active +
2144 info->cc_active +
2145 info->rbld_active +
2146 info->exp_active != 0) {
2147 struct scsi_device *sdev;
2149 shost_for_each_device(sdev, shost) {
2150 struct myrs_ldev_info *ldev_info;
2151 int ldev_num;
2153 if (sdev->channel < info->physchan_present)
2154 continue;
2155 ldev_info = sdev->hostdata;
2156 if (!ldev_info)
2157 continue;
2158 ldev_num = ldev_info->ldev_num;
2159 myrs_get_ldev_info(cs, ldev_num, ldev_info);
2160 }
2161 cs->needs_update = true;
2162 }
2163 if (epoch == cs->epoch &&
2164 cs->fwstat_buf->next_evseq == cs->next_evseq &&
2165 (cs->needs_update == false ||
2166 time_before(jiffies, cs->primary_monitor_time
2167 + MYRS_PRIMARY_MONITOR_INTERVAL))) {
2168 interval = MYRS_SECONDARY_MONITOR_INTERVAL;
2169 }
2171 if (interval > 1)
2172 cs->primary_monitor_time = jiffies;
2173 queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
2174 }
2176 static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
2177 {
2178 struct Scsi_Host *shost = cs->host;
2179 size_t elem_size, elem_align;
2181 elem_align = sizeof(struct myrs_sge);
2182 elem_size = shost->sg_tablesize * elem_align;
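/*
 * Pool sizing sketch: one sg_pool element must hold a worst-case
 * scatterlist, i.e. sg_tablesize entries of sizeof(struct myrs_sge)
 * (16 bytes) each; e.g. an illustrative sg_tablesize of 64 yields
 * 1024-byte elements aligned to the entry size.
 */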
2183 cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
2184 elem_size, elem_align, 0);
2185 if (cs->sg_pool == NULL) {
2186 shost_printk(KERN_ERR, shost,
2187 "Failed to allocate SG pool\n");
2188 return false;
2189 }
2191 cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
2192 MYRS_SENSE_SIZE, sizeof(int), 0);
2193 if (cs->sense_pool == NULL) {
2194 dma_pool_destroy(cs->sg_pool);
2195 cs->sg_pool = NULL;
2196 shost_printk(KERN_ERR, shost,
2197 "Failed to allocate sense data pool\n");
2198 return false;
2199 }
2201 cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
2202 MYRS_DCDB_SIZE,
2203 sizeof(unsigned char), 0);
2204 if (!cs->dcdb_pool) {
2205 dma_pool_destroy(cs->sg_pool);
2206 cs->sg_pool = NULL;
2207 dma_pool_destroy(cs->sense_pool);
2208 cs->sense_pool = NULL;
2209 shost_printk(KERN_ERR, shost,
2210 "Failed to allocate DCDB pool\n");
2211 return false;
2212 }
2214 snprintf(cs->work_q_name, sizeof(cs->work_q_name),
2215 "myrs_wq_%d", shost->host_no);
2216 cs->work_q = create_singlethread_workqueue(cs->work_q_name);
2217 if (!cs->work_q) {
2218 dma_pool_destroy(cs->dcdb_pool);
2219 cs->dcdb_pool = NULL;
2220 dma_pool_destroy(cs->sg_pool);
2221 cs->sg_pool = NULL;
2222 dma_pool_destroy(cs->sense_pool);
2223 cs->sense_pool = NULL;
2224 shost_printk(KERN_ERR, shost,
2225 "Failed to create workqueue\n");
2226 return false;
2227 }
2229 /* Initialize the Monitoring Timer. */
2230 INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
2231 queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
2233 return true;
2234 }
2236 static void myrs_destroy_mempools(struct myrs_hba *cs)
2237 {
2238 cancel_delayed_work_sync(&cs->monitor_work);
2239 destroy_workqueue(cs->work_q);
2241 dma_pool_destroy(cs->sg_pool);
2242 dma_pool_destroy(cs->dcdb_pool);
2243 dma_pool_destroy(cs->sense_pool);
2244 }
2246 static void myrs_unmap(struct myrs_hba *cs)
2247 {
2248 kfree(cs->event_buf);
2249 kfree(cs->ctlr_info);
2250 if (cs->fwstat_buf) {
2251 dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
2252 cs->fwstat_buf, cs->fwstat_addr);
2253 cs->fwstat_buf = NULL;
2254 }
2255 if (cs->first_stat_mbox) {
2256 dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
2257 cs->first_stat_mbox, cs->stat_mbox_addr);
2258 cs->first_stat_mbox = NULL;
2259 }
2260 if (cs->first_cmd_mbox) {
2261 dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
2262 cs->first_cmd_mbox, cs->cmd_mbox_addr);
2263 cs->first_cmd_mbox = NULL;
2264 }
2265 }
2267 static void myrs_cleanup(struct myrs_hba *cs)
2268 {
2269 struct pci_dev *pdev = cs->pdev;
2271 /* Free the memory mailbox, status, and related structures */
2272 myrs_unmap(cs);
2274 if (cs->mmio_base) {
2275 cs->disable_intr(cs->io_base);
2276 iounmap(cs->mmio_base);
2277 }
2278 if (cs->irq)
2279 free_irq(cs->irq, cs);
2280 if (cs->io_addr)
2281 release_region(cs->io_addr, 0x80);
2283 pci_set_drvdata(pdev, NULL);
2284 pci_disable_device(pdev);
2285 scsi_host_put(cs->host);
2286 }
2288 static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
2289 const struct pci_device_id *entry)
2290 {
2291 struct myrs_privdata *privdata =
2292 (struct myrs_privdata *)entry->driver_data;
2293 irq_handler_t irq_handler = privdata->irq_handler;
2294 unsigned int mmio_size = privdata->mmio_size;
2295 struct myrs_hba *cs = NULL;
2297 cs = myrs_alloc_host(pdev, entry);
2298 if (!cs) {
2299 dev_err(&pdev->dev, "Unable to allocate Controller\n");
2300 return NULL;
2301 }
2302 cs->pdev = pdev;
2304 if (pci_enable_device(pdev))
2305 goto Failure;
2307 cs->pci_addr = pci_resource_start(pdev, 0);
2309 pci_set_drvdata(pdev, cs);
2310 spin_lock_init(&cs->queue_lock);
2311 /* Map the Controller Register Window. */
2312 if (mmio_size < PAGE_SIZE)
2313 mmio_size = PAGE_SIZE;
2314 cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size);
2315 if (cs->mmio_base == NULL) {
2316 dev_err(&pdev->dev,
2317 "Unable to map Controller Register Window\n");
2318 goto Failure;
2319 }
2321 cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
2322 if (privdata->hw_init(pdev, cs, cs->io_base))
2323 goto Failure;
2325 /* Acquire shared access to the IRQ Channel. */
2326 if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
2327 dev_err(&pdev->dev,
2328 "Unable to acquire IRQ Channel %d\n", pdev->irq);
2329 goto Failure;
2330 }
2331 cs->irq = pdev->irq;
2332 return cs;
2334 Failure:
2335 dev_err(&pdev->dev,
2336 "Failed to initialize Controller\n");
2337 myrs_cleanup(cs);
2338 return NULL;
2339 }
2341 /*
2342 * myrs_err_status reports Controller BIOS Messages passed through
2343 * the Error Status Register when the driver performs the BIOS handshaking.
2344 * It returns true for fatal errors and false otherwise.
2345 */
2347 static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
2348 unsigned char parm0, unsigned char parm1)
2349 {
2350 struct pci_dev *pdev = cs->pdev;
2352 switch (status) {
2353 case 0x00:
2354 dev_info(&pdev->dev,
2355 "Physical Device %d:%d Not Responding\n",
2356 parm1, parm0);
2357 break;
2358 case 0x08:
2359 dev_notice(&pdev->dev, "Spinning Up Drives\n");
2360 break;
2361 case 0x30:
2362 dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2363 break;
2364 case 0x60:
2365 dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2366 break;
2367 case 0x70:
2368 dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2369 break;
2370 case 0x90:
2371 dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2372 parm1, parm0);
2373 break;
2374 case 0xA0:
2375 dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2376 break;
2377 case 0xB0:
2378 dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2379 break;
2380 case 0xD0:
2381 dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2382 break;
2383 case 0xF0:
2384 dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2385 return true;
2386 default:
2387 dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2388 status);
2389 return true;
2390 }
2391 return false;
2392 }
2394 /*
2395 * Hardware-specific functions
2396 */
2398 /*
2399 * DAC960 GEM Series Controllers.
2400 */
2402 static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base)
2403 {
2404 __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
2406 writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
2407 }
2409 static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
2410 {
2411 __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24);
2413 writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
2414 }
2416 static inline void DAC960_GEM_gen_intr(void __iomem *base)
2417 {
2418 __le32 val = cpu_to_le32(DAC960_GEM_IDB_GEN_IRQ << 24);
2420 writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
2421 }
2423 static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
2424 {
2425 __le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);
2427 writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
2428 }
2430 static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base)
2431 {
2432 __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
2434 writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
2435 }
2437 static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base)
2438 {
2439 __le32 val;
2441 val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
2442 return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL;
2443 }
2445 static inline bool DAC960_GEM_init_in_progress(void __iomem *base)
2446 {
2447 __le32 val;
2449 val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
2450 return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS;
2451 }
2453 static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
2454 {
2455 __le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24);
2457 writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
2458 }
2460 static inline void DAC960_GEM_ack_mem_mbox_intr(void __iomem *base)
2461 {
2462 __le32 val = cpu_to_le32(DAC960_GEM_ODB_MMBOX_ACK_IRQ << 24);
2464 writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
2465 }
2467 static inline void DAC960_GEM_ack_intr(void __iomem *base)
2468 {
2469 __le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
2470 DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24);
2472 writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
2473 }
2475 static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
2476 {
2477 __le32 val;
2479 val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
2480 return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
2481 }
2483 static inline bool DAC960_GEM_mem_mbox_status_available(void __iomem *base)
2484 {
2485 __le32 val;
2487 val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
2488 return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_MMBOX_STS_AVAIL;
2489 }
2491 static inline void DAC960_GEM_enable_intr(void __iomem *base)
2492 {
2493 __le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
2494 DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);
2495 writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET);
2496 }
2498 static inline void DAC960_GEM_disable_intr(void __iomem *base)
2499 {
2500 __le32 val = 0;
2502 writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
2503 }
2505 static inline bool DAC960_GEM_intr_enabled(void __iomem *base)
2506 {
2507 __le32 val;
2509 val = readl(base + DAC960_GEM_IRQMASK_READ_OFFSET);
2510 return !((le32_to_cpu(val) >> 24) &
2511 (DAC960_GEM_IRQMASK_HWMBOX_IRQ |
2512 DAC960_GEM_IRQMASK_MMBOX_IRQ));
2513 }
2515 static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
2516 union myrs_cmd_mbox *mbox)
2517 {
2518 memcpy(&mem_mbox->words[1], &mbox->words[1],
2519 sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
2520 /* Barrier to avoid reordering */
2521 wmb();
2522 mem_mbox->words[0] = mbox->words[0];
2523 /* Barrier to force PCI access */
2524 mb();
2525 }
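/*
 * Ordering note for the helper above: words[0] carries the opcode
 * and command id and appears to double as the slot-valid marker, so
 * it is written last, behind a wmb(), to keep the controller from
 * sampling a half-copied mailbox entry.
 */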
2527 static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
2528 dma_addr_t cmd_mbox_addr)
2529 {
2530 dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
2531 }
2533 static inline unsigned short DAC960_GEM_read_cmd_ident(void __iomem *base)
2534 {
2535 return readw(base + DAC960_GEM_CMDSTS_OFFSET);
2536 }
2538 static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
2539 {
2540 return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
2541 }
2543 static inline bool
2544 DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error,
2545 unsigned char *param0, unsigned char *param1)
2546 {
2547 __le32 val;
2549 val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET);
2550 if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING))
2551 return false;
2552 *error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24);
2553 *param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0);
2554 *param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1);
2555 writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET);
2556 return true;
2557 }
2559 static inline unsigned char
2560 DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
2561 {
2562 unsigned char status;
2564 while (DAC960_GEM_hw_mbox_is_full(base))
2565 udelay(1);
2566 DAC960_GEM_write_hw_mbox(base, mbox_addr);
2567 DAC960_GEM_hw_mbox_new_cmd(base);
2568 while (!DAC960_GEM_hw_mbox_status_available(base))
2569 udelay(1);
2570 status = DAC960_GEM_read_cmd_status(base);
2571 DAC960_GEM_ack_hw_mbox_intr(base);
2572 DAC960_GEM_ack_hw_mbox_status(base);
2574 return status;
2575 }
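/*
 * Hardware mailbox handshake used above, in outline:
 *   1. wait for the inbound mailbox to drain,
 *   2. write the DMA address of the command mailbox,
 *   3. ring the "new command" doorbell,
 *   4. poll for status, read it, then ack interrupt and status.
 * The BA and LP variants below follow the same sequence with their
 * own register offsets.
 */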
2577 static int DAC960_GEM_hw_init(struct pci_dev *pdev,
2578 struct myrs_hba *cs, void __iomem *base)
2579 {
2580 int timeout = 0;
2581 unsigned char status, parm0, parm1;
2583 DAC960_GEM_disable_intr(base);
2584 DAC960_GEM_ack_hw_mbox_status(base);
2585 udelay(1000);
2586 while (DAC960_GEM_init_in_progress(base) &&
2587 timeout < MYRS_MAILBOX_TIMEOUT) {
2588 if (DAC960_GEM_read_error_status(base, &status,
2589 &parm0, &parm1) &&
2590 myrs_err_status(cs, status, parm0, parm1))
2591 return -EIO;
2592 udelay(10);
2593 timeout++;
2594 }
2595 if (timeout == MYRS_MAILBOX_TIMEOUT) {
2596 dev_err(&pdev->dev,
2597 "Timeout waiting for Controller Initialisation\n");
2598 return -ETIMEDOUT;
2599 }
2600 if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) {
2601 dev_err(&pdev->dev,
2602 "Unable to Enable Memory Mailbox Interface\n");
2603 DAC960_GEM_reset_ctrl(base);
2604 return -EAGAIN;
2605 }
2606 DAC960_GEM_enable_intr(base);
2607 cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox;
2608 cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd;
2609 cs->disable_intr = DAC960_GEM_disable_intr;
2610 cs->reset = DAC960_GEM_reset_ctrl;
2611 return 0;
2612 }
2614 static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
2615 {
2616 struct myrs_hba *cs = arg;
2617 void __iomem *base = cs->io_base;
2618 struct myrs_stat_mbox *next_stat_mbox;
2619 unsigned long flags;
2621 spin_lock_irqsave(&cs->queue_lock, flags);
2622 DAC960_GEM_ack_intr(base);
2623 next_stat_mbox = cs->next_stat_mbox;
2624 while (next_stat_mbox->id > 0) {
2625 unsigned short id = next_stat_mbox->id;
2626 struct scsi_cmnd *scmd = NULL;
2627 struct myrs_cmdblk *cmd_blk = NULL;
2629 if (id == MYRS_DCMD_TAG)
2630 cmd_blk = &cs->dcmd_blk;
2631 else if (id == MYRS_MCMD_TAG)
2632 cmd_blk = &cs->mcmd_blk;
2633 else {
2634 scmd = scsi_host_find_tag(cs->host, id - 3);
2635 if (scmd)
2636 cmd_blk = scsi_cmd_priv(scmd);
2637 }
2638 if (cmd_blk) {
2639 cmd_blk->status = next_stat_mbox->status;
2640 cmd_blk->sense_len = next_stat_mbox->sense_len;
2641 cmd_blk->residual = next_stat_mbox->residual;
2642 } else
2643 dev_err(&cs->pdev->dev,
2644 "Unhandled command completion %d\n", id);
2646 memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
2647 if (++next_stat_mbox > cs->last_stat_mbox)
2648 next_stat_mbox = cs->first_stat_mbox;
2650 if (cmd_blk) {
2651 if (id < 3)
2652 myrs_handle_cmdblk(cs, cmd_blk);
2653 else
2654 myrs_handle_scsi(cs, cmd_blk, scmd);
2655 }
2656 }
2657 cs->next_stat_mbox = next_stat_mbox;
2658 spin_unlock_irqrestore(&cs->queue_lock, flags);
2659 return IRQ_HANDLED;
2660 }
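/*
 * Completion model: the controller posts one myrs_stat_mbox per
 * finished command into a DMA ring, with id == 0 marking an empty
 * slot. Each consumed slot is cleared and the cursor wraps from
 * last_stat_mbox back to first_stat_mbox.
 */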
2662 struct myrs_privdata DAC960_GEM_privdata = {
2663 .hw_init = DAC960_GEM_hw_init,
2664 .irq_handler = DAC960_GEM_intr_handler,
2665 .mmio_size = DAC960_GEM_mmio_size,
2666 };
2668 /*
2669 * DAC960 BA Series Controllers.
2670 */
2672 static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base)
2673 {
2674 writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
2675 }
2677 static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
2678 {
2679 writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
2680 }
2682 static inline void DAC960_BA_gen_intr(void __iomem *base)
2683 {
2684 writeb(DAC960_BA_IDB_GEN_IRQ, base + DAC960_BA_IDB_OFFSET);
2685 }
2687 static inline void DAC960_BA_reset_ctrl(void __iomem *base)
2688 {
2689 writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
2690 }
2692 static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base)
2693 {
2694 writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
2695 }
2697 static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base)
2698 {
2699 u8 val;
2701 val = readb(base + DAC960_BA_IDB_OFFSET);
2702 return !(val & DAC960_BA_IDB_HWMBOX_EMPTY);
2703 }
2705 static inline bool DAC960_BA_init_in_progress(void __iomem *base)
2706 {
2707 u8 val;
2709 val = readb(base + DAC960_BA_IDB_OFFSET);
2710 return !(val & DAC960_BA_IDB_INIT_DONE);
2711 }
2713 static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
2714 {
2715 writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
2716 }
2718 static inline void DAC960_BA_ack_mem_mbox_intr(void __iomem *base)
2719 {
2720 writeb(DAC960_BA_ODB_MMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
2721 }
2723 static inline void DAC960_BA_ack_intr(void __iomem *base)
2724 {
2725 writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
2726 base + DAC960_BA_ODB_OFFSET);
2727 }
2729 static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
2730 {
2731 u8 val;
2733 val = readb(base + DAC960_BA_ODB_OFFSET);
2734 return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
2735 }
2737 static inline bool DAC960_BA_mem_mbox_status_available(void __iomem *base)
2738 {
2739 u8 val;
2741 val = readb(base + DAC960_BA_ODB_OFFSET);
2742 return val & DAC960_BA_ODB_MMBOX_STS_AVAIL;
2743 }
2745 static inline void DAC960_BA_enable_intr(void __iomem *base)
2746 {
2747 writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
2748 }
2750 static inline void DAC960_BA_disable_intr(void __iomem *base)
2751 {
2752 writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
2753 }
2755 static inline bool DAC960_BA_intr_enabled(void __iomem *base)
2756 {
2757 u8 val;
2759 val = readb(base + DAC960_BA_IRQMASK_OFFSET);
2760 return !(val & DAC960_BA_IRQMASK_DISABLE_IRQ);
2761 }
2763 static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
2764 union myrs_cmd_mbox *mbox)
2765 {
2766 memcpy(&mem_mbox->words[1], &mbox->words[1],
2767 sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
2768 /* Barrier to avoid reordering */
2769 wmb();
2770 mem_mbox->words[0] = mbox->words[0];
2771 /* Barrier to force PCI access */
2772 mb();
2773 }
2776 static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
2777 dma_addr_t cmd_mbox_addr)
2778 {
2779 dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
2780 }
2782 static inline unsigned short DAC960_BA_read_cmd_ident(void __iomem *base)
2783 {
2784 return readw(base + DAC960_BA_CMDSTS_OFFSET);
2785 }
2787 static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
2788 {
2789 return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
2790 }
2792 static inline bool
2793 DAC960_BA_read_error_status(void __iomem *base, unsigned char *error,
2794 unsigned char *param0, unsigned char *param1)
2795 {
2796 u8 val;
2798 val = readb(base + DAC960_BA_ERRSTS_OFFSET);
2799 if (!(val & DAC960_BA_ERRSTS_PENDING))
2800 return false;
2801 val &= ~DAC960_BA_ERRSTS_PENDING;
2802 *error = val;
2803 *param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0);
2804 *param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1);
2805 writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET);
2806 return true;
2807 }
2809 static inline unsigned char
2810 DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
2811 {
2812 unsigned char status;
2814 while (DAC960_BA_hw_mbox_is_full(base))
2815 udelay(1);
2816 DAC960_BA_write_hw_mbox(base, mbox_addr);
2817 DAC960_BA_hw_mbox_new_cmd(base);
2818 while (!DAC960_BA_hw_mbox_status_available(base))
2819 udelay(1);
2820 status = DAC960_BA_read_cmd_status(base);
2821 DAC960_BA_ack_hw_mbox_intr(base);
2822 DAC960_BA_ack_hw_mbox_status(base);
2824 return status;
2825 }
2827 static int DAC960_BA_hw_init(struct pci_dev *pdev,
2828 struct myrs_hba *cs, void __iomem *base)
2829 {
2830 int timeout = 0;
2831 unsigned char status, parm0, parm1;
2833 DAC960_BA_disable_intr(base);
2834 DAC960_BA_ack_hw_mbox_status(base);
2835 udelay(1000);
2836 while (DAC960_BA_init_in_progress(base) &&
2837 timeout < MYRS_MAILBOX_TIMEOUT) {
2838 if (DAC960_BA_read_error_status(base, &status,
2839 &parm0, &parm1) &&
2840 myrs_err_status(cs, status, parm0, parm1))
2841 return -EIO;
2842 udelay(10);
2843 timeout++;
2844 }
2845 if (timeout == MYRS_MAILBOX_TIMEOUT) {
2846 dev_err(&pdev->dev,
2847 "Timeout waiting for Controller Initialisation\n");
2848 return -ETIMEDOUT;
2849 }
2850 if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) {
2851 dev_err(&pdev->dev,
2852 "Unable to Enable Memory Mailbox Interface\n");
2853 DAC960_BA_reset_ctrl(base);
2854 return -EAGAIN;
2855 }
2856 DAC960_BA_enable_intr(base);
2857 cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox;
2858 cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd;
2859 cs->disable_intr = DAC960_BA_disable_intr;
2860 cs->reset = DAC960_BA_reset_ctrl;
2861 return 0;
2862 }
2864 static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
2865 {
2866 struct myrs_hba *cs = arg;
2867 void __iomem *base = cs->io_base;
2868 struct myrs_stat_mbox *next_stat_mbox;
2869 unsigned long flags;
2871 spin_lock_irqsave(&cs->queue_lock, flags);
2872 DAC960_BA_ack_intr(base);
2873 next_stat_mbox = cs->next_stat_mbox;
2874 while (next_stat_mbox->id > 0) {
2875 unsigned short id = next_stat_mbox->id;
2876 struct scsi_cmnd *scmd = NULL;
2877 struct myrs_cmdblk *cmd_blk = NULL;
2879 if (id == MYRS_DCMD_TAG)
2880 cmd_blk = &cs->dcmd_blk;
2881 else if (id == MYRS_MCMD_TAG)
2882 cmd_blk = &cs->mcmd_blk;
2883 else {
2884 scmd = scsi_host_find_tag(cs->host, id - 3);
2885 if (scmd)
2886 cmd_blk = scsi_cmd_priv(scmd);
2887 }
2888 if (cmd_blk) {
2889 cmd_blk->status = next_stat_mbox->status;
2890 cmd_blk->sense_len = next_stat_mbox->sense_len;
2891 cmd_blk->residual = next_stat_mbox->residual;
2892 } else
2893 dev_err(&cs->pdev->dev,
2894 "Unhandled command completion %d\n", id);
2896 memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
2897 if (++next_stat_mbox > cs->last_stat_mbox)
2898 next_stat_mbox = cs->first_stat_mbox;
2900 if (cmd_blk) {
2901 if (id < 3)
2902 myrs_handle_cmdblk(cs, cmd_blk);
2903 else
2904 myrs_handle_scsi(cs, cmd_blk, scmd);
2905 }
2906 }
2907 cs->next_stat_mbox = next_stat_mbox;
2908 spin_unlock_irqrestore(&cs->queue_lock, flags);
2909 return IRQ_HANDLED;
2910 }
2912 struct myrs_privdata DAC960_BA_privdata = {
2913 .hw_init = DAC960_BA_hw_init,
2914 .irq_handler = DAC960_BA_intr_handler,
2915 .mmio_size = DAC960_BA_mmio_size,
2916 };
2918 /*
2919 * DAC960 LP Series Controllers.
2920 */
2922 static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base)
2923 {
2924 writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
2925 }
2927 static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
2928 {
2929 writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
2930 }
2932 static inline void DAC960_LP_gen_intr(void __iomem *base)
2933 {
2934 writeb(DAC960_LP_IDB_GEN_IRQ, base + DAC960_LP_IDB_OFFSET);
2935 }
2937 static inline void DAC960_LP_reset_ctrl(void __iomem *base)
2938 {
2939 writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
2940 }
2942 static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base)
2943 {
2944 writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
2945 }
2947 static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base)
2948 {
2949 u8 val;
2951 val = readb(base + DAC960_LP_IDB_OFFSET);
2952 return val & DAC960_LP_IDB_HWMBOX_FULL;
2953 }
2955 static inline bool DAC960_LP_init_in_progress(void __iomem *base)
2956 {
2957 u8 val;
2959 val = readb(base + DAC960_LP_IDB_OFFSET);
2960 return val & DAC960_LP_IDB_INIT_IN_PROGRESS;
2961 }
2963 static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
2964 {
2965 writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
2966 }
2968 static inline void DAC960_LP_ack_mem_mbox_intr(void __iomem *base)
2969 {
2970 writeb(DAC960_LP_ODB_MMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
2971 }
2973 static inline void DAC960_LP_ack_intr(void __iomem *base)
2974 {
2975 writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
2976 base + DAC960_LP_ODB_OFFSET);
2977 }
2979 static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
2980 {
2981 u8 val;
2983 val = readb(base + DAC960_LP_ODB_OFFSET);
2984 return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
2985 }
2987 static inline bool DAC960_LP_mem_mbox_status_available(void __iomem *base)
2988 {
2989 u8 val;
2991 val = readb(base + DAC960_LP_ODB_OFFSET);
2992 return val & DAC960_LP_ODB_MMBOX_STS_AVAIL;
2993 }
2995 static inline void DAC960_LP_enable_intr(void __iomem *base)
2996 {
2997 writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
2998 }
3000 static inline void DAC960_LP_disable_intr(void __iomem *base)
3001 {
3002 writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
3003 }
3005 static inline bool DAC960_LP_intr_enabled(void __iomem *base)
3006 {
3007 u8 val;
3009 val = readb(base + DAC960_LP_IRQMASK_OFFSET);
3010 return !(val & DAC960_LP_IRQMASK_DISABLE_IRQ);
3011 }
3013 static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
3014 union myrs_cmd_mbox *mbox)
3015 {
3016 memcpy(&mem_mbox->words[1], &mbox->words[1],
3017 sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
3018 /* Barrier to avoid reordering */
3019 wmb();
3020 mem_mbox->words[0] = mbox->words[0];
3021 /* Barrier to force PCI access */
3022 mb();
3023 }
3025 static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
3026 dma_addr_t cmd_mbox_addr)
3027 {
3028 dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
3029 }
3031 static inline unsigned short DAC960_LP_read_cmd_ident(void __iomem *base)
3032 {
3033 return readw(base + DAC960_LP_CMDSTS_OFFSET);
3034 }
3036 static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
3037 {
3038 return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
3039 }
3041 static inline bool
3042 DAC960_LP_read_error_status(void __iomem *base, unsigned char *error,
3043 unsigned char *param0, unsigned char *param1)
3044 {
3045 u8 val;
3047 val = readb(base + DAC960_LP_ERRSTS_OFFSET);
3048 if (!(val & DAC960_LP_ERRSTS_PENDING))
3049 return false;
3050 val &= ~DAC960_LP_ERRSTS_PENDING;
3051 *error = val;
3052 *param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0);
3053 *param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1);
3054 writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET);
3055 return true;
3056 }
3058 static inline unsigned char
3059 DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
3060 {
3061 unsigned char status;
3063 while (DAC960_LP_hw_mbox_is_full(base))
3064 udelay(1);
3065 DAC960_LP_write_hw_mbox(base, mbox_addr);
3066 DAC960_LP_hw_mbox_new_cmd(base);
3067 while (!DAC960_LP_hw_mbox_status_available(base))
3068 udelay(1);
3069 status = DAC960_LP_read_cmd_status(base);
3070 DAC960_LP_ack_hw_mbox_intr(base);
3071 DAC960_LP_ack_hw_mbox_status(base);
3073 return status;
3074 }
3076 static int DAC960_LP_hw_init(struct pci_dev *pdev,
3077 struct myrs_hba *cs, void __iomem *base)
3078 {
3079 int timeout = 0;
3080 unsigned char status, parm0, parm1;
3082 DAC960_LP_disable_intr(base);
3083 DAC960_LP_ack_hw_mbox_status(base);
3084 udelay(1000);
3085 while (DAC960_LP_init_in_progress(base) &&
3086 timeout < MYRS_MAILBOX_TIMEOUT) {
3087 if (DAC960_LP_read_error_status(base, &status,
3088 &parm0, &parm1) &&
3089 myrs_err_status(cs, status, parm0, parm1))
3090 return -EIO;
3091 udelay(10);
3092 timeout++;
3093 }
3094 if (timeout == MYRS_MAILBOX_TIMEOUT) {
3095 dev_err(&pdev->dev,
3096 "Timeout waiting for Controller Initialisation\n");
3097 return -ETIMEDOUT;
3098 }
3099 if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) {
3100 dev_err(&pdev->dev,
3101 "Unable to Enable Memory Mailbox Interface\n");
3102 DAC960_LP_reset_ctrl(base);
3103 return -ENODEV;
3104 }
3105 DAC960_LP_enable_intr(base);
3106 cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox;
3107 cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd;
3108 cs->disable_intr = DAC960_LP_disable_intr;
3109 cs->reset = DAC960_LP_reset_ctrl;
3111 return 0;
3112 }
3114 static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
3115 {
3116 struct myrs_hba *cs = arg;
3117 void __iomem *base = cs->io_base;
3118 struct myrs_stat_mbox *next_stat_mbox;
3119 unsigned long flags;
3121 spin_lock_irqsave(&cs->queue_lock, flags);
3122 DAC960_LP_ack_intr(base);
3123 next_stat_mbox = cs->next_stat_mbox;
3124 while (next_stat_mbox->id > 0) {
3125 unsigned short id = next_stat_mbox->id;
3126 struct scsi_cmnd *scmd = NULL;
3127 struct myrs_cmdblk *cmd_blk = NULL;
3129 if (id == MYRS_DCMD_TAG)
3130 cmd_blk = &cs->dcmd_blk;
3131 else if (id == MYRS_MCMD_TAG)
3132 cmd_blk = &cs->mcmd_blk;
3133 else {
3134 scmd = scsi_host_find_tag(cs->host, id - 3);
3135 if (scmd)
3136 cmd_blk = scsi_cmd_priv(scmd);
3137 }
3138 if (cmd_blk) {
3139 cmd_blk->status = next_stat_mbox->status;
3140 cmd_blk->sense_len = next_stat_mbox->sense_len;
3141 cmd_blk->residual = next_stat_mbox->residual;
3142 } else
3143 dev_err(&cs->pdev->dev,
3144 "Unhandled command completion %d\n", id);
3146 memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
3147 if (++next_stat_mbox > cs->last_stat_mbox)
3148 next_stat_mbox = cs->first_stat_mbox;
3150 if (cmd_blk) {
3151 if (id < 3)
3152 myrs_handle_cmdblk(cs, cmd_blk);
3153 else
3154 myrs_handle_scsi(cs, cmd_blk, scmd);
3155 }
3156 }
3157 cs->next_stat_mbox = next_stat_mbox;
3158 spin_unlock_irqrestore(&cs->queue_lock, flags);
3159 return IRQ_HANDLED;
3160 }
3162 struct myrs_privdata DAC960_LP_privdata = {
3163 .hw_init = DAC960_LP_hw_init,
3164 .irq_handler = DAC960_LP_intr_handler,
3165 .mmio_size = DAC960_LP_mmio_size,
3166 };
3168 /*
3169 * Module functions
3170 */
3171 static int
3172 myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3173 {
3174 struct myrs_hba *cs;
3175 int ret;
3177 cs = myrs_detect(dev, entry);
3178 if (!cs)
3179 return -ENODEV;
3181 ret = myrs_get_config(cs);
3182 if (ret < 0) {
3183 myrs_cleanup(cs);
3184 return ret;
3185 }
3187 if (!myrs_create_mempools(dev, cs)) {
3188 ret = -ENOMEM;
3189 goto failed;
3190 }
3192 ret = scsi_add_host(cs->host, &dev->dev);
3193 if (ret) {
3194 dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3195 myrs_destroy_mempools(cs);
3196 goto failed;
3197 }
3198 scsi_scan_host(cs->host);
3199 return 0;
3200 failed:
3201 myrs_cleanup(cs);
3202 return ret;
3203 }
3206 static void myrs_remove(struct pci_dev *pdev)
3207 {
3208 struct myrs_hba *cs = pci_get_drvdata(pdev);
3210 if (cs == NULL)
3211 return;
3213 shost_printk(KERN_NOTICE, cs->host, "Flushing Cache...");
3214 myrs_flush_cache(cs);
3215 myrs_destroy_mempools(cs);
3216 myrs_cleanup(cs);
3217 }
3220 static const struct pci_device_id myrs_id_table[] = {
3221 {
3222 PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX,
3223 PCI_DEVICE_ID_MYLEX_DAC960_GEM,
3224 PCI_VENDOR_ID_MYLEX, PCI_ANY_ID),
3225 .driver_data = (unsigned long) &DAC960_GEM_privdata,
3226 },
3227 {
3228 PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata),
3229 },
3230 {
3231 PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata),
3232 },
3233 {0, },
3234 };
3236 MODULE_DEVICE_TABLE(pci, myrs_id_table);
3238 static struct pci_driver myrs_pci_driver = {
3239 .name = "myrs",
3240 .id_table = myrs_id_table,
3241 .probe = myrs_probe,
3242 .remove = myrs_remove,
3243 };
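/*
 * Registration order below: the raid_class template is attached
 * before the PCI driver registers (and released only after it
 * unregisters), so slave_alloc can safely call raid_set_level()
 * for any device probed in between.
 */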
3245 static int __init myrs_init_module(void)
3246 {
3247 int ret;
3249 myrs_raid_template = raid_class_attach(&myrs_raid_functions);
3250 if (!myrs_raid_template)
3251 return -ENODEV;
3253 ret = pci_register_driver(&myrs_pci_driver);
3254 if (ret)
3255 raid_class_release(myrs_raid_template);
3257 return ret;
3258 }
3260 static void __exit myrs_cleanup_module(void)
3261 {
3262 pci_unregister_driver(&myrs_pci_driver);
3263 raid_class_release(myrs_raid_template);
3264 }
3266 module_init(myrs_init_module);
3267 module_exit(myrs_cleanup_module);
3269 MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
3270 MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3271 MODULE_LICENSE("GPL");