// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * This driver supports the newer, SCSI-based firmware interface only.
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
 *
 * Based on the original DAC960 driver, which has
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include "myrs.h"
static struct raid_template *myrs_raid_template;
static struct myrs_devstate_name_entry {
	enum myrs_devstate state;
	char *name;
} myrs_devstate_name_list[] = {
	{ MYRS_DEVICE_UNCONFIGURED, "Unconfigured" },
	{ MYRS_DEVICE_ONLINE, "Online" },
	{ MYRS_DEVICE_REBUILD, "Rebuild" },
	{ MYRS_DEVICE_MISSING, "Missing" },
	{ MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" },
	{ MYRS_DEVICE_OFFLINE, "Offline" },
	{ MYRS_DEVICE_CRITICAL, "Critical" },
	{ MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" },
	{ MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" },
	{ MYRS_DEVICE_STANDBY, "Standby" },
	{ MYRS_DEVICE_INVALID_STATE, "Invalid" },
};

static char *myrs_devstate_name(enum myrs_devstate state)
{
	struct myrs_devstate_name_entry *entry = myrs_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return NULL;
}
static struct myrs_raid_level_name_entry {
	enum myrs_raid_level level;
	char *name;
} myrs_raid_level_name_list[] = {
	{ MYRS_RAID_LEVEL0, "RAID0" },
	{ MYRS_RAID_LEVEL1, "RAID1" },
	{ MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" },
	{ MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" },
	{ MYRS_RAID_LEVEL6, "RAID6" },
	{ MYRS_RAID_JBOD, "JBOD" },
	{ MYRS_RAID_NEWSPAN, "New Mylex SPAN" },
	{ MYRS_RAID_LEVEL3F, "RAID3 fixed parity" },
	{ MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" },
	{ MYRS_RAID_SPAN, "Mylex SPAN" },
	{ MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" },
	{ MYRS_RAID_LEVELE, "RAIDE (concatenation)" },
	{ MYRS_RAID_PHYSICAL, "Physical device" },
};

static char *myrs_raid_level_name(enum myrs_raid_level level)
{
	struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}
/*
 * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk
 */
static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
{
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrs_cmd_mbox));
	cmd_blk->status = 0;
}
/*
 * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
 */
static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
{
	void __iomem *base = cs->io_base;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;

	cs->write_cmd_mbox(next_mbox, mbox);

	if (cs->prev_cmd_mbox1->words[0] == 0 ||
	    cs->prev_cmd_mbox2->words[0] == 0)
		cs->get_cmd_mbox(base);

	cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
	cs->prev_cmd_mbox1 = next_mbox;

	if (++next_mbox > cs->last_cmd_mbox)
		next_mbox = cs->first_cmd_mbox;

	cs->next_cmd_mbox = next_mbox;
}
/*
 * myrs_exec_cmd - executes V2 Command and waits for completion.
 */
static void myrs_exec_cmd(struct myrs_hba *cs,
		struct myrs_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(complete);
	unsigned long flags;

	cmd_blk->complete = &complete;
	spin_lock_irqsave(&cs->queue_lock, flags);
	myrs_qcmd(cs, cmd_blk);
	spin_unlock_irqrestore(&cs->queue_lock, flags);

	WARN_ON(in_interrupt());
	wait_for_completion(&complete);
}
/*
 * myrs_report_progress - prints progress message
 */
static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
		unsigned char *msg, unsigned long blocks,
		unsigned long size)
{
	shost_printk(KERN_INFO, cs->host,
		     "Logical Drive %d: %s in Progress: %d%% completed\n",
		     ldev_num, msg,
		     (100 * (int)(blocks >> 7)) / (int)(size >> 7));
}
/*
 * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
 */
static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ctlr_info_addr;
	union myrs_sgl *sgl;
	unsigned char status;
	struct myrs_ctlr_info old;

	memcpy(&old, cs->ctlr_info, sizeof(struct myrs_ctlr_info));
	ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
					sizeof(struct myrs_ctlr_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ctlr_info.id = MYRS_DCMD_TAG;
	mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ctlr_info.control.dma_ctrl_to_host = true;
	mbox->ctlr_info.control.no_autosense = true;
	mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
	mbox->ctlr_info.ctlr_num = 0;
	mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
	sgl = &mbox->ctlr_info.dma_addr;
	sgl->sge[0].sge_addr = ctlr_info_addr;
	sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
	dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n");
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, ctlr_info_addr,
			 sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		if (cs->ctlr_info->bg_init_active +
		    cs->ctlr_info->ldev_init_active +
		    cs->ctlr_info->pdev_init_active +
		    cs->ctlr_info->cc_active +
		    cs->ctlr_info->rbld_active +
		    cs->ctlr_info->exp_active != 0)
			cs->needs_update = true;
		if (cs->ctlr_info->ldev_present != old.ldev_present ||
		    cs->ctlr_info->ldev_critical != old.ldev_critical ||
		    cs->ctlr_info->ldev_offline != old.ldev_offline)
			shost_printk(KERN_INFO, cs->host,
				     "Logical drive count changes (%d/%d/%d)\n",
				     cs->ctlr_info->ldev_critical,
				     cs->ctlr_info->ldev_offline,
				     cs->ctlr_info->ldev_present);
	}

	return status;
}
/*
 * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
 */
static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
		unsigned short ldev_num, struct myrs_ldev_info *ldev_info)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ldev_info_addr;
	struct myrs_ldev_info ldev_info_orig;
	union myrs_sgl *sgl;
	unsigned char status;

	memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info));
	ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info,
					sizeof(struct myrs_ldev_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ldev_info.id = MYRS_DCMD_TAG;
	mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ldev_info.control.dma_ctrl_to_host = true;
	mbox->ldev_info.control.no_autosense = true;
	mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info);
	mbox->ldev_info.ldev.ldev_num = ldev_num;
	mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID;
	sgl = &mbox->ldev_info.dma_addr;
	sgl->sge[0].sge_addr = ldev_info_addr;
	sgl->sge[0].sge_count = mbox->ldev_info.dma_size;
	dev_dbg(&cs->host->shost_gendev,
		"Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, ldev_info_addr,
			 sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		unsigned short ldev_num = ldev_info->ldev_num;
		struct myrs_ldev_info *new = ldev_info;
		struct myrs_ldev_info *old = &ldev_info_orig;
		unsigned long ldev_size = new->cfg_devsize;

		if (new->dev_state != old->dev_state) {
			const char *name;

			name = myrs_devstate_name(new->dev_state);
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d is now %s\n",
				     ldev_num, name ? name : "Invalid");
		}
		if ((new->soft_errs != old->soft_errs) ||
		    (new->cmds_failed != old->cmds_failed) ||
		    (new->deferred_write_errs != old->deferred_write_errs))
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n",
				     ldev_num, new->soft_errs,
				     new->cmds_failed,
				     new->deferred_write_errs);
		if (new->bg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Background Initialization",
					     new->bg_init_lba, ldev_size);
		else if (new->fg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Foreground Initialization",
					     new->fg_init_lba, ldev_size);
		else if (new->migration_active)
			myrs_report_progress(cs, ldev_num,
					     "Data Migration",
					     new->migration_lba, ldev_size);
		else if (new->patrol_active)
			myrs_report_progress(cs, ldev_num,
					     "Patrol Operation",
					     new->patrol_lba, ldev_size);
		if (old->bg_init_active && !new->bg_init_active)
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d: Background Initialization %s\n",
				     ldev_num,
				     (new->ldev_control.ldev_init_done ?
				      "Completed" : "Failed"));
	}
	return status;
}
/*
 * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
 */
static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
		unsigned char channel, unsigned char target, unsigned char lun,
		struct myrs_pdev_info *pdev_info)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t pdev_info_addr;
	union myrs_sgl *sgl;
	unsigned char status;

	pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info,
					sizeof(struct myrs_pdev_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->pdev_info.id = MYRS_DCMD_TAG;
	mbox->pdev_info.control.dma_ctrl_to_host = true;
	mbox->pdev_info.control.no_autosense = true;
	mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info);
	mbox->pdev_info.pdev.lun = lun;
	mbox->pdev_info.pdev.target = target;
	mbox->pdev_info.pdev.channel = channel;
	mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID;
	sgl = &mbox->pdev_info.dma_addr;
	sgl->sge[0].sge_addr = pdev_info_addr;
	sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
	dev_dbg(&cs->host->shost_gendev,
		"Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
		channel, target, lun);
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, pdev_info_addr,
			 sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE);
	return status;
}
/*
 * myrs_dev_op - executes a "Device Operation" Command
 */
static unsigned char myrs_dev_op(struct myrs_hba *cs,
		enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned char status;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL;
	mbox->dev_op.id = MYRS_DCMD_TAG;
	mbox->dev_op.control.dma_ctrl_to_host = true;
	mbox->dev_op.control.no_autosense = true;
	mbox->dev_op.ioctl_opcode = opcode;
	mbox->dev_op.opdev = opdev;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	return status;
}
/*
 * myrs_translate_pdev - translates a Physical Device Channel and
 * TargetID into a Logical Device.
 */
static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
		unsigned char channel, unsigned char target, unsigned char lun,
		struct myrs_devmap *devmap)
{
	struct pci_dev *pdev = cs->pdev;
	dma_addr_t devmap_addr;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	union myrs_sgl *sgl;
	unsigned char status;

	memset(devmap, 0x0, sizeof(struct myrs_devmap));
	devmap_addr = dma_map_single(&pdev->dev, devmap,
				     sizeof(struct myrs_devmap),
				     DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, devmap_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	mbox = &cmd_blk->mbox;
	mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->pdev_info.control.dma_ctrl_to_host = true;
	mbox->pdev_info.control.no_autosense = true;
	mbox->pdev_info.dma_size = sizeof(struct myrs_devmap);
	mbox->pdev_info.pdev.target = target;
	mbox->pdev_info.pdev.channel = channel;
	mbox->pdev_info.pdev.lun = lun;
	mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV;
	sgl = &mbox->pdev_info.dma_addr;
	sgl->sge[0].sge_addr = devmap_addr;
	sgl->sge[0].sge_count = mbox->pdev_info.dma_size;

	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&pdev->dev, devmap_addr,
			 sizeof(struct myrs_devmap), DMA_FROM_DEVICE);
	return status;
}
/*
 * myrs_get_event - executes a Get Event Command
 */
static unsigned char myrs_get_event(struct myrs_hba *cs,
		unsigned int event_num, struct myrs_event *event_buf)
{
	struct pci_dev *pdev = cs->pdev;
	dma_addr_t event_addr;
	struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_sgl *sgl;
	unsigned char status;

	event_addr = dma_map_single(&pdev->dev, event_buf,
				    sizeof(struct myrs_event), DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, event_addr))
		return MYRS_STATUS_FAILED;

	mbox->get_event.opcode = MYRS_CMD_OP_IOCTL;
	mbox->get_event.dma_size = sizeof(struct myrs_event);
	mbox->get_event.evnum_upper = event_num >> 16;
	mbox->get_event.ctlr_num = 0;
	mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT;
	mbox->get_event.evnum_lower = event_num & 0xFFFF;
	sgl = &mbox->get_event.dma_addr;
	sgl->sge[0].sge_addr = event_addr;
	sgl->sge[0].sge_count = mbox->get_event.dma_size;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	dma_unmap_single(&pdev->dev, event_addr,
			 sizeof(struct myrs_event), DMA_FROM_DEVICE);

	return status;
}
/*
 * myrs_get_fwstatus - executes a Get Health Status Command
 */
static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
{
	struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_sgl *sgl;
	unsigned char status = cmd_blk->status;

	myrs_reset_cmd(cmd_blk);
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_MCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	mbox->common.dma_size = sizeof(struct myrs_fwstat);
	mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS;
	sgl = &mbox->common.dma_addr;
	sgl->sge[0].sge_addr = cs->fwstat_addr;
	sgl->sge[0].sge_count = mbox->common.dma_size;
	dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n");
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;

	return status;
}
/*
 * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
 */
static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
		enable_mbox_t enable_mbox_fn)
{
	void __iomem *base = cs->io_base;
	struct pci_dev *pdev = cs->pdev;
	union myrs_cmd_mbox *cmd_mbox;
	struct myrs_stat_mbox *stat_mbox;
	union myrs_cmd_mbox *mbox;
	dma_addr_t mbox_addr;
	unsigned char status = MYRS_STATUS_FAILED;

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
			dev_err(&pdev->dev, "DMA mask out of range\n");
			return false;
		}

	/* Temporary dma mapping, used only in the scope of this function */
	mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
				  &mbox_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, mbox_addr))
		return false;

	/* These are the base addresses for the command memory mailbox array */
	cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
	cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
				      &cs->cmd_mbox_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) {
		dev_err(&pdev->dev, "Failed to map command mailbox\n");
		goto out_free;
	}
	cs->first_cmd_mbox = cmd_mbox;
	cmd_mbox += MYRS_MAX_CMD_MBOX - 1;
	cs->last_cmd_mbox = cmd_mbox;
	cs->next_cmd_mbox = cs->first_cmd_mbox;
	cs->prev_cmd_mbox1 = cs->last_cmd_mbox;
	cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1;

	/* These are the base addresses for the status memory mailbox array */
	cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
	stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
				       &cs->stat_mbox_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) {
		dev_err(&pdev->dev, "Failed to map status mailbox\n");
		goto out_free;
	}

	cs->first_stat_mbox = stat_mbox;
	stat_mbox += MYRS_MAX_STAT_MBOX - 1;
	cs->last_stat_mbox = stat_mbox;
	cs->next_stat_mbox = cs->first_stat_mbox;

	cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
					    sizeof(struct myrs_fwstat),
					    &cs->fwstat_addr, GFP_KERNEL);
	if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) {
		dev_err(&pdev->dev, "Failed to map firmware health buffer\n");
		cs->fwstat_buf = NULL;
		goto out_free;
	}
	cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
				GFP_KERNEL | GFP_DMA);
	if (!cs->ctlr_info)
		goto out_free;

	cs->event_buf = kzalloc(sizeof(struct myrs_event),
				GFP_KERNEL | GFP_DMA);
	if (!cs->event_buf)
		goto out_free;

	/* Enable the Memory Mailbox Interface. */
	memset(mbox, 0, sizeof(union myrs_cmd_mbox));
	mbox->set_mbox.id = 1;
	mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL;
	mbox->set_mbox.control.no_autosense = true;
	mbox->set_mbox.first_cmd_mbox_size_kb =
		(MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10;
	mbox->set_mbox.first_stat_mbox_size_kb =
		(MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10;
	mbox->set_mbox.second_cmd_mbox_size_kb = 0;
	mbox->set_mbox.second_stat_mbox_size_kb = 0;
	mbox->set_mbox.sense_len = 0;
	mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX;
	mbox->set_mbox.fwstat_buf_size_kb = 1;
	mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr;
	mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr;
	mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr;
	status = enable_mbox_fn(base, mbox_addr);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
			  mbox, mbox_addr);
	if (status != MYRS_STATUS_SUCCESS)
		dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
			status);
	return (status == MYRS_STATUS_SUCCESS);
}
/*
 * myrs_get_config - reads the Configuration Information
 */
static int myrs_get_config(struct myrs_hba *cs)
{
	struct myrs_ctlr_info *info = cs->ctlr_info;
	struct Scsi_Host *shost = cs->host;
	unsigned char status;
	unsigned char model[20];
	unsigned char fw_version[12];
	int i, model_len;

	/* Get data into dma-able area, then copy into permanent location */
	mutex_lock(&cs->cinfo_mutex);
	status = myrs_get_ctlr_info(cs);
	mutex_unlock(&cs->cinfo_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		shost_printk(KERN_ERR, shost,
			     "Failed to get controller information\n");
		return -ENODEV;
	}

	/* Initialize the Controller Model Name and Full Model Name fields. */
	model_len = sizeof(info->ctlr_name);
	if (model_len > sizeof(model)-1)
		model_len = sizeof(model)-1;
	memcpy(model, info->ctlr_name, model_len);
	model_len--;
	while (model[model_len] == ' ' || model[model_len] == '\0')
		model_len--;
	model[++model_len] = '\0';
	strcpy(cs->model_name, "DAC960 ");
	strcat(cs->model_name, model);
	/* Initialize the Controller Firmware Version field. */
	sprintf(fw_version, "%d.%02d-%02d",
		info->fw_major_version, info->fw_minor_version,
		info->fw_turn_number);
	if (info->fw_major_version == 6 &&
	    info->fw_minor_version == 0 &&
	    info->fw_turn_number < 1) {
		shost_printk(KERN_WARNING, shost,
			"FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
			"STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
			"PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
			fw_version);
		return -ENODEV;
	}
	/* Initialize the Controller Channels and Targets. */
	shost->max_channel = info->physchan_present + info->virtchan_present;
	shost->max_id = info->max_targets[0];
	for (i = 1; i < 16; i++) {
		if (!info->max_targets[i])
			continue;
		if (shost->max_id < info->max_targets[i])
			shost->max_id = info->max_targets[i];
	}

	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most three less than
	 * the Controller Queue Depth; tag '1' is reserved for
	 * direct commands, and tag '2' for monitoring commands.
	 */
	shost->can_queue = info->max_tcq - 3;
	if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3)
		shost->can_queue = MYRS_MAX_CMD_MBOX - 3;
	shost->max_sectors = info->max_transfer_size;
	shost->sg_tablesize = info->max_sge;
	if (shost->sg_tablesize > MYRS_SG_LIMIT)
		shost->sg_tablesize = MYRS_SG_LIMIT;

	shost_printk(KERN_INFO, shost,
		     "Configuring %s PCI RAID Controller\n", model);
	shost_printk(KERN_INFO, shost,
		     "  Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
		     fw_version, info->physchan_present, info->mem_size_mb);

	shost_printk(KERN_INFO, shost,
		     "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		     shost->can_queue, shost->max_sectors);

	shost_printk(KERN_INFO, shost,
		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT);
	for (i = 0; i < info->physchan_max; i++) {
		if (!info->max_targets[i])
			continue;
		shost_printk(KERN_INFO, shost,
			     "  Device Channel %d: max %d devices\n",
			     i, info->max_targets[i]);
	}
	shost_printk(KERN_INFO, shost,
		     "  Physical: %d/%d channels, %d disks, %d devices\n",
		     info->physchan_present, info->physchan_max,
		     info->pdisk_present, info->pdev_present);

	shost_printk(KERN_INFO, shost,
		     "  Logical: %d/%d channels, %d disks\n",
		     info->virtchan_present, info->virtchan_max,
		     info->ldev_present);

	return 0;
}
/*
 * myrs_log_event - prints a Controller Event message
 */
static struct {
	int ev_code;
	unsigned char *ev_msg;
} myrs_ev_list[] = {
	/* Physical Device Events (0x0000 - 0x007F) */
	{ 0x0001, "P Online" },
	{ 0x0002, "P Standby" },
	{ 0x0005, "P Automatic Rebuild Started" },
	{ 0x0006, "P Manual Rebuild Started" },
	{ 0x0007, "P Rebuild Completed" },
	{ 0x0008, "P Rebuild Cancelled" },
	{ 0x0009, "P Rebuild Failed for Unknown Reasons" },
	{ 0x000A, "P Rebuild Failed due to New Physical Device" },
	{ 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
	{ 0x000C, "S Offline" },
	{ 0x000D, "P Found" },
	{ 0x000E, "P Removed" },
	{ 0x000F, "P Unconfigured" },
	{ 0x0010, "P Expand Capacity Started" },
	{ 0x0011, "P Expand Capacity Completed" },
	{ 0x0012, "P Expand Capacity Failed" },
	{ 0x0013, "P Command Timed Out" },
	{ 0x0014, "P Command Aborted" },
	{ 0x0015, "P Command Retried" },
	{ 0x0016, "P Parity Error" },
	{ 0x0017, "P Soft Error" },
	{ 0x0018, "P Miscellaneous Error" },
	{ 0x0019, "P Reset" },
	{ 0x001A, "P Active Spare Found" },
	{ 0x001B, "P Warm Spare Found" },
	{ 0x001C, "S Sense Data Received" },
	{ 0x001D, "P Initialization Started" },
	{ 0x001E, "P Initialization Completed" },
	{ 0x001F, "P Initialization Failed" },
	{ 0x0020, "P Initialization Cancelled" },
	{ 0x0021, "P Failed because Write Recovery Failed" },
	{ 0x0022, "P Failed because SCSI Bus Reset Failed" },
	{ 0x0023, "P Failed because of Double Check Condition" },
	{ 0x0024, "P Failed because Device Cannot Be Accessed" },
	{ 0x0025, "P Failed because of Gross Error on SCSI Processor" },
	{ 0x0026, "P Failed because of Bad Tag from Device" },
	{ 0x0027, "P Failed because of Command Timeout" },
	{ 0x0028, "P Failed because of System Reset" },
	{ 0x0029, "P Failed because of Busy Status or Parity Error" },
	{ 0x002A, "P Failed because Host Set Device to Failed State" },
	{ 0x002B, "P Failed because of Selection Timeout" },
	{ 0x002C, "P Failed because of SCSI Bus Phase Error" },
	{ 0x002D, "P Failed because Device Returned Unknown Status" },
	{ 0x002E, "P Failed because Device Not Ready" },
	{ 0x002F, "P Failed because Device Not Found at Startup" },
	{ 0x0030, "P Failed because COD Write Operation Failed" },
	{ 0x0031, "P Failed because BDT Write Operation Failed" },
	{ 0x0039, "P Missing at Startup" },
	{ 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
	{ 0x003C, "P Temporarily Offline Device Automatically Made Online" },
	{ 0x003D, "P Standby Rebuild Started" },
	/* Logical Device Events (0x0080 - 0x00FF) */
	{ 0x0080, "M Consistency Check Started" },
	{ 0x0081, "M Consistency Check Completed" },
	{ 0x0082, "M Consistency Check Cancelled" },
	{ 0x0083, "M Consistency Check Completed With Errors" },
	{ 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
	{ 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
	{ 0x0086, "L Offline" },
	{ 0x0087, "L Critical" },
	{ 0x0088, "L Online" },
	{ 0x0089, "M Automatic Rebuild Started" },
	{ 0x008A, "M Manual Rebuild Started" },
	{ 0x008B, "M Rebuild Completed" },
	{ 0x008C, "M Rebuild Cancelled" },
	{ 0x008D, "M Rebuild Failed for Unknown Reasons" },
	{ 0x008E, "M Rebuild Failed due to New Physical Device" },
	{ 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
	{ 0x0090, "M Initialization Started" },
	{ 0x0091, "M Initialization Completed" },
	{ 0x0092, "M Initialization Cancelled" },
	{ 0x0093, "M Initialization Failed" },
	{ 0x0094, "L Found" },
	{ 0x0095, "L Deleted" },
	{ 0x0096, "M Expand Capacity Started" },
	{ 0x0097, "M Expand Capacity Completed" },
	{ 0x0098, "M Expand Capacity Failed" },
	{ 0x0099, "L Bad Block Found" },
	{ 0x009A, "L Size Changed" },
	{ 0x009B, "L Type Changed" },
	{ 0x009C, "L Bad Data Block Found" },
	{ 0x009E, "L Read of Data Block in BDT" },
	{ 0x009F, "L Write Back Data for Disk Block Lost" },
	{ 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
	{ 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
	{ 0x00A2, "L Standby Rebuild Started" },
	/* Fault Management Events (0x0100 - 0x017F) */
	{ 0x0140, "E Fan %d Failed" },
	{ 0x0141, "E Fan %d OK" },
	{ 0x0142, "E Fan %d Not Present" },
	{ 0x0143, "E Power Supply %d Failed" },
	{ 0x0144, "E Power Supply %d OK" },
	{ 0x0145, "E Power Supply %d Not Present" },
	{ 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
	{ 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
	{ 0x0148, "E Temperature Sensor %d Temperature Normal" },
	{ 0x0149, "E Temperature Sensor %d Not Present" },
	{ 0x014A, "E Enclosure Management Unit %d Access Critical" },
	{ 0x014B, "E Enclosure Management Unit %d Access OK" },
	{ 0x014C, "E Enclosure Management Unit %d Access Offline" },
	/* Controller Events (0x0180 - 0x01FF) */
	{ 0x0181, "C Cache Write Back Error" },
	{ 0x0188, "C Battery Backup Unit Found" },
	{ 0x0189, "C Battery Backup Unit Charge Level Low" },
	{ 0x018A, "C Battery Backup Unit Charge Level OK" },
	{ 0x0193, "C Installation Aborted" },
	{ 0x0195, "C Battery Backup Unit Physically Removed" },
	{ 0x0196, "C Memory Error During Warm Boot" },
	{ 0x019E, "C Memory Soft ECC Error Corrected" },
	{ 0x019F, "C Memory Hard ECC Error Corrected" },
	{ 0x01A2, "C Battery Backup Unit Failed" },
	{ 0x01AB, "C Mirror Race Recovery Failed" },
	{ 0x01AC, "C Mirror Race on Critical Drive" },
	/* Controller Internal Processor Events */
	{ 0x0380, "C Internal Controller Hung" },
	{ 0x0381, "C Internal Controller Firmware Breakpoint" },
	{ 0x0390, "C Internal Controller i960 Processor Specific Error" },
	{ 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
	{ 0, "" },
};
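/*
 * The first character of each message encodes the event class used
 * by the switch in myrs_log_event() below: 'P' physical device,
 * 'L' logical drive, 'M' logical drive background/maintenance
 * operation, 'S' physical device sense data, 'E' enclosure (the %d
 * is filled in with ev->lun), 'C' controller.
 */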
static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
{
	unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE];
	int ev_idx = 0, ev_code;
	unsigned char ev_type, *ev_msg;
	struct Scsi_Host *shost = cs->host;
	struct scsi_device *sdev;
	struct scsi_sense_hdr sshdr = {0};
	unsigned char sense_info[4];
	unsigned char cmd_specific[4];

	if (ev->ev_code == 0x1C) {
		if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) {
			memset(&sshdr, 0x0, sizeof(sshdr));
			memset(sense_info, 0x0, sizeof(sense_info));
			memset(cmd_specific, 0x0, sizeof(cmd_specific));
		} else {
			memcpy(sense_info, &ev->sense_data[3], 4);
			memcpy(cmd_specific, &ev->sense_data[7], 4);
		}
	}
	if (sshdr.sense_key == VENDOR_SPECIFIC &&
	    (sshdr.asc == 0x80 || sshdr.asc == 0x81))
		ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq);
	while (true) {
		ev_code = myrs_ev_list[ev_idx].ev_code;
		if (ev_code == ev->ev_code || ev_code == 0)
			break;
		ev_idx++;
	}
	ev_type = myrs_ev_list[ev_idx].ev_msg[0];
	ev_msg = &myrs_ev_list[ev_idx].ev_msg[2];
	if (ev_code == 0) {
		shost_printk(KERN_WARNING, shost,
			     "Unknown Controller Event Code %04X\n",
			     ev->ev_code);
		return;
	}
	switch (ev_type) {
	case 'P':
		sdev = scsi_device_lookup(shost, ev->channel,
					  ev->target, 0);
		sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n",
			    ev->ev_seq, ev_msg);
		if (sdev && sdev->hostdata &&
		    sdev->channel < cs->ctlr_info->physchan_present) {
			struct myrs_pdev_info *pdev_info = sdev->hostdata;

			switch (ev->ev_code) {
			case 0x0001:
			case 0x0007:
				pdev_info->dev_state = MYRS_DEVICE_ONLINE;
				break;
			case 0x0002:
				pdev_info->dev_state = MYRS_DEVICE_STANDBY;
				break;
			case 0x000C:
				pdev_info->dev_state = MYRS_DEVICE_OFFLINE;
				break;
			case 0x000E:
				pdev_info->dev_state = MYRS_DEVICE_MISSING;
				break;
			case 0x000F:
				pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED;
				break;
			}
		}
		break;
	case 'L':
		shost_printk(KERN_INFO, shost,
			     "event %d: Logical Drive %d %s\n",
			     ev->ev_seq, ev->lun, ev_msg);
		cs->needs_update = true;
		break;
	case 'M':
		shost_printk(KERN_INFO, shost,
			     "event %d: Logical Drive %d %s\n",
			     ev->ev_seq, ev->lun, ev_msg);
		cs->needs_update = true;
		break;
	case 'S':
		if (sshdr.sense_key == NO_SENSE ||
		    (sshdr.sense_key == NOT_READY &&
		     sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
					    sshdr.ascq == 0x02)))
			break;
		shost_printk(KERN_INFO, shost,
			     "event %d: Physical Device %d:%d %s\n",
			     ev->ev_seq, ev->channel, ev->target, ev_msg);
		shost_printk(KERN_INFO, shost,
			     "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
			     ev->channel, ev->target,
			     sshdr.sense_key, sshdr.asc, sshdr.ascq);
		shost_printk(KERN_INFO, shost,
			     "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n",
			     ev->channel, ev->target,
			     sense_info[0], sense_info[1],
			     sense_info[2], sense_info[3],
			     cmd_specific[0], cmd_specific[1],
			     cmd_specific[2], cmd_specific[3]);
		break;
	case 'E':
		if (cs->disable_enc_msg)
			break;
		sprintf(msg_buf, ev_msg, ev->lun);
		shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n",
			     ev->ev_seq, ev->target, msg_buf);
		break;
	case 'C':
		shost_printk(KERN_INFO, shost, "event %d: Controller %s\n",
			     ev->ev_seq, ev_msg);
		break;
	default:
		shost_printk(KERN_INFO, shost,
			     "event %d: Unknown Event Code %04X\n",
			     ev->ev_seq, ev->ev_code);
		break;
	}
}
/*
 * SCSI sysfs interface functions
 */
static ssize_t raid_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	int ret;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel >= cs->ctlr_info->physchan_present) {
		struct myrs_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		name = myrs_devstate_name(ldev_info->dev_state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       ldev_info->dev_state);
	} else {
		struct myrs_pdev_info *pdev_info;
		const char *name;

		pdev_info = sdev->hostdata;
		name = myrs_devstate_name(pdev_info->dev_state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       pdev_info->dev_state);
	}
	return ret;
}
static ssize_t raid_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	enum myrs_devstate new_state;
	unsigned short ldev_num;
	unsigned char status;

	if (!strncmp(buf, "offline", 7) ||
	    !strncmp(buf, "kill", 4))
		new_state = MYRS_DEVICE_OFFLINE;
	else if (!strncmp(buf, "online", 6))
		new_state = MYRS_DEVICE_ONLINE;
	else if (!strncmp(buf, "standby", 7))
		new_state = MYRS_DEVICE_STANDBY;
	else
		return -EINVAL;

	if (sdev->channel < cs->ctlr_info->physchan_present) {
		struct myrs_pdev_info *pdev_info = sdev->hostdata;
		struct myrs_devmap *pdev_devmap =
			(struct myrs_devmap *)&pdev_info->rsvd13;

		if (pdev_info->dev_state == new_state) {
			sdev_printk(KERN_INFO, sdev,
				    "Device already in %s\n",
				    myrs_devstate_name(new_state));
			return count;
		}
		status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
					     sdev->lun, pdev_devmap);
		if (status != MYRS_STATUS_SUCCESS)
			return -ENXIO;
		ldev_num = pdev_devmap->ldev_num;
	} else {
		struct myrs_ldev_info *ldev_info = sdev->hostdata;

		if (ldev_info->dev_state == new_state) {
			sdev_printk(KERN_INFO, sdev,
				    "Device already in %s\n",
				    myrs_devstate_name(new_state));
			return count;
		}
		ldev_num = ldev_info->ldev_num;
	}
	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE;
	mbox->set_devstate.state = new_state;
	mbox->set_devstate.ldev.ldev_num = ldev_num;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status == MYRS_STATUS_SUCCESS) {
		if (sdev->channel < cs->ctlr_info->physchan_present) {
			struct myrs_pdev_info *pdev_info = sdev->hostdata;

			pdev_info->dev_state = new_state;
		} else {
			struct myrs_ldev_info *ldev_info = sdev->hostdata;

			ldev_info->dev_state = new_state;
		}
		sdev_printk(KERN_INFO, sdev,
			    "Set device state to %s\n",
			    myrs_devstate_name(new_state));
		return count;
	}
	sdev_printk(KERN_INFO, sdev,
		    "Failed to set device state to %s, status 0x%02x\n",
		    myrs_devstate_name(new_state), status);
	return -EINVAL;
}
static DEVICE_ATTR_RW(raid_state);
static ssize_t raid_level_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	const char *name = NULL;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel >= cs->ctlr_info->physchan_present) {
		struct myrs_ldev_info *ldev_info;

		ldev_info = sdev->hostdata;
		name = myrs_raid_level_name(ldev_info->raid_level);
		if (!name)
			return snprintf(buf, 32, "Invalid (%02X)\n",
					ldev_info->dev_state);
	} else
		name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);

	return snprintf(buf, 32, "%s\n", name);
}
static DEVICE_ATTR_RO(raid_level);
static ssize_t rebuild_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	unsigned short ldev_num;
	unsigned char status;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return snprintf(buf, 32, "physical device - not rebuilding\n");

	ldev_info = sdev->hostdata;
	ldev_num = ldev_info->ldev_num;
	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}
	if (ldev_info->rbld_active) {
		return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
				(size_t)ldev_info->rbld_lba,
				(size_t)ldev_info->cfg_devsize);
	} else
		return snprintf(buf, 32, "not rebuilding\n");
}
static ssize_t rebuild_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	unsigned short ldev_num;
	unsigned char status;
	int rebuild, ret;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return -EINVAL;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	ldev_num = ldev_info->ldev_num;

	ret = kstrtoint(buf, 0, &rebuild);
	if (ret)
		return ret;

	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}

	if (rebuild && ldev_info->rbld_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Initiated; already in progress\n");
		return -EALREADY;
	}
	if (!rebuild && !ldev_info->rbld_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Cancelled; no rebuild in progress\n");
		return count;
	}

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	if (rebuild) {
		mbox->ldev_info.ldev.ldev_num = ldev_num;
		mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START;
	} else {
		mbox->ldev_info.ldev.ldev_num = ldev_num;
		mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP;
	}
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not %s, status 0x%02x\n",
			    rebuild ? "Initiated" : "Cancelled", status);
		ret = -EIO;
	} else {
		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
			    rebuild ? "Initiated" : "Cancelled");
		ret = count;
	}

	return ret;
}
static DEVICE_ATTR_RW(rebuild);
static ssize_t consistency_check_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	unsigned short ldev_num;
	unsigned char status;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return snprintf(buf, 32, "physical device - not checking\n");

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	ldev_num = ldev_info->ldev_num;
	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (ldev_info->cc_active)
		return snprintf(buf, 32, "checking block %zu of %zu\n",
				(size_t)ldev_info->cc_lba,
				(size_t)ldev_info->cfg_devsize);
	else
		return snprintf(buf, 32, "not checking\n");
}
static ssize_t consistency_check_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	unsigned short ldev_num;
	unsigned char status;
	int check, ret;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return -EINVAL;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	ldev_num = ldev_info->ldev_num;

	ret = kstrtoint(buf, 0, &check);
	if (ret)
		return ret;

	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}
	if (check && ldev_info->cc_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Consistency Check Not Initiated; already in progress\n");
		return -EALREADY;
	}
	if (!check && !ldev_info->cc_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Consistency Check Not Cancelled; check not in progress\n");
		return count;
	}

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	if (check) {
		mbox->cc.ldev.ldev_num = ldev_num;
		mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START;
		mbox->cc.restore_consistency = true;
		mbox->cc.initialized_area_only = false;
	} else {
		mbox->cc.ldev.ldev_num = ldev_num;
		mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP;
	}
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Consistency Check Not %s, status 0x%02x\n",
			    check ? "Initiated" : "Cancelled", status);
		ret = -EIO;
	} else {
		sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
			    check ? "Initiated" : "Cancelled");
		ret = count;
	}

	return ret;
}
static DEVICE_ATTR_RW(consistency_check);
static struct device_attribute *myrs_sdev_attrs[] = {
	&dev_attr_consistency_check,
	&dev_attr_rebuild,
	&dev_attr_raid_state,
	&dev_attr_raid_level,
	NULL,
};
static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	char serial[17];

	memcpy(serial, cs->ctlr_info->serial_number, 16);
	serial[16] = '\0';
	return snprintf(buf, 16, "%s\n", serial);
}
static DEVICE_ATTR_RO(serial);
static ssize_t ctlr_num_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 20, "%d\n", cs->host->host_no);
}
static DEVICE_ATTR_RO(ctlr_num);
static struct myrs_cpu_type_tbl {
	enum myrs_cpu_type type;
	char *name;
} myrs_cpu_type_names[] = {
	{ MYRS_CPUTYPE_i960CA, "i960CA" },
	{ MYRS_CPUTYPE_i960RD, "i960RD" },
	{ MYRS_CPUTYPE_i960RN, "i960RN" },
	{ MYRS_CPUTYPE_i960RP, "i960RP" },
	{ MYRS_CPUTYPE_NorthBay, "NorthBay" },
	{ MYRS_CPUTYPE_StrongArm, "StrongARM" },
	{ MYRS_CPUTYPE_i960RM, "i960RM" },
};
static ssize_t processor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	struct myrs_cpu_type_tbl *tbl;
	const char *first_processor = NULL;
	const char *second_processor = NULL;
	struct myrs_ctlr_info *info = cs->ctlr_info;
	ssize_t ret;
	int i;

	if (info->cpu[0].cpu_count) {
		tbl = myrs_cpu_type_names;
		for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
			if (tbl[i].type == info->cpu[0].cpu_type) {
				first_processor = tbl[i].name;
				break;
			}
		}
	}
	if (info->cpu[1].cpu_count) {
		tbl = myrs_cpu_type_names;
		for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
			if (tbl[i].type == info->cpu[1].cpu_type) {
				second_processor = tbl[i].name;
				break;
			}
		}
	}
	if (first_processor && second_processor)
		ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
			       "2: %s (%s, %d cpus)\n",
			       info->cpu[0].cpu_name,
			       first_processor, info->cpu[0].cpu_count,
			       info->cpu[1].cpu_name,
			       second_processor, info->cpu[1].cpu_count);
	else if (first_processor && !second_processor)
		ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
			       info->cpu[0].cpu_name,
			       first_processor, info->cpu[0].cpu_count);
	else if (!first_processor && second_processor)
		ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
			       info->cpu[1].cpu_name,
			       second_processor, info->cpu[1].cpu_count);
	else
		ret = snprintf(buf, 64, "1: absent\n2: absent\n");

	return ret;
}
static DEVICE_ATTR_RO(processor);
static ssize_t model_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 28, "%s\n", cs->model_name);
}
static DEVICE_ATTR_RO(model);
static ssize_t ctlr_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
}
static DEVICE_ATTR_RO(ctlr_type);
static ssize_t cache_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb);
}
static DEVICE_ATTR_RO(cache_size);
static ssize_t firmware_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 16, "%d.%02d-%02d\n",
			cs->ctlr_info->fw_major_version,
			cs->ctlr_info->fw_minor_version,
			cs->ctlr_info->fw_turn_number);
}
static DEVICE_ATTR_RO(firmware);
static ssize_t discovery_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	unsigned char status;

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		shost_printk(KERN_INFO, shost,
			     "Discovery Not Initiated, status %02X\n",
			     status);
		return -EINVAL;
	}
	shost_printk(KERN_INFO, shost, "Discovery Initiated\n");

	cs->needs_update = true;
	queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
	flush_delayed_work(&cs->monitor_work);
	shost_printk(KERN_INFO, shost, "Discovery Completed\n");

	return count;
}
static DEVICE_ATTR_WO(discovery);
static ssize_t flush_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	unsigned char status;

	status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
			     MYRS_RAID_CONTROLLER);
	if (status == MYRS_STATUS_SUCCESS) {
		shost_printk(KERN_INFO, shost, "Cache Flush Completed\n");
		return count;
	}
	shost_printk(KERN_INFO, shost,
		     "Cache Flush failed, status 0x%02x\n", status);
	return -EIO;
}
static DEVICE_ATTR_WO(flush_cache);
static ssize_t disable_enclosure_messages_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
}
static ssize_t disable_enclosure_messages_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	int value, ret;

	ret = kstrtoint(buf, 0, &value);
	if (ret)
		return ret;

	if (value > 2)
		return -EINVAL;

	cs->disable_enc_msg = value;
	return count;
}
static DEVICE_ATTR_RW(disable_enclosure_messages);
static struct device_attribute *myrs_shost_attrs[] = {
	&dev_attr_serial,
	&dev_attr_ctlr_num,
	&dev_attr_processor,
	&dev_attr_model,
	&dev_attr_ctlr_type,
	&dev_attr_cache_size,
	&dev_attr_firmware,
	&dev_attr_discovery,
	&dev_attr_flush_cache,
	&dev_attr_disable_enclosure_messages,
	NULL,
};
/*
 * SCSI midlayer interface
 */
static int myrs_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	struct myrs_hba *cs = shost_priv(shost);

	cs->reset(cs->io_base);
	return SUCCESS;
}
static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
		struct myrs_ldev_info *ldev_info)
{
	unsigned char modes[32], *mode_pg;
	bool dbd;
	size_t mode_len;

	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
	if (dbd) {
		mode_len = 24;
		mode_pg = &modes[4];
	} else {
		mode_len = 32;
		mode_pg = &modes[12];
	}
	memset(modes, 0, sizeof(modes));
	modes[0] = mode_len - 1;
	modes[2] = 0x10; /* Enable FUA */
	if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO)
		modes[2] |= 0x80;
	if (!dbd) {
		unsigned char *block_desc = &modes[4];

		modes[3] = 8;
		put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]);
		put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]);
	}
	mode_pg[0] = 0x08;
	mode_pg[1] = 0x12;
	if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED)
		mode_pg[2] |= 0x01;
	if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
	    ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
		mode_pg[2] |= 0x04;
	if (ldev_info->cacheline_size) {
		mode_pg[2] |= 0x08;
		put_unaligned_be16(1 << ldev_info->cacheline_size,
				   &mode_pg[14]);
	}

	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}
static int myrs_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrs_hba *cs = shost_priv(shost);
	struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	struct scsi_device *sdev = scmd->device;
	union myrs_sgl *hw_sge;
	dma_addr_t sense_addr;
	struct scatterlist *sgl;
	unsigned long flags, timeout;
	int nsge;

	if (!scmd->device->hostdata) {
		scmd->result = (DID_NO_CONNECT << 16);
		scmd->scsi_done(scmd);
		return 0;
	}

	switch (scmd->cmnd[0]) {
	case REPORT_LUNS:
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
					0x20, 0x0);
		scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
		scmd->scsi_done(scmd);
		return 0;
	case MODE_SENSE:
		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
			struct myrs_ldev_info *ldev_info = sdev->hostdata;

			if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
			    (scmd->cmnd[2] & 0x3F) != 0x08) {
				/* Illegal request, invalid field in CDB */
				scsi_build_sense_buffer(0, scmd->sense_buffer,
					ILLEGAL_REQUEST, 0x24, 0);
				scmd->result = (DRIVER_SENSE << 24) |
					SAM_STAT_CHECK_CONDITION;
			} else {
				myrs_mode_sense(cs, scmd, ldev_info);
				scmd->result = (DID_OK << 16);
			}
			scmd->scsi_done(scmd);
			return 0;
		}
		break;
	}

	myrs_reset_cmd(cmd_blk);
	cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
					&sense_addr);
	if (!cmd_blk->sense)
		return SCSI_MLQUEUE_HOST_BUSY;
	cmd_blk->sense_addr = sense_addr;

	timeout = scmd->request->timeout;
	if (scmd->cmd_len <= 10) {
		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
			struct myrs_ldev_info *ldev_info = sdev->hostdata;

			mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10;
			mbox->SCSI_10.pdev.lun = ldev_info->lun;
			mbox->SCSI_10.pdev.target = ldev_info->target;
			mbox->SCSI_10.pdev.channel = ldev_info->channel;
			mbox->SCSI_10.pdev.ctlr = 0;
		} else {
			mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU;
			mbox->SCSI_10.pdev.lun = sdev->lun;
			mbox->SCSI_10.pdev.target = sdev->id;
			mbox->SCSI_10.pdev.channel = sdev->channel;
		}
		mbox->SCSI_10.id = scmd->request->tag + 3;
		mbox->SCSI_10.control.dma_ctrl_to_host =
			(scmd->sc_data_direction == DMA_FROM_DEVICE);
		if (scmd->request->cmd_flags & REQ_FUA)
			mbox->SCSI_10.control.fua = true;
		mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
		mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
		mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE;
		mbox->SCSI_10.cdb_len = scmd->cmd_len;
		if (timeout > 60) {
			mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
			mbox->SCSI_10.tmo.tmo_val = timeout / 60;
		} else {
			mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
			mbox->SCSI_10.tmo.tmo_val = timeout;
		}
		memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len);
		hw_sge = &mbox->SCSI_10.dma_addr;
		cmd_blk->dcdb = NULL;
	} else {
		dma_addr_t dcdb_dma;

		cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
					       &dcdb_dma);
		if (!cmd_blk->dcdb) {
			dma_pool_free(cs->sense_pool, cmd_blk->sense,
				      cmd_blk->sense_addr);
			cmd_blk->sense = NULL;
			cmd_blk->sense_addr = 0;
			return SCSI_MLQUEUE_HOST_BUSY;
		}
		cmd_blk->dcdb_dma = dcdb_dma;
		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
			struct myrs_ldev_info *ldev_info = sdev->hostdata;

			mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256;
			mbox->SCSI_255.pdev.lun = ldev_info->lun;
			mbox->SCSI_255.pdev.target = ldev_info->target;
			mbox->SCSI_255.pdev.channel = ldev_info->channel;
			mbox->SCSI_255.pdev.ctlr = 0;
		} else {
			mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU;
			mbox->SCSI_255.pdev.lun = sdev->lun;
			mbox->SCSI_255.pdev.target = sdev->id;
			mbox->SCSI_255.pdev.channel = sdev->channel;
		}
		mbox->SCSI_255.id = scmd->request->tag + 3;
		mbox->SCSI_255.control.dma_ctrl_to_host =
			(scmd->sc_data_direction == DMA_FROM_DEVICE);
		if (scmd->request->cmd_flags & REQ_FUA)
			mbox->SCSI_255.control.fua = true;
		mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
		mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
		mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE;
		mbox->SCSI_255.cdb_len = scmd->cmd_len;
		mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma;
		if (timeout > 60) {
			mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
			mbox->SCSI_255.tmo.tmo_val = timeout / 60;
		} else {
			mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
			mbox->SCSI_255.tmo.tmo_val = timeout;
		}
		memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len);
		hw_sge = &mbox->SCSI_255.dma_addr;
	}
	if (scmd->sc_data_direction == DMA_NONE)
		goto submit;
	nsge = scsi_dma_map(scmd);
	if (nsge == 1) {
		sgl = scsi_sglist(scmd);
		hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl);
		hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl);
	} else {
		struct myrs_sge *hw_sgl;
		dma_addr_t hw_sgl_addr;
		int i;

		if (nsge > 2) {
			hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
						&hw_sgl_addr);
			if (WARN_ON(!hw_sgl)) {
				if (cmd_blk->dcdb) {
					dma_pool_free(cs->dcdb_pool,
						      cmd_blk->dcdb,
						      cmd_blk->dcdb_dma);
					cmd_blk->dcdb = NULL;
					cmd_blk->dcdb_dma = 0;
				}
				dma_pool_free(cs->sense_pool,
					      cmd_blk->sense,
					      cmd_blk->sense_addr);
				cmd_blk->sense = NULL;
				cmd_blk->sense_addr = 0;
				return SCSI_MLQUEUE_HOST_BUSY;
			}
			cmd_blk->sgl = hw_sgl;
			cmd_blk->sgl_addr = hw_sgl_addr;
			if (scmd->cmd_len <= 10)
				mbox->SCSI_10.control.add_sge_mem = true;
			else
				mbox->SCSI_255.control.add_sge_mem = true;
			hw_sge->ext.sge0_len = nsge;
			hw_sge->ext.sge0_addr = cmd_blk->sgl_addr;
		} else
			hw_sgl = hw_sge->sge;

		scsi_for_each_sg(scmd, sgl, nsge, i) {
			if (WARN_ON(!hw_sgl)) {
				scsi_dma_unmap(scmd);
				scmd->result = (DID_ERROR << 16);
				scmd->scsi_done(scmd);
				return 0;
			}
			hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
			hw_sgl->sge_count = (u64)sg_dma_len(sgl);
			hw_sgl++;
		}
	}
submit:
	spin_lock_irqsave(&cs->queue_lock, flags);
	myrs_qcmd(cs, cmd_blk);
	spin_unlock_irqrestore(&cs->queue_lock, flags);

	return 0;
}
static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
		struct scsi_device *sdev)
{
	unsigned short ldev_num;
	unsigned int chan_offset =
		sdev->channel - cs->ctlr_info->physchan_present;

	ldev_num = sdev->id + chan_offset * sdev->host->max_id;

	return ldev_num;
}
static int myrs_slave_alloc(struct scsi_device *sdev)
{
	struct myrs_hba *cs = shost_priv(sdev->host);
	unsigned char status;

	if (sdev->channel > sdev->host->max_channel)
		return 0;

	if (sdev->channel >= cs->ctlr_info->physchan_present) {
		struct myrs_ldev_info *ldev_info;
		unsigned short ldev_num;

		if (sdev->lun > 0)
			return -ENXIO;

		ldev_num = myrs_translate_ldev(cs, sdev);

		ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
		if (!ldev_info)
			return -ENOMEM;

		status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
		if (status != MYRS_STATUS_SUCCESS) {
			sdev->hostdata = NULL;
			kfree(ldev_info);
		} else {
			enum raid_level level;

			dev_dbg(&sdev->sdev_gendev,
				"Logical device mapping %d:%d:%d -> %d\n",
				ldev_info->channel, ldev_info->target,
				ldev_info->lun, ldev_info->ldev_num);

			sdev->hostdata = ldev_info;
			switch (ldev_info->raid_level) {
			case MYRS_RAID_LEVEL0:
				level = RAID_LEVEL_LINEAR;
				break;
			case MYRS_RAID_LEVEL1:
				level = RAID_LEVEL_1;
				break;
			case MYRS_RAID_LEVEL3:
			case MYRS_RAID_LEVEL3F:
			case MYRS_RAID_LEVEL3L:
				level = RAID_LEVEL_3;
				break;
			case MYRS_RAID_LEVEL5:
			case MYRS_RAID_LEVEL5L:
				level = RAID_LEVEL_5;
				break;
			case MYRS_RAID_LEVEL6:
				level = RAID_LEVEL_6;
				break;
			case MYRS_RAID_LEVELE:
			case MYRS_RAID_NEWSPAN:
			case MYRS_RAID_SPAN:
				level = RAID_LEVEL_LINEAR;
				break;
			case MYRS_RAID_JBOD:
				level = RAID_LEVEL_JBOD;
				break;
			default:
				level = RAID_LEVEL_UNKNOWN;
				break;
			}
			raid_set_level(myrs_raid_template,
				       &sdev->sdev_gendev, level);
			if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) {
				const char *name;

				name = myrs_devstate_name(ldev_info->dev_state);
				sdev_printk(KERN_DEBUG, sdev,
					    "logical device in state %s\n",
					    name ? name : "Invalid");
			}
		}
	} else {
		struct myrs_pdev_info *pdev_info;

		pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
		if (!pdev_info)
			return -ENOMEM;

		status = myrs_get_pdev_info(cs, sdev->channel,
					    sdev->id, sdev->lun,
					    pdev_info);
		if (status != MYRS_STATUS_SUCCESS) {
			sdev->hostdata = NULL;
			kfree(pdev_info);
			return -ENXIO;
		}
		sdev->hostdata = pdev_info;
	}
	return 0;
}
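/*
 * myrs_slave_configure - hides physical devices from upper-layer drivers
 * and applies the controller's write-cache setting to logical drives.
 */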
static int myrs_slave_configure(struct scsi_device *sdev)
{
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;

	if (sdev->channel > sdev->host->max_channel)
		return -ENXIO;

	if (sdev->channel < cs->ctlr_info->physchan_present) {
		/* Skip HBA device */
		if (sdev->type == TYPE_RAID)
			return -ENXIO;
		sdev->no_uld_attach = 1;
		return 0;
	}
	if (sdev->lun != 0)
		return -ENXIO;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
	    ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
		sdev->wce_default_on = 1;
	sdev->tagged_supported = 1;
	return 0;
}
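/*
 * myrs_slave_destroy - frees the per-device state allocated in slave_alloc.
 */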
static void myrs_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
}
struct scsi_host_template myrs_template = {
	.module			= THIS_MODULE,
	.name			= "DAC960",
	.proc_name		= "myrs",
	.queuecommand		= myrs_queuecommand,
	.eh_host_reset_handler	= myrs_host_reset,
	.slave_alloc		= myrs_slave_alloc,
	.slave_configure	= myrs_slave_configure,
	.slave_destroy		= myrs_slave_destroy,
	.cmd_size		= sizeof(struct myrs_cmdblk),
	.shost_attrs		= myrs_shost_attrs,
	.sdev_attrs		= myrs_sdev_attrs,
	.this_id		= -1,
};
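/*
 * myrs_alloc_host - allocates the Scsi_Host with the embedded HBA state.
 */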
static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct Scsi_Host *shost;
	struct myrs_hba *cs;

	shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba));
	if (!shost)
		return NULL;

	shost->max_cmd_len = 16;
	shost->max_lun = 256;
	cs = shost_priv(shost);
	mutex_init(&cs->dcmd_mutex);
	mutex_init(&cs->cinfo_mutex);
	cs->host = shost;

	return cs;
}
/*
 * RAID template functions
 */

/**
 * myrs_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
 */
static int
myrs_is_raid(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);

	return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0;
}
/**
 * myrs_get_resync - get raid volume resync percent complete
 * @dev: the device struct object
 */
static void
myrs_get_resync(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info = sdev->hostdata;
	u64 percent_complete = 0;
	unsigned char status;

	if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
		return;
	if (ldev_info->rbld_active) {
		unsigned short ldev_num = ldev_info->ldev_num;

		status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
		percent_complete = ldev_info->rbld_lba * 100;
		do_div(percent_complete, ldev_info->cfg_devsize);
	}
	raid_set_resync(myrs_raid_template, dev, percent_complete);
}
/**
 * myrs_get_state - get raid volume status
 * @dev: the device struct object
 */
static void
myrs_get_state(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info = sdev->hostdata;
	enum raid_state state = RAID_STATE_UNKNOWN;

	if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
		state = RAID_STATE_UNKNOWN;
	else {
		switch (ldev_info->dev_state) {
		case MYRS_DEVICE_ONLINE:
			state = RAID_STATE_ACTIVE;
			break;
		case MYRS_DEVICE_SUSPECTED_CRITICAL:
		case MYRS_DEVICE_CRITICAL:
			state = RAID_STATE_DEGRADED;
			break;
		case MYRS_DEVICE_REBUILD:
			state = RAID_STATE_RESYNCING;
			break;
		case MYRS_DEVICE_UNCONFIGURED:
		case MYRS_DEVICE_INVALID_STATE:
			state = RAID_STATE_UNKNOWN;
			break;
		default:
			state = RAID_STATE_OFFLINE;
		}
	}
	raid_set_state(myrs_raid_template, dev, state);
}
struct raid_function_template myrs_raid_functions = {
	.cookie		= &myrs_template,
	.is_raid	= myrs_is_raid,
	.get_resync	= myrs_get_resync,
	.get_state	= myrs_get_state,
};
/*
 * PCI interface functions
 */
void myrs_flush_cache(struct myrs_hba *cs)
{
	myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
}
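/*
 * myrs_handle_scsi - completes a SCSI command: copies sense data,
 * releases the per-command DMA buffers, and calls scsi_done.
 */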
static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
		struct scsi_cmnd *scmd)
{
	unsigned char status;

	if (!cmd_blk)
		return;

	scsi_dma_unmap(scmd);
	status = cmd_blk->status;
	if (cmd_blk->sense) {
		if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) {
			unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;

			if (sense_len > cmd_blk->sense_len)
				sense_len = cmd_blk->sense_len;
			memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
		}
		dma_pool_free(cs->sense_pool, cmd_blk->sense,
			      cmd_blk->sense_addr);
		cmd_blk->sense = NULL;
		cmd_blk->sense_addr = 0;
	}
	if (cmd_blk->dcdb) {
		dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
			      cmd_blk->dcdb_dma);
		cmd_blk->dcdb = NULL;
		cmd_blk->dcdb_dma = 0;
	}
	if (cmd_blk->sgl) {
		dma_pool_free(cs->sg_pool, cmd_blk->sgl,
			      cmd_blk->sgl_addr);
		cmd_blk->sgl = NULL;
		cmd_blk->sgl_addr = 0;
	}
	if (cmd_blk->residual)
		scsi_set_resid(scmd, cmd_blk->residual);
	if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE ||
	    status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2)
		scmd->result = (DID_BAD_TARGET << 16);
	else
		scmd->result = (DID_OK << 16) | status;
	scmd->scsi_done(scmd);
}
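/*
 * myrs_handle_cmdblk - signals completion of an internal driver command.
 */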
static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
{
	if (!cmd_blk)
		return;

	if (cmd_blk->complete) {
		complete(cmd_blk->complete);
		cmd_blk->complete = NULL;
	}
}
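/*
 * myrs_monitor - periodic work function polling firmware status,
 * controller events, and rebuild/initialization progress.
 */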
static void myrs_monitor(struct work_struct *work)
{
	struct myrs_hba *cs = container_of(work, struct myrs_hba,
					   monitor_work.work);
	struct Scsi_Host *shost = cs->host;
	struct myrs_ctlr_info *info = cs->ctlr_info;
	unsigned int epoch = cs->fwstat_buf->epoch;
	unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL;
	unsigned char status;

	dev_dbg(&shost->shost_gendev, "monitor tick\n");

	status = myrs_get_fwstatus(cs);

	if (cs->needs_update) {
		cs->needs_update = false;
		mutex_lock(&cs->cinfo_mutex);
		status = myrs_get_ctlr_info(cs);
		mutex_unlock(&cs->cinfo_mutex);
	}
	if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
		status = myrs_get_event(cs, cs->next_evseq,
					cs->event_buf);
		if (status == MYRS_STATUS_SUCCESS) {
			myrs_log_event(cs, cs->event_buf);
			cs->next_evseq++;
			interval = 1;
		}
	}

	if (time_after(jiffies, cs->secondary_monitor_time
		       + MYRS_SECONDARY_MONITOR_INTERVAL))
		cs->secondary_monitor_time = jiffies;

	if (info->bg_init_active +
	    info->ldev_init_active +
	    info->pdev_init_active +
	    info->cc_active +
	    info->rbld_active +
	    info->exp_active != 0) {
		struct scsi_device *sdev;

		shost_for_each_device(sdev, shost) {
			struct myrs_ldev_info *ldev_info;
			int ldev_num;

			if (sdev->channel < info->physchan_present)
				continue;
			ldev_info = sdev->hostdata;
			if (!ldev_info)
				continue;
			ldev_num = ldev_info->ldev_num;
			myrs_get_ldev_info(cs, ldev_num, ldev_info);
		}
		cs->needs_update = true;
	}
	if (epoch == cs->epoch &&
	    cs->fwstat_buf->next_evseq == cs->next_evseq &&
	    (cs->needs_update == false ||
	     time_before(jiffies, cs->primary_monitor_time
			 + MYRS_PRIMARY_MONITOR_INTERVAL))) {
		interval = MYRS_SECONDARY_MONITOR_INTERVAL;
	}

	if (interval > 1)
		cs->primary_monitor_time = jiffies;
	queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
}
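/*
 * myrs_create_mempools - allocates the per-controller DMA pools and
 * starts the monitoring workqueue.
 */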
static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
{
	struct Scsi_Host *shost = cs->host;
	size_t elem_size, elem_align;

	elem_align = sizeof(struct myrs_sge);
	elem_size = shost->sg_tablesize * elem_align;
	cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
				      elem_size, elem_align, 0);
	if (cs->sg_pool == NULL) {
		shost_printk(KERN_ERR, shost,
			     "Failed to allocate SG pool\n");
		return false;
	}

	cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
					 MYRS_SENSE_SIZE, sizeof(int), 0);
	if (cs->sense_pool == NULL) {
		dma_pool_destroy(cs->sg_pool);
		cs->sg_pool = NULL;
		shost_printk(KERN_ERR, shost,
			     "Failed to allocate sense data pool\n");
		return false;
	}

	cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
					MYRS_DCDB_SIZE,
					sizeof(unsigned char), 0);
	if (!cs->dcdb_pool) {
		dma_pool_destroy(cs->sg_pool);
		cs->sg_pool = NULL;
		dma_pool_destroy(cs->sense_pool);
		cs->sense_pool = NULL;
		shost_printk(KERN_ERR, shost,
			     "Failed to allocate DCDB pool\n");
		return false;
	}

	snprintf(cs->work_q_name, sizeof(cs->work_q_name),
		 "myrs_wq_%d", shost->host_no);
	cs->work_q = create_singlethread_workqueue(cs->work_q_name);
	if (!cs->work_q) {
		dma_pool_destroy(cs->dcdb_pool);
		cs->dcdb_pool = NULL;
		dma_pool_destroy(cs->sg_pool);
		cs->sg_pool = NULL;
		dma_pool_destroy(cs->sense_pool);
		cs->sense_pool = NULL;
		shost_printk(KERN_ERR, shost,
			     "Failed to create workqueue\n");
		return false;
	}

	/* Initialize the Monitoring Timer. */
	INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
	queue_delayed_work(cs->work_q, &cs->monitor_work, 1);

	return true;
}
static void myrs_destroy_mempools(struct myrs_hba *cs)
{
	cancel_delayed_work_sync(&cs->monitor_work);
	destroy_workqueue(cs->work_q);

	dma_pool_destroy(cs->sg_pool);
	dma_pool_destroy(cs->dcdb_pool);
	dma_pool_destroy(cs->sense_pool);
}
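/*
 * myrs_unmap - frees the controller information buffers and the
 * command/status mailbox DMA areas.
 */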
static void myrs_unmap(struct myrs_hba *cs)
{
	kfree(cs->event_buf);
	kfree(cs->ctlr_info);
	if (cs->fwstat_buf) {
		dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
				  cs->fwstat_buf, cs->fwstat_addr);
		cs->fwstat_buf = NULL;
	}
	if (cs->first_stat_mbox) {
		dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
				  cs->first_stat_mbox, cs->stat_mbox_addr);
		cs->first_stat_mbox = NULL;
	}
	if (cs->first_cmd_mbox) {
		dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
				  cs->first_cmd_mbox, cs->cmd_mbox_addr);
		cs->first_cmd_mbox = NULL;
	}
}
static void myrs_cleanup(struct myrs_hba *cs)
{
	struct pci_dev *pdev = cs->pdev;

	/* Free the memory mailbox, status, and related structures */
	myrs_unmap(cs);

	if (cs->mmio_base) {
		cs->disable_intr(cs->io_base);
		/* Unmap once and clear the pointer so it is not unmapped again */
		iounmap(cs->mmio_base);
		cs->mmio_base = NULL;
	}
	if (cs->irq)
		free_irq(cs->irq, cs);
	if (cs->io_addr)
		release_region(cs->io_addr, 0x80);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	scsi_host_put(cs->host);
}
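/*
 * myrs_detect - enables the PCI device, maps the controller register
 * window, and runs the hardware-specific initialization.
 */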
static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrs_privdata *privdata =
		(struct myrs_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct myrs_hba *cs = NULL;

	cs = myrs_alloc_host(pdev, entry);
	if (!cs) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	cs->pdev = pdev;

	if (pci_enable_device(pdev))
		goto Failure;

	cs->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cs);
	spin_lock_init(&cs->queue_lock);
	/* Map the Controller Register Window. */
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size);
	if (cs->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto Failure;
	}

	cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cs, cs->io_base))
		goto Failure;

	/* Acquire shared access to the IRQ Channel. */
	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto Failure;
	}
	cs->irq = pdev->irq;
	return cs;

Failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrs_cleanup(cs);
	return NULL;
}
/*
 * myrs_err_status reports Controller BIOS Messages passed through
 * the Error Status Register when the driver performs the BIOS handshaking.
 * It returns true for fatal errors and false otherwise.
 */
static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
		unsigned char parm0, unsigned char parm1)
{
	struct pci_dev *pdev = cs->pdev;

	switch (status) {
	case 0x00:
		dev_info(&pdev->dev,
			 "Physical Device %d:%d Not Responding\n",
			 parm1, parm0);
		break;
	case 0x08:
		dev_notice(&pdev->dev, "Spinning Up Drives\n");
		break;
	case 0x30:
		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
		break;
	case 0x60:
		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
		break;
	case 0x70:
		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
		break;
	case 0x90:
		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
			   parm1, parm0);
		break;
	case 0xA0:
		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
		break;
	case 0xB0:
		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
		break;
	case 0xD0:
		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
		break;
	case 0xF0:
		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
		return true;
	default:
		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
			status);
		return true;
	}
	return false;
}
/*
 * Hardware-specific functions
 */

/*
 * DAC960 GEM Series Controllers.
 */
static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);

	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
}

static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24);

	writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
}

static inline void DAC960_GEM_gen_intr(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_IDB_GEN_IRQ << 24);

	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
}

static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);

	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
}

static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);

	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
}

static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base)
{
	__le32 val;

	val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
	return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL;
}

static inline bool DAC960_GEM_init_in_progress(void __iomem *base)
{
	__le32 val;

	val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
	return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS;
}

static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24);

	writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
}

static inline void DAC960_GEM_ack_mem_mbox_intr(void __iomem *base)
{
	__le32 val = cpu_to_le32(DAC960_GEM_ODB_MMBOX_ACK_IRQ << 24);

	writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
}

static inline void DAC960_GEM_ack_intr(void __iomem *base)
{
	__le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
				  DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24);

	writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
}

static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
{
	__le32 val;

	val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
	return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
}

static inline bool DAC960_GEM_mem_mbox_status_available(void __iomem *base)
{
	__le32 val;

	val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
	return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_MMBOX_STS_AVAIL;
}

static inline void DAC960_GEM_enable_intr(void __iomem *base)
{
	__le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
				  DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);
	writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET);
}

static inline void DAC960_GEM_disable_intr(void __iomem *base)
{
	__le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
				  DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);
	writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
}

static inline bool DAC960_GEM_intr_enabled(void __iomem *base)
{
	__le32 val;

	val = readl(base + DAC960_GEM_IRQMASK_READ_OFFSET);
	return !((le32_to_cpu(val) >> 24) &
		 (DAC960_GEM_IRQMASK_HWMBOX_IRQ |
		  DAC960_GEM_IRQMASK_MMBOX_IRQ));
}

static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
		union myrs_cmd_mbox *mbox)
{
	memcpy(&mem_mbox->words[1], &mbox->words[1],
	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
	/* Barrier to avoid reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Barrier to force PCI access */
	mb();
}

static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
		dma_addr_t cmd_mbox_addr)
{
	dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
}

static inline unsigned short DAC960_GEM_read_cmd_ident(void __iomem *base)
{
	return readw(base + DAC960_GEM_CMDSTS_OFFSET);
}

static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
{
	return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
}

static inline bool
DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	__le32 val;

	val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET);
	if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING))
		return false;
	*error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24);
	*param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0);
	*param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1);
	writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET);
	return true;
}

static inline unsigned char
DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
{
	unsigned char status;

	while (DAC960_GEM_hw_mbox_is_full(base))
		udelay(1);
	DAC960_GEM_write_hw_mbox(base, mbox_addr);
	DAC960_GEM_hw_mbox_new_cmd(base);
	while (!DAC960_GEM_hw_mbox_status_available(base))
		udelay(1);
	status = DAC960_GEM_read_cmd_status(base);
	DAC960_GEM_ack_hw_mbox_intr(base);
	DAC960_GEM_ack_hw_mbox_status(base);

	return status;
}
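/*
 * DAC960_GEM_hw_init - waits for the controller initialization
 * handshake to complete and enables the memory mailbox interface.
 */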
static int DAC960_GEM_hw_init(struct pci_dev *pdev,
		struct myrs_hba *cs, void __iomem *base)
{
	int timeout = 0;
	unsigned char status, parm0, parm1;

	DAC960_GEM_disable_intr(base);
	DAC960_GEM_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_GEM_init_in_progress(base) &&
	       timeout < MYRS_MAILBOX_TIMEOUT) {
		if (DAC960_GEM_read_error_status(base, &status,
						 &parm0, &parm1) &&
		    myrs_err_status(cs, status, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRS_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_GEM_reset_ctrl(base);
		return -EAGAIN;
	}
	DAC960_GEM_enable_intr(base);
	cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox;
	cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd;
	cs->disable_intr = DAC960_GEM_disable_intr;
	cs->reset = DAC960_GEM_reset_ctrl;

	return 0;
}
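/*
 * DAC960_GEM_intr_handler - walks the status mailbox ring and
 * completes the corresponding commands.
 */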
static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
{
	struct myrs_hba *cs = arg;
	void __iomem *base = cs->io_base;
	struct myrs_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cs->queue_lock, flags);
	DAC960_GEM_ack_intr(base);
	next_stat_mbox = cs->next_stat_mbox;
	while (next_stat_mbox->id > 0) {
		unsigned short id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrs_cmdblk *cmd_blk = NULL;

		if (id == MYRS_DCMD_TAG)
			cmd_blk = &cs->dcmd_blk;
		else if (id == MYRS_MCMD_TAG)
			cmd_blk = &cs->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cs->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk) {
			cmd_blk->status = next_stat_mbox->status;
			cmd_blk->sense_len = next_stat_mbox->sense_len;
			cmd_blk->residual = next_stat_mbox->residual;
		} else
			dev_err(&cs->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
		if (++next_stat_mbox > cs->last_stat_mbox)
			next_stat_mbox = cs->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrs_handle_cmdblk(cs, cmd_blk);
			else
				myrs_handle_scsi(cs, cmd_blk, scmd);
		}
	}
	cs->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cs->queue_lock, flags);
	return IRQ_HANDLED;
}

struct myrs_privdata DAC960_GEM_privdata = {
	.hw_init	= DAC960_GEM_hw_init,
	.irq_handler	= DAC960_GEM_intr_handler,
	.mmio_size	= DAC960_GEM_mmio_size,
};
/*
 * DAC960 BA Series Controllers.
 */
static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
}

static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
}

static inline void DAC960_BA_gen_intr(void __iomem *base)
{
	writeb(DAC960_BA_IDB_GEN_IRQ, base + DAC960_BA_IDB_OFFSET);
}

static inline void DAC960_BA_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
}

static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
}

static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_BA_IDB_OFFSET);
	return !(val & DAC960_BA_IDB_HWMBOX_EMPTY);
}

static inline bool DAC960_BA_init_in_progress(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_BA_IDB_OFFSET);
	return !(val & DAC960_BA_IDB_INIT_DONE);
}

static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
{
	writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
}

static inline void DAC960_BA_ack_mem_mbox_intr(void __iomem *base)
{
	writeb(DAC960_BA_ODB_MMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
}

static inline void DAC960_BA_ack_intr(void __iomem *base)
{
	writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_BA_ODB_OFFSET);
}

static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_BA_ODB_OFFSET);
	return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
}

static inline bool DAC960_BA_mem_mbox_status_available(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_BA_ODB_OFFSET);
	return val & DAC960_BA_ODB_MMBOX_STS_AVAIL;
}

static inline void DAC960_BA_enable_intr(void __iomem *base)
{
	writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
}

static inline void DAC960_BA_disable_intr(void __iomem *base)
{
	writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
}

static inline bool DAC960_BA_intr_enabled(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_BA_IRQMASK_OFFSET);
	return !(val & DAC960_BA_IRQMASK_DISABLE_IRQ);
}

static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
		union myrs_cmd_mbox *mbox)
{
	memcpy(&mem_mbox->words[1], &mbox->words[1],
	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
	/* Barrier to avoid reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Barrier to force PCI access */
	mb();
}

static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
		dma_addr_t cmd_mbox_addr)
{
	dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
}

static inline unsigned short DAC960_BA_read_cmd_ident(void __iomem *base)
{
	return readw(base + DAC960_BA_CMDSTS_OFFSET);
}

static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
{
	return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
}

static inline bool
DAC960_BA_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	u8 val;

	val = readb(base + DAC960_BA_ERRSTS_OFFSET);
	if (!(val & DAC960_BA_ERRSTS_PENDING))
		return false;
	val &= ~DAC960_BA_ERRSTS_PENDING;
	*error = val;
	*param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0);
	*param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1);
	writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET);
	return true;
}

static inline unsigned char
DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
{
	unsigned char status;

	while (DAC960_BA_hw_mbox_is_full(base))
		udelay(1);
	DAC960_BA_write_hw_mbox(base, mbox_addr);
	DAC960_BA_hw_mbox_new_cmd(base);
	while (!DAC960_BA_hw_mbox_status_available(base))
		udelay(1);
	status = DAC960_BA_read_cmd_status(base);
	DAC960_BA_ack_hw_mbox_intr(base);
	DAC960_BA_ack_hw_mbox_status(base);

	return status;
}
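/*
 * DAC960_BA_hw_init - waits for the controller initialization
 * handshake to complete and enables the memory mailbox interface.
 */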
static int DAC960_BA_hw_init(struct pci_dev *pdev,
		struct myrs_hba *cs, void __iomem *base)
{
	int timeout = 0;
	unsigned char status, parm0, parm1;

	DAC960_BA_disable_intr(base);
	DAC960_BA_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_BA_init_in_progress(base) &&
	       timeout < MYRS_MAILBOX_TIMEOUT) {
		if (DAC960_BA_read_error_status(base, &status,
						&parm0, &parm1) &&
		    myrs_err_status(cs, status, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRS_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_BA_reset_ctrl(base);
		return -EAGAIN;
	}
	DAC960_BA_enable_intr(base);
	cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox;
	cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd;
	cs->disable_intr = DAC960_BA_disable_intr;
	cs->reset = DAC960_BA_reset_ctrl;

	return 0;
}
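/*
 * DAC960_BA_intr_handler - walks the status mailbox ring and
 * completes the corresponding commands.
 */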
static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
{
	struct myrs_hba *cs = arg;
	void __iomem *base = cs->io_base;
	struct myrs_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cs->queue_lock, flags);
	DAC960_BA_ack_intr(base);
	next_stat_mbox = cs->next_stat_mbox;
	while (next_stat_mbox->id > 0) {
		unsigned short id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrs_cmdblk *cmd_blk = NULL;

		if (id == MYRS_DCMD_TAG)
			cmd_blk = &cs->dcmd_blk;
		else if (id == MYRS_MCMD_TAG)
			cmd_blk = &cs->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cs->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk) {
			cmd_blk->status = next_stat_mbox->status;
			cmd_blk->sense_len = next_stat_mbox->sense_len;
			cmd_blk->residual = next_stat_mbox->residual;
		} else
			dev_err(&cs->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
		if (++next_stat_mbox > cs->last_stat_mbox)
			next_stat_mbox = cs->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrs_handle_cmdblk(cs, cmd_blk);
			else
				myrs_handle_scsi(cs, cmd_blk, scmd);
		}
	}
	cs->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cs->queue_lock, flags);
	return IRQ_HANDLED;
}

struct myrs_privdata DAC960_BA_privdata = {
	.hw_init	= DAC960_BA_hw_init,
	.irq_handler	= DAC960_BA_intr_handler,
	.mmio_size	= DAC960_BA_mmio_size,
};
/*
 * DAC960 LP Series Controllers.
 */
static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
}

static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
}

static inline void DAC960_LP_gen_intr(void __iomem *base)
{
	writeb(DAC960_LP_IDB_GEN_IRQ, base + DAC960_LP_IDB_OFFSET);
}

static inline void DAC960_LP_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
}

static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
}

static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_LP_IDB_OFFSET);
	return val & DAC960_LP_IDB_HWMBOX_FULL;
}

static inline bool DAC960_LP_init_in_progress(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_LP_IDB_OFFSET);
	return val & DAC960_LP_IDB_INIT_IN_PROGRESS;
}

static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
}

static inline void DAC960_LP_ack_mem_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LP_ODB_MMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
}

static inline void DAC960_LP_ack_intr(void __iomem *base)
{
	writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_LP_ODB_OFFSET);
}

static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_LP_ODB_OFFSET);
	return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
}

static inline bool DAC960_LP_mem_mbox_status_available(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_LP_ODB_OFFSET);
	return val & DAC960_LP_ODB_MMBOX_STS_AVAIL;
}

static inline void DAC960_LP_enable_intr(void __iomem *base)
{
	writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
}

static inline void DAC960_LP_disable_intr(void __iomem *base)
{
	writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
}

static inline bool DAC960_LP_intr_enabled(void __iomem *base)
{
	u8 val;

	val = readb(base + DAC960_LP_IRQMASK_OFFSET);
	return !(val & DAC960_LP_IRQMASK_DISABLE_IRQ);
}

static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
		union myrs_cmd_mbox *mbox)
{
	memcpy(&mem_mbox->words[1], &mbox->words[1],
	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
	/* Barrier to avoid reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Barrier to force PCI access */
	mb();
}

static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
		dma_addr_t cmd_mbox_addr)
{
	dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
}

static inline unsigned short DAC960_LP_read_cmd_ident(void __iomem *base)
{
	return readw(base + DAC960_LP_CMDSTS_OFFSET);
}

static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
{
	return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
}

static inline bool
DAC960_LP_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	u8 val;

	val = readb(base + DAC960_LP_ERRSTS_OFFSET);
	if (!(val & DAC960_LP_ERRSTS_PENDING))
		return false;
	val &= ~DAC960_LP_ERRSTS_PENDING;
	*error = val;
	*param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0);
	*param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1);
	writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET);
	return true;
}

static inline unsigned char
DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
{
	unsigned char status;

	while (DAC960_LP_hw_mbox_is_full(base))
		udelay(1);
	DAC960_LP_write_hw_mbox(base, mbox_addr);
	DAC960_LP_hw_mbox_new_cmd(base);
	while (!DAC960_LP_hw_mbox_status_available(base))
		udelay(1);
	status = DAC960_LP_read_cmd_status(base);
	DAC960_LP_ack_hw_mbox_intr(base);
	DAC960_LP_ack_hw_mbox_status(base);

	return status;
}
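/*
 * DAC960_LP_hw_init - waits for the controller initialization
 * handshake to complete and enables the memory mailbox interface.
 */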
static int DAC960_LP_hw_init(struct pci_dev *pdev,
		struct myrs_hba *cs, void __iomem *base)
{
	int timeout = 0;
	unsigned char status, parm0, parm1;

	DAC960_LP_disable_intr(base);
	DAC960_LP_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_LP_init_in_progress(base) &&
	       timeout < MYRS_MAILBOX_TIMEOUT) {
		if (DAC960_LP_read_error_status(base, &status,
						&parm0, &parm1) &&
		    myrs_err_status(cs, status, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRS_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_LP_reset_ctrl(base);
		return -EAGAIN;
	}
	DAC960_LP_enable_intr(base);
	cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox;
	cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd;
	cs->disable_intr = DAC960_LP_disable_intr;
	cs->reset = DAC960_LP_reset_ctrl;

	return 0;
}
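/*
 * DAC960_LP_intr_handler - walks the status mailbox ring and
 * completes the corresponding commands.
 */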
static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
{
	struct myrs_hba *cs = arg;
	void __iomem *base = cs->io_base;
	struct myrs_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cs->queue_lock, flags);
	DAC960_LP_ack_intr(base);
	next_stat_mbox = cs->next_stat_mbox;
	while (next_stat_mbox->id > 0) {
		unsigned short id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrs_cmdblk *cmd_blk = NULL;

		if (id == MYRS_DCMD_TAG)
			cmd_blk = &cs->dcmd_blk;
		else if (id == MYRS_MCMD_TAG)
			cmd_blk = &cs->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cs->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk) {
			cmd_blk->status = next_stat_mbox->status;
			cmd_blk->sense_len = next_stat_mbox->sense_len;
			cmd_blk->residual = next_stat_mbox->residual;
		} else
			dev_err(&cs->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
		if (++next_stat_mbox > cs->last_stat_mbox)
			next_stat_mbox = cs->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrs_handle_cmdblk(cs, cmd_blk);
			else
				myrs_handle_scsi(cs, cmd_blk, scmd);
		}
	}
	cs->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cs->queue_lock, flags);
	return IRQ_HANDLED;
}

struct myrs_privdata DAC960_LP_privdata = {
	.hw_init	= DAC960_LP_hw_init,
	.irq_handler	= DAC960_LP_intr_handler,
	.mmio_size	= DAC960_LP_mmio_size,
};
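/*
 * myrs_probe - instantiates one controller: detects and initializes the
 * hardware, allocates its memory pools, and registers the SCSI host.
 */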
static int
myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry)
{
	struct myrs_hba *cs;
	int ret;

	cs = myrs_detect(dev, entry);
	if (!cs)
		return -ENODEV;

	ret = myrs_get_config(cs);
	if (ret < 0) {
		myrs_cleanup(cs);
		return ret;
	}

	if (!myrs_create_mempools(dev, cs)) {
		ret = -ENOMEM;
		goto failed;
	}

	ret = scsi_add_host(cs->host, &dev->dev);
	if (ret) {
		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
		myrs_destroy_mempools(cs);
		goto failed;
	}
	scsi_scan_host(cs->host);
	return 0;
failed:
	myrs_cleanup(cs);
	return ret;
}
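/*
 * myrs_remove - flushes the controller cache and tears the controller down.
 */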
static void myrs_remove(struct pci_dev *pdev)
{
	struct myrs_hba *cs = pci_get_drvdata(pdev);

	if (cs == NULL)
		return;

	shost_printk(KERN_NOTICE, cs->host, "Flushing Cache...");
	myrs_flush_cache(cs);
	myrs_destroy_mempools(cs);
	myrs_cleanup(cs);
}
static const struct pci_device_id myrs_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_GEM,
			       PCI_VENDOR_ID_MYLEX, PCI_ANY_ID),
		.driver_data	= (unsigned long) &DAC960_GEM_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata),
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, myrs_id_table);
static struct pci_driver myrs_pci_driver = {
	.name		= "myrs",
	.id_table	= myrs_id_table,
	.probe		= myrs_probe,
	.remove		= myrs_remove,
};
static int __init myrs_init_module(void)
{
	int ret;

	myrs_raid_template = raid_class_attach(&myrs_raid_functions);
	if (!myrs_raid_template)
		return -ENODEV;

	ret = pci_register_driver(&myrs_pci_driver);
	if (ret)
		raid_class_release(myrs_raid_template);

	return ret;
}
static void __exit myrs_cleanup_module(void)
{
	pci_unregister_driver(&myrs_pci_driver);
	raid_class_release(myrs_raid_template);
}

module_init(myrs_init_module);
module_exit(myrs_cleanup_module);
3266 MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
3267 MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3268 MODULE_LICENSE("GPL");