// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
 *
 * Based on the original DAC960 driver,
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <linux/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

#include "myrb.h"

static struct raid_template *myrb_raid_template;

static void myrb_monitor(struct work_struct *work);
static inline void myrb_translate_devstate(void *DeviceState);

static inline int myrb_logical_channel(struct Scsi_Host *shost)
{
	return shost->max_channel - 1;
}
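
/*
 * Channel layout note: the physical SCSI channels occupy the low channel
 * numbers and the controller's logical drives are presented on the single
 * channel above them, which is what this helper returns. The dispatch
 * helpers below rely on it, e.g.
 *
 *	if (sdev->channel == myrb_logical_channel(shost))
 *		return myrb_ldev_queuecommand(shost, scmd);
 *
 * as done in myrb_queuecommand() and myrb_slave_alloc() further down.
 */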

static struct myrb_devstate_name_entry {
	enum myrb_devstate state;
	const char *name;
} myrb_devstate_name_list[] = {
	{ MYRB_DEVICE_DEAD, "Dead" },
	{ MYRB_DEVICE_WO, "WriteOnly" },
	{ MYRB_DEVICE_ONLINE, "Online" },
	{ MYRB_DEVICE_CRITICAL, "Critical" },
	{ MYRB_DEVICE_STANDBY, "Standby" },
	{ MYRB_DEVICE_OFFLINE, "Offline" },
};

static const char *myrb_devstate_name(enum myrb_devstate state)
{
	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return NULL;
}

static struct myrb_raidlevel_name_entry {
	enum myrb_raidlevel level;
	const char *name;
} myrb_raidlevel_name_list[] = {
	{ MYRB_RAID_LEVEL0, "RAID0" },
	{ MYRB_RAID_LEVEL1, "RAID1" },
	{ MYRB_RAID_LEVEL3, "RAID3" },
	{ MYRB_RAID_LEVEL5, "RAID5" },
	{ MYRB_RAID_LEVEL6, "RAID6" },
	{ MYRB_RAID_JBOD, "JBOD" },
};

static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
{
	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}

/*
 * myrb_create_mempools - allocates auxiliary data structures
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
{
	size_t elem_size, elem_align;

	elem_align = sizeof(struct myrb_sge);
	elem_size = cb->host->sg_tablesize * elem_align;
	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
				      elem_size, elem_align, 0);
	if (cb->sg_pool == NULL) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate SG pool\n");
		return false;
	}

	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
					sizeof(struct myrb_dcdb),
					sizeof(unsigned int), 0);
	if (!cb->dcdb_pool) {
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate DCDB pool\n");
		return false;
	}

	cb->work_q = alloc_ordered_workqueue("myrb_wq_%d", WQ_MEM_RECLAIM,
					     cb->ctlr_num);
	if (!cb->work_q) {
		dma_pool_destroy(cb->dcdb_pool);
		cb->dcdb_pool = NULL;
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to create workqueue\n");
		return false;
	}

	/*
	 * Initialize the Monitoring Timer.
	 */
	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);

	return true;
}

/*
 * myrb_destroy_mempools - tears down the memory pools for the controller
 */
static void myrb_destroy_mempools(struct myrb_hba *cb)
{
	cancel_delayed_work_sync(&cb->monitor_work);
	destroy_workqueue(cb->work_q);

	dma_pool_destroy(cb->sg_pool);
	dma_pool_destroy(cb->dcdb_pool);
}

/*
 * myrb_reset_cmd - reset command block
 */
static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
	cmd_blk->status = 0;
}

/*
 * myrb_qcmd - queues command block for execution
 */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

	cb->write_cmd_mbox(next_mbox, mbox);
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}

/*
 * myrb_exec_cmd - executes command block and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
		struct myrb_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	unsigned long flags;

	cmd_blk->completion = &cmpl;

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	wait_for_completion(&cmpl);
	return cmd_blk->status;
}

/*
 * myrb_exec_type3 - executes a type 3 command and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, dma_addr_t addr)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_DCMD_TAG;
	mbox->type3.opcode = op;
	mbox->type3.addr = addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	return status;
}

/*
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, struct scsi_device *sdev,
		struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
	if (status == MYRB_STATUS_SUCCESS &&
	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
		myrb_translate_devstate(pdev_info);

	return status;
}

static char *myrb_event_msg[] = {
	"killed because write recovery failed",
	"killed because of SCSI bus reset failure",
	"killed because of double check condition",
	"killed because it was removed",
	"killed because of gross error on SCSI chip",
	"killed because of bad tag returned from drive",
	"killed because of timeout on SCSI command",
	"killed because of reset SCSI command issued from system",
	"killed because busy or parity error count exceeded limit",
	"killed because of 'kill drive' command from system",
	"killed because of selection timeout",
	"killed due to SCSI phase sequence error",
	"killed due to unknown status",
};

/**
 * myrb_get_event - get event log from HBA
 * @cb: pointer to the hba structure
 * @event: number of the event
 *
 * Execute a type 3E command and logs the event message
 */
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_log_entry *ev_buf;
	dma_addr_t ev_addr;
	unsigned short status;

	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
				    sizeof(struct myrb_log_entry),
				    &ev_addr, GFP_KERNEL);
	if (!ev_buf)
		return;

	myrb_reset_cmd(cmd_blk);
	mbox->type3E.id = MYRB_MCMD_TAG;
	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
	mbox->type3E.opqual = 1;
	mbox->type3E.ev_seq = event;
	mbox->type3E.addr = ev_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status != MYRB_STATUS_SUCCESS)
		shost_printk(KERN_INFO, cb->host,
			     "Failed to get event log %d, status %04x\n",
			     event, status);
	else if (ev_buf->seq_num == event) {
		struct scsi_sense_hdr sshdr;

		memset(&sshdr, 0, sizeof(sshdr));
		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);

		if (sshdr.sense_key == VENDOR_SPECIFIC &&
		    sshdr.asc == 0x80 &&
		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: %s\n",
				     ev_buf->channel, ev_buf->target,
				     myrb_event_msg[sshdr.ascq]);
		else
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
				     ev_buf->channel, ev_buf->target,
				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
	}

	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
			  ev_buf, ev_addr);
}

/*
 * myrb_get_errtable - retrieves the error table from the controller
 *
 * Executes a type 3 command and logs the error table from the controller.
 */
static void myrb_get_errtable(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];

	memcpy(&old_table, cb->err_table, sizeof(old_table));

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
	mbox->type3.addr = cb->err_table_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		struct myrb_error_entry *table = cb->err_table;
		struct myrb_error_entry *new, *old;
		size_t err_table_offset;
		struct scsi_device *sdev;

		shost_for_each_device(sdev, cb->host) {
			if (sdev->channel >= myrb_logical_channel(cb->host))
				continue;
			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
				+ sdev->id;
			new = table + err_table_offset;
			old = &old_table[err_table_offset];
			if (new->parity_err == old->parity_err &&
			    new->soft_err == old->soft_err &&
			    new->hard_err == old->hard_err &&
			    new->misc_err == old->misc_err)
				continue;
			sdev_printk(KERN_CRIT, sdev,
				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
				    new->parity_err, new->soft_err,
				    new->hard_err, new->misc_err);
		}
	}
}

/*
 * myrb_get_ldev_info - retrieves the logical device table from the controller
 *
 * Executes a type 3 command and updates the logical device table.
 *
 * Return: command status
 */
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
{
	unsigned short status;
	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
	struct Scsi_Host *shost = cb->host;

	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
				 cb->ldev_info_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
		struct myrb_ldev_info *old = NULL;
		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
					  ldev_num, 0);
		if (!sdev) {
			if (new->state == MYRB_DEVICE_OFFLINE)
				continue;
			shost_printk(KERN_INFO, shost,
				     "Adding Logical Drive %d in state %s\n",
				     ldev_num, myrb_devstate_name(new->state));
			scsi_add_device(shost, myrb_logical_channel(shost),
					ldev_num, 0);
			continue;
		}
		old = sdev->hostdata;
		if (new->state != old->state)
			shost_printk(KERN_INFO, shost,
				     "Logical Drive %d is now %s\n",
				     ldev_num, myrb_devstate_name(new->state));
		if (new->wb_enabled != old->wb_enabled)
			sdev_printk(KERN_INFO, sdev,
				    "Logical Drive is now WRITE %s\n",
				    (new->wb_enabled ? "BACK" : "THRU"));
		memcpy(old, new, sizeof(*new));
		scsi_device_put(sdev);
	}
	return status;
}

/*
 * myrb_get_rbld_progress - get rebuild progress information
 *
 * Executes a type 3 command and returns the rebuild progress
 * information.
 *
 * Return: command status
 */
static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
		struct myrb_rbld_progress *rbld)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf)
		return MYRB_STATUS_RBLD_NOT_CHECKED;

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (rbld)
		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
	return status;
}

/*
 * myrb_update_rbld_progress - updates the rebuild status
 *
 * Updates the rebuild status for the attached logical devices.
 */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
		status = MYRB_STATUS_RBLD_SUCCESS;
	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
		unsigned int blocks_done =
			rbld_buf.ldev_size - rbld_buf.blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  rbld_buf.ldev_num, 0);
		if (!sdev)
			return;

		switch (status) {
		case MYRB_STATUS_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild in Progress, %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (rbld_buf.ldev_size >> 7));
			break;
		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Logical Drive Failure\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Completed Successfully\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Successfully Terminated\n");
			break;
		default:
			break;
		}
		scsi_device_put(sdev);
	}
	cb->last_rbld_status = status;
}

/*
 * myrb_get_cc_progress - retrieve the rebuild status
 *
 * Execute a type 3 Command and fetch the rebuild / consistency check
 * progress.
 */
static void myrb_get_cc_progress(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf) {
		cb->need_cc_status = true;
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		unsigned int ldev_num = rbld_buf->ldev_num;
		unsigned int ldev_size = rbld_buf->ldev_size;
		unsigned int blocks_done =
			ldev_size - rbld_buf->blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  ldev_num, 0);
		if (sdev) {
			sdev_printk(KERN_INFO, sdev,
				    "Consistency Check in Progress: %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (ldev_size >> 7));
			scsi_device_put(sdev);
		}
	}
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
}

/*
 * myrb_bgi_control - updates background initialisation status
 *
 * Executes a type 3B command and updates the background initialisation status
 */
static void myrb_bgi_control(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_bgi_status *bgi, *last_bgi;
	dma_addr_t bgi_addr;
	struct scsi_device *sdev = NULL;
	unsigned short status;

	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
				 &bgi_addr, GFP_KERNEL);
	if (!bgi) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate bgi memory\n");
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3B.id = MYRB_DCMD_TAG;
	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
	mbox->type3B.optype = 0x20;
	mbox->type3B.addr = bgi_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	last_bgi = &cb->bgi_status;
	sdev = scsi_device_lookup(cb->host,
				  myrb_logical_channel(cb->host),
				  last_bgi->ldev_num, 0);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		switch (bgi->status) {
		case MYRB_BGI_INVALID:
			break;
		case MYRB_BGI_STARTED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Started\n");
			break;
		case MYRB_BGI_INPROGRESS:
			if (!sdev)
				break;
			if (bgi->blocks_done == last_bgi->blocks_done &&
			    bgi->ldev_num == last_bgi->ldev_num)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization in Progress: %d%% completed\n",
				    (100 * (bgi->blocks_done >> 7))
				    / (bgi->ldev_size >> 7));
			break;
		case MYRB_BGI_SUSPENDED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Suspended\n");
			break;
		case MYRB_BGI_CANCELLED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Cancelled\n");
			break;
		}
		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
		break;
	case MYRB_STATUS_BGI_SUCCESS:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Completed Successfully\n");
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	case MYRB_STATUS_BGI_ABORTED:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Aborted\n");
		fallthrough;
	case MYRB_STATUS_NO_BGI_INPROGRESS:
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	}
	if (sdev)
		scsi_device_put(sdev);
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
			  bgi, bgi_addr);
}

/*
 * myrb_hba_enquiry - updates the controller status
 *
 * Executes a DAC_V1_Enquiry command and updates the controller status.
 *
 * Return: command status
 */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
	struct myrb_enquiry old, *new;
	unsigned short status;

	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	new = cb->enquiry;
	if (new->ldev_count > old.ldev_count) {
		int ldev_num = old.ldev_count - 1;

		while (++ldev_num < new->ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d Now Exists\n",
				     ldev_num);
	}
	if (new->ldev_count < old.ldev_count) {
		int ldev_num = new->ldev_count - 1;

		while (++ldev_num < old.ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d No Longer Exists\n",
				     ldev_num);
	}
	if (new->status.deferred != old.status.deferred)
		shost_printk(KERN_CRIT, cb->host,
			     "Deferred Write Error Flag is now %s\n",
			     (new->status.deferred ? "TRUE" : "FALSE"));
	if (new->ev_seq != old.ev_seq) {
		cb->new_ev_seq = new->ev_seq;
		cb->need_err_info = true;
		shost_printk(KERN_INFO, cb->host,
			     "Event log %d/%d (%d/%d) available\n",
			     cb->old_ev_seq, cb->new_ev_seq,
			     old.ev_seq, new->ev_seq);
	}
	if ((new->ldev_critical > 0 &&
	     new->ldev_critical != old.ldev_critical) ||
	    (new->ldev_offline > 0 &&
	     new->ldev_offline != old.ldev_offline) ||
	    (new->ldev_count != old.ldev_count)) {
		shost_printk(KERN_INFO, cb->host,
			     "Logical drive count changed (%d/%d/%d)\n",
			     new->ldev_critical,
			     new->ldev_offline,
			     new->ldev_count);
		cb->need_ldev_info = true;
	}
	if (new->pdev_dead > 0 ||
	    new->pdev_dead != old.pdev_dead ||
	    time_after_eq(jiffies, cb->secondary_monitor_time
			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
		cb->need_bgi_status = cb->bgi_status_supported;
		cb->secondary_monitor_time = jiffies;
	}
	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
		cb->need_rbld = true;
		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
	}
	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
		switch (new->rbld) {
		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed Successfully\n");
			break;
		case MYRB_STDBY_RBLD_IN_PROGRESS:
		case MYRB_BG_RBLD_IN_PROGRESS:
			break;
		case MYRB_BG_CHECK_IN_PROGRESS:
			cb->need_cc_status = true;
			break;
		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed with Error\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Physical Device Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Logical Drive Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Other Causes\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Successfully Terminated\n");
			break;
		}
	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
		cb->need_cc_status = true;

	return MYRB_STATUS_SUCCESS;
}
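
/*
 * The enquiry itself only raises need_* flags (need_err_info,
 * need_ldev_info, need_bgi_status, need_rbld, need_cc_status); the
 * myrb_monitor() work function declared at the top of this file is
 * presumably the consumer that issues the follow-up commands outside of
 * this path.
 */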
;
774 * myrb_set_pdev_state - sets the device state for a physical device
776 * Return: command status
778 static unsigned short myrb_set_pdev_state(struct myrb_hba
*cb
,
779 struct scsi_device
*sdev
, enum myrb_devstate state
)
781 struct myrb_cmdblk
*cmd_blk
= &cb
->dcmd_blk
;
782 union myrb_cmd_mbox
*mbox
= &cmd_blk
->mbox
;
783 unsigned short status
;
785 mutex_lock(&cb
->dcmd_mutex
);
786 mbox
->type3D
.opcode
= MYRB_CMD_START_DEVICE
;
787 mbox
->type3D
.id
= MYRB_DCMD_TAG
;
788 mbox
->type3D
.channel
= sdev
->channel
;
789 mbox
->type3D
.target
= sdev
->id
;
790 mbox
->type3D
.state
= state
& 0x1F;
791 status
= myrb_exec_cmd(cb
, cmd_blk
);
792 mutex_unlock(&cb
->dcmd_mutex
);

/*
 * myrb_enable_mmio - enables the Memory Mailbox Interface
 *
 * PD and P controller types have no memory mailbox, but still need the
 * other dma mapped memory.
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
{
	void __iomem *base = cb->io_base;
	struct pci_dev *pdev = cb->pdev;
	size_t err_table_size;
	size_t ldev_info_size;
	union myrb_cmd_mbox *cmd_mbox_mem;
	struct myrb_stat_mbox *stat_mbox_mem;
	union myrb_cmd_mbox mbox;
	unsigned short status;

	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "DMA mask out of range\n");
		return false;
	}

	cb->enquiry = dma_alloc_coherent(&pdev->dev,
					 sizeof(struct myrb_enquiry),
					 &cb->enquiry_addr, GFP_KERNEL);
	if (!cb->enquiry)
		return false;

	err_table_size = sizeof(struct myrb_error_entry) *
		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
					   &cb->err_table_addr, GFP_KERNEL);
	if (!cb->err_table)
		return false;

	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
					       &cb->ldev_info_addr, GFP_KERNEL);
	if (!cb->ldev_info_buf)
		return false;

	/*
	 * Skip mailbox initialisation for PD and P Controllers
	 */
	if (!mmio_init_fn)
		return true;

	/* These are the base addresses for the command memory mailbox array */
	cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
						cb->cmd_mbox_size,
						&cb->cmd_mbox_addr,
						GFP_KERNEL);
	if (!cb->first_cmd_mbox)
		return false;

	cmd_mbox_mem = cb->first_cmd_mbox;
	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
	cb->last_cmd_mbox = cmd_mbox_mem;
	cb->next_cmd_mbox = cb->first_cmd_mbox;
	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;

	/* These are the base addresses for the status memory mailbox array */
	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
		sizeof(struct myrb_stat_mbox);
	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
						 cb->stat_mbox_size,
						 &cb->stat_mbox_addr,
						 GFP_KERNEL);
	if (!cb->first_stat_mbox)
		return false;

	stat_mbox_mem = cb->first_stat_mbox;
	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
	cb->last_stat_mbox = stat_mbox_mem;
	cb->next_stat_mbox = cb->first_stat_mbox;

	/* Enable the Memory Mailbox Interface. */
	cb->dual_mode_interface = true;
	mbox.typeX.opcode = 0x2B;
	mbox.typeX.id = 0;
	mbox.typeX.opcode2 = 0x14;
	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;

	status = mmio_init_fn(pdev, base, &mbox);
	if (status != MYRB_STATUS_SUCCESS) {
		cb->dual_mode_interface = false;
		mbox.typeX.opcode2 = 0x10;
		status = mmio_init_fn(pdev, base, &mbox);
		if (status != MYRB_STATUS_SUCCESS) {
			dev_err(&pdev->dev,
				"Failed to enable mailbox, status %02X\n",
				status);
			return false;
		}
	}
	return true;
}
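
/*
 * Mailbox-mode fallback: opcode2 0x14 asks the firmware for the dual
 * command/status memory mailbox mode; if that is rejected the code retries
 * with opcode2 0x10, which appears to select the older single-mode
 * interface, and clears dual_mode_interface so the I/O path picks the
 * matching access routines.
 */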

/*
 * myrb_get_hba_config - reads the configuration information
 *
 * Reads the configuration information from the controller and
 * initializes the controller structure.
 *
 * Return: 0 on success, errno otherwise
 */
static int myrb_get_hba_config(struct myrb_hba *cb)
{
	struct myrb_enquiry2 *enquiry2;
	dma_addr_t enquiry2_addr;
	struct myrb_config2 *config2;
	dma_addr_t config2_addr;
	struct Scsi_Host *shost = cb->host;
	struct pci_dev *pdev = cb->pdev;
	int pchan_max = 0, pchan_cur = 0;
	unsigned short status;
	int ret = -ENODEV, memsize = 0;

	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				      &enquiry2_addr, GFP_KERNEL);
	if (!enquiry2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 enquiry2 memory\n");
		return -ENOMEM;
	}
	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
				     &config2_addr, GFP_KERNEL);
	if (!config2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 config2 memory\n");
		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				  enquiry2, enquiry2_addr);
		return -ENOMEM;
	}
	mutex_lock(&cb->dma_mutex);
	status = myrb_hba_enquiry(cb);
	mutex_unlock(&cb->dma_mutex);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry2\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue ReadConfig2\n");
		goto out_free;
	}

	status = myrb_get_ldev_info(cb);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to get logical drive information\n");
		goto out_free;
	}

	/*
	 * Initialize the Controller Model Name and Full Model Name fields.
	 */
	switch (enquiry2->hw.sub_model) {
	case DAC960_V1_P_PD_PU:
		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
			strcpy(cb->model_name, "DAC960PU");
		else
			strcpy(cb->model_name, "DAC960PD");
		break;
	case DAC960_V1_PL:
		strcpy(cb->model_name, "DAC960PL");
		break;
	case DAC960_V1_PG:
		strcpy(cb->model_name, "DAC960PG");
		break;
	case DAC960_V1_PJ:
		strcpy(cb->model_name, "DAC960PJ");
		break;
	case DAC960_V1_PR:
		strcpy(cb->model_name, "DAC960PR");
		break;
	case DAC960_V1_PT:
		strcpy(cb->model_name, "DAC960PT");
		break;
	case DAC960_V1_PTL0:
		strcpy(cb->model_name, "DAC960PTL0");
		break;
	case DAC960_V1_PRL:
		strcpy(cb->model_name, "DAC960PRL");
		break;
	case DAC960_V1_PTL1:
		strcpy(cb->model_name, "DAC960PTL1");
		break;
	case DAC960_V1_1164P:
		strcpy(cb->model_name, "eXtremeRAID 1100");
		break;
	default:
		shost_printk(KERN_WARNING, cb->host,
			     "Unknown Model %X\n",
			     enquiry2->hw.sub_model);
		goto out_free;
	}
	/*
	 * Initialize the Controller Firmware Version field and verify that it
	 * is a supported firmware version.
	 * The supported firmware versions are:
	 *
	 * DAC1164P		    5.06 and above
	 * DAC960PTL/PRL/PJ/PG	    4.06 and above
	 * DAC960PU/PD/PL	    3.51 and above
	 * DAC960PU/PD/PL/P	    2.73 and above
	 */
#if defined(CONFIG_ALPHA)
	/*
	 * DEC Alpha machines were often equipped with DAC960 cards that were
	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
	 * the last custom FW revision to be released by DEC for these older
	 * controllers, appears to work quite well with this driver.
	 *
	 * Cards tested successfully were several versions each of the PD and
	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
	 * back of the board, of:
	 *
	 * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
	 *	   or D040349 (3-channel)
	 * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
	 *	   or D040397 (3-channel)
	 */
# define FIRMWARE_27X	"2.70"
#else
# define FIRMWARE_27X	"2.73"
#endif

	if (enquiry2->fw.major_version == 0) {
		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
		enquiry2->fw.firmware_type = '0';
		enquiry2->fw.turn_id = 0;
	}
	snprintf(cb->fw_version, sizeof(cb->fw_version),
		 "%u.%02u-%c-%02u",
		 enquiry2->fw.major_version,
		 enquiry2->fw.minor_version,
		 enquiry2->fw.firmware_type,
		 enquiry2->fw.turn_id);
	if (!((enquiry2->fw.major_version == 5 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 4 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 3 &&
	       enquiry2->fw.minor_version >= 51) ||
	      (enquiry2->fw.major_version == 2 &&
	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
		shost_printk(KERN_WARNING, cb->host,
			     "Firmware Version '%s' unsupported\n",
			     cb->fw_version);
		goto out_free;
	}
	/*
	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
	 * Enclosure Management Enabled fields.
	 */
	switch (enquiry2->hw.model) {
	case MYRB_5_CHANNEL_BOARD:
		pchan_max = 5;
		break;
	case MYRB_3_CHANNEL_BOARD:
	case MYRB_3_CHANNEL_ASIC_DAC:
		pchan_max = 3;
		break;
	case MYRB_2_CHANNEL_BOARD:
		pchan_max = 2;
		break;
	default:
		pchan_max = enquiry2->cfg_chan;
		break;
	}
	pchan_cur = enquiry2->cur_chan;
	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
		cb->bus_width = 32;
	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
		cb->bus_width = 16;
	else
		cb->bus_width = 8;
	cb->ldev_block_size = enquiry2->ldev_block_size;
	shost->max_channel = pchan_cur + 1;
	shost->max_id = enquiry2->max_targets;
	memsize = enquiry2->mem_size >> 20;
	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most one less than the
	 * Controller Queue Depth to allow for an automatic drive
	 * rebuild operation.
	 */
	shost->can_queue = cb->enquiry->max_tcq;
	if (shost->can_queue < 3)
		shost->can_queue = enquiry2->max_cmds;
	if (shost->can_queue < 3)
		/* Play safe and disable TCQ */
		shost->can_queue = 1;

	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
	shost->max_sectors = enquiry2->max_sectors;
	shost->sg_tablesize = enquiry2->max_sge;
	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
	/*
	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
	 */
	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	/* Assume 255/63 translation */
	cb->ldev_geom_heads = 255;
	cb->ldev_geom_sectors = 63;
	if (config2->drive_geometry) {
		cb->ldev_geom_heads = 128;
		cb->ldev_geom_sectors = 32;
	}

	/*
	 * Initialize the Background Initialization Status.
	 */
	if ((cb->fw_version[0] == '4' &&
	     strcmp(cb->fw_version, "4.08") >= 0) ||
	    (cb->fw_version[0] == '5' &&
	     strcmp(cb->fw_version, "5.08") >= 0)) {
		cb->bgi_status_supported = true;
		myrb_bgi_control(cb);
	}
	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;

	ret = 0;

	shost_printk(KERN_INFO, cb->host,
		     "Configuring %s PCI RAID Controller\n", cb->model_name);
	shost_printk(KERN_INFO, cb->host,
		     "  Firmware Version: %s, Memory Size: %dMB\n",
		     cb->fw_version, memsize);
	if (cb->io_addr == 0)
		shost_printk(KERN_INFO, cb->host,
			     "  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
			     (unsigned long)cb->pci_addr, cb->irq);
	else
		shost_printk(KERN_INFO, cb->host,
			     "  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
			     (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
			     cb->irq);
	shost_printk(KERN_INFO, cb->host,
		     "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		     cb->host->can_queue, cb->host->max_sectors);
	shost_printk(KERN_INFO, cb->host,
		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     cb->host->can_queue, cb->host->sg_tablesize,
		     MYRB_SCATTER_GATHER_LIMIT);
	shost_printk(KERN_INFO, cb->host,
		     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
		     cb->stripe_size, cb->segment_size,
		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
		     cb->safte_enabled ?
		     "  SAF-TE Enclosure Management Enabled" : "");
	shost_printk(KERN_INFO, cb->host,
		     "  Physical: %d/%d channels %d/%d/%d devices\n",
		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
		     cb->host->max_id);

	shost_printk(KERN_INFO, cb->host,
		     "  Logical: 1/1 channels, %d/%d disks\n",
		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
			  enquiry2, enquiry2_addr);
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
			  config2, config2_addr);

	return ret;
}

/*
 * myrb_unmap - unmaps controller structures
 */
static void myrb_unmap(struct myrb_hba *cb)
{
	if (cb->ldev_info_buf) {
		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
			MYRB_MAX_LDEVS;
		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
				  cb->ldev_info_buf, cb->ldev_info_addr);
		cb->ldev_info_buf = NULL;
	}
	if (cb->err_table) {
		size_t err_table_size = sizeof(struct myrb_error_entry) *
			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
		dma_free_coherent(&cb->pdev->dev, err_table_size,
				  cb->err_table, cb->err_table_addr);
		cb->err_table = NULL;
	}
	if (cb->enquiry) {
		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
				  cb->enquiry, cb->enquiry_addr);
		cb->enquiry = NULL;
	}
	if (cb->first_stat_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
				  cb->first_stat_mbox, cb->stat_mbox_addr);
		cb->first_stat_mbox = NULL;
	}
	if (cb->first_cmd_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
		cb->first_cmd_mbox = NULL;
	}
}

/*
 * myrb_cleanup - cleanup controller structures
 */
static void myrb_cleanup(struct myrb_hba *cb)
{
	struct pci_dev *pdev = cb->pdev;

	/* Free the memory mailbox, status, and related structures */
	myrb_unmap(cb);

	if (cb->mmio_base) {
		if (cb->disable_intr)
			cb->disable_intr(cb->io_base);
		iounmap(cb->mmio_base);
	}
	if (cb->irq)
		free_irq(cb->irq, cb);
	if (cb->io_addr)
		release_region(cb->io_addr, 0x80);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	scsi_host_put(cb->host);
}

static int myrb_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	struct myrb_hba *cb = shost_priv(shost);

	cb->reset(cb->io_base);
	return SUCCESS;
}

static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_dcdb *dcdb;
	dma_addr_t dcdb_addr;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	int nsge;

	myrb_reset_cmd(cmd_blk);
	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
	if (!dcdb)
		return SCSI_MLQUEUE_HOST_BUSY;
	nsge = scsi_dma_map(scmd);
	if (nsge > 1) {
		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
		scmd->result = (DID_ERROR << 16);
		scsi_done(scmd);
		return 0;
	}

	mbox->type3.opcode = MYRB_CMD_DCDB;
	mbox->type3.id = rq->tag + 3;
	mbox->type3.addr = dcdb_addr;
	dcdb->channel = sdev->channel;
	dcdb->target = sdev->id;
	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
		break;
	case DMA_TO_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
		break;
	case DMA_FROM_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
		break;
	default:
		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
		break;
	}
	dcdb->early_status = false;
	if (rq->timeout <= 10)
		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
	else if (rq->timeout <= 60)
		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
	else if (rq->timeout <= 600)
		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
	else
		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
	dcdb->no_autosense = false;
	dcdb->allow_disconnect = true;
	sgl = scsi_sglist(scmd);
	dcdb->dma_addr = sg_dma_address(sgl);
	if (sg_dma_len(sgl) > USHRT_MAX) {
		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
	} else {
		dcdb->xfer_len_lo = sg_dma_len(sgl);
		dcdb->xfer_len_hi4 = 0;
	}
	dcdb->cdb_len = scmd->cmd_len;
	dcdb->sense_len = sizeof(dcdb->sense);
	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return 0;
}

static void myrb_inquiry(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	unsigned char inq[36] = {
		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20,
	};

	if (cb->bus_width > 16)
		inq[7] |= 1 << 6;
	if (cb->bus_width > 8)
		inq[7] |= 1 << 5;
	memcpy(&inq[16], cb->model_name, 16);
	memcpy(&inq[32], cb->fw_version, 1);
	memcpy(&inq[33], &cb->fw_version[2], 2);
	memcpy(&inq[35], &cb->fw_version[7], 1);

	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
}

static void myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char modes[32], *mode_pg;
	bool dbd;
	size_t mode_len;

	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
	if (dbd) {
		mode_len = 24;
		mode_pg = &modes[4];
	} else {
		mode_len = 32;
		mode_pg = &modes[12];
	}
	memset(modes, 0, sizeof(modes));
	modes[0] = mode_len - 1;
	if (!dbd) {
		unsigned char *block_desc = &modes[4];

		modes[3] = 8;
		put_unaligned_be32(ldev_info->size, &block_desc[0]);
		put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
	}
	mode_pg[0] = 0x08;
	mode_pg[1] = 0x12;
	if (ldev_info->wb_enabled)
		mode_pg[2] |= 0x04;
	if (cb->segment_size) {
		mode_pg[2] |= 0x08;
		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
	}

	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}

static void myrb_request_sense(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	scsi_build_sense(scmd, 0, NO_SENSE, 0, 0);
	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE);
}

static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char data[8];

	dev_dbg(&scmd->device->sdev_gendev,
		"Capacity %u, blocksize %u\n",
		ldev_info->size, cb->ldev_block_size);
	put_unaligned_be32(ldev_info->size - 1, &data[0]);
	put_unaligned_be32(cb->ldev_block_size, &data[4]);
	scsi_sg_copy_from_buffer(scmd, data, 8);
}

static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_ldev_info *ldev_info;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	u64 lba;
	u32 block_cnt;
	int nsge;

	ldev_info = sdev->hostdata;
	if (ldev_info->state != MYRB_DEVICE_ONLINE &&
	    ldev_info->state != MYRB_DEVICE_WO) {
		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
			sdev->id, ldev_info ? ldev_info->state : 0xff);
		scmd->result = (DID_BAD_TARGET << 16);
		scsi_done(scmd);
		return 0;
	}
	switch (scmd->cmnd[0]) {
	case TEST_UNIT_READY:
		scmd->result = (DID_OK << 16);
		scsi_done(scmd);
		return 0;
	case INQUIRY:
		if (scmd->cmnd[1] & 1) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
		} else {
			myrb_inquiry(cb, scmd);
			scmd->result = (DID_OK << 16);
		}
		scsi_done(scmd);
		return 0;
	case SYNCHRONIZE_CACHE:
		scmd->result = (DID_OK << 16);
		scsi_done(scmd);
		return 0;
	case MODE_SENSE:
		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
		    (scmd->cmnd[2] & 0x3F) != 0x08) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
		} else {
			myrb_mode_sense(cb, scmd, ldev_info);
			scmd->result = (DID_OK << 16);
		}
		scsi_done(scmd);
		return 0;
	case READ_CAPACITY:
		if ((scmd->cmnd[1] & 1) ||
		    (scmd->cmnd[8] & 1)) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
			scsi_done(scmd);
			return 0;
		}
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		if (lba) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
			scsi_done(scmd);
			return 0;
		}
		myrb_read_capacity(cb, scmd, ldev_info);
		scsi_done(scmd);
		return 0;
	case REQUEST_SENSE:
		myrb_request_sense(cb, scmd);
		scmd->result = (DID_OK << 16);
		return 0;
	case SEND_DIAGNOSTIC:
		if (scmd->cmnd[1] != 0x04) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
		} else {
			/* Assume good status */
			scmd->result = (DID_OK << 16);
		}
		scsi_done(scmd);
		return 0;
	case READ_6:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
			scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_6:
		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
		       (scmd->cmnd[2] << 8) |
		       scmd->cmnd[3]);
		block_cnt = scmd->cmnd[4];
		break;
	case READ_10:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
			scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_10:
	case VERIFY:		/* 0x2F */
	case WRITE_VERIFY:	/* 0x2E */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case READ_12:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
			scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_12:
	case VERIFY_12:		/* 0xAF */
	case WRITE_VERIFY_12:	/* 0xAE */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	default:
		/* Illegal request, invalid opcode */
		scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0);
		scsi_done(scmd);
		return 0;
	}

	myrb_reset_cmd(cmd_blk);
	mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3;
	if (scmd->sc_data_direction == DMA_NONE)
		goto submit;
	nsge = scsi_dma_map(scmd);
	if (nsge == 1) {
		sgl = scsi_sglist(scmd);
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = (u32)sg_dma_address(sgl);
	} else {
		struct myrb_sge *hw_sgl;
		dma_addr_t hw_sgl_addr;
		int i;

		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
		if (!hw_sgl)
			return SCSI_MLQUEUE_HOST_BUSY;

		cmd_blk->sgl = hw_sgl;
		cmd_blk->sgl_addr = hw_sgl_addr;

		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ_SG;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE_SG;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = hw_sgl_addr;
		mbox->type5.sg_count = nsge;

		scsi_for_each_sg(scmd, sgl, nsge, i) {
			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
			hw_sgl++;
		}
	}
submit:
	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	return 0;
}

static int myrb_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;

	if (sdev->channel > myrb_logical_channel(shost)) {
		scmd->result = (DID_BAD_TARGET << 16);
		scsi_done(scmd);
		return 0;
	}
	if (sdev->channel == myrb_logical_channel(shost))
		return myrb_ldev_queuecommand(shost, scmd);

	return myrb_pthru_queuecommand(shost, scmd);
}

static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
{
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_ldev_info *ldev_info;
	unsigned short ldev_num = sdev->id;
	enum raid_level level;

	ldev_info = cb->ldev_info_buf + ldev_num;
	if (!ldev_info)
		return -ENXIO;

	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
	if (!sdev->hostdata)
		return -ENOMEM;
	dev_dbg(&sdev->sdev_gendev,
		"slave alloc ldev %d state %x\n",
		ldev_num, ldev_info->state);
	memcpy(sdev->hostdata, ldev_info,
	       sizeof(*ldev_info));
	switch (ldev_info->raid_level) {
	case MYRB_RAID_LEVEL0:
		level = RAID_LEVEL_LINEAR;
		break;
	case MYRB_RAID_LEVEL1:
		level = RAID_LEVEL_1;
		break;
	case MYRB_RAID_LEVEL3:
		level = RAID_LEVEL_3;
		break;
	case MYRB_RAID_LEVEL5:
		level = RAID_LEVEL_5;
		break;
	case MYRB_RAID_LEVEL6:
		level = RAID_LEVEL_6;
		break;
	case MYRB_RAID_JBOD:
		level = RAID_LEVEL_JBOD;
		break;
	default:
		level = RAID_LEVEL_UNKNOWN;
		break;
	}
	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
	return 0;
}

static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
{
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_pdev_state *pdev_info;
	unsigned short status;

	if (sdev->id > MYRB_MAX_TARGETS)
		return -ENXIO;

	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
	if (!pdev_info)
		return -ENOMEM;

	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
				  sdev, pdev_info);
	if (status != MYRB_STATUS_SUCCESS) {
		dev_dbg(&sdev->sdev_gendev,
			"Failed to get device state, status %x\n",
			status);
		kfree(pdev_info);
		return -ENXIO;
	}
	if (!pdev_info->present) {
		dev_dbg(&sdev->sdev_gendev,
			"device not present, skip\n");
		kfree(pdev_info);
		return -ENXIO;
	}
	dev_dbg(&sdev->sdev_gendev,
		"slave alloc pdev %d:%d state %x\n",
		sdev->channel, sdev->id, pdev_info->state);
	sdev->hostdata = pdev_info;

	return 0;
}
*sdev
)
1706 if (sdev
->channel
> myrb_logical_channel(sdev
->host
))
1712 if (sdev
->channel
== myrb_logical_channel(sdev
->host
))
1713 return myrb_ldev_slave_alloc(sdev
);
1715 return myrb_pdev_slave_alloc(sdev
);

static int myrb_slave_configure(struct scsi_device *sdev)
{
	struct myrb_ldev_info *ldev_info;

	if (sdev->channel > myrb_logical_channel(sdev->host))
		return -ENXIO;

	if (sdev->channel < myrb_logical_channel(sdev->host)) {
		sdev->no_uld_attach = 1;
		return 0;
	}
	if (sdev->lun != 0)
		return -ENXIO;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	if (ldev_info->state != MYRB_DEVICE_ONLINE)
		sdev_printk(KERN_INFO, sdev,
			    "Logical drive is %s\n",
			    myrb_devstate_name(ldev_info->state));

	sdev->tagged_supported = 1;
	return 0;
}

static void myrb_slave_destroy(struct scsi_device *sdev)
{
	kfree(sdev->hostdata);
}

static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		sector_t capacity, int geom[])
{
	struct myrb_hba *cb = shost_priv(sdev->host);

	geom[0] = cb->ldev_geom_heads;
	geom[1] = cb->ldev_geom_sectors;
	geom[2] = sector_div(capacity, geom[0] * geom[1]);

	return 0;
}

static ssize_t raid_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	int ret;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel == myrb_logical_channel(sdev->host)) {
		struct myrb_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		name = myrb_devstate_name(ldev_info->state);
		if (name)
			ret = snprintf(buf, 64, "%s\n", name);
		else
			ret = snprintf(buf, 64, "Invalid (%02X)\n",
				       ldev_info->state);
	} else {
		struct myrb_pdev_state *pdev_info = sdev->hostdata;
		unsigned short status;
		const char *name;

		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
					  sdev, pdev_info);
		if (status != MYRB_STATUS_SUCCESS)
			sdev_printk(KERN_INFO, sdev,
				    "Failed to get device state, status %x\n",
				    status);

		if (!pdev_info->present)
			name = "Removed";
		else
			name = myrb_devstate_name(pdev_info->state);
		if (name)
			ret = snprintf(buf, 64, "%s\n", name);
		else
			ret = snprintf(buf, 64, "Invalid (%02X)\n",
				       pdev_info->state);
	}
	return ret;
}

static ssize_t raid_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_pdev_state *pdev_info;
	enum myrb_devstate new_state;
	unsigned short status;

	if (!strncmp(buf, "kill", 4) ||
	    !strncmp(buf, "offline", 7))
		new_state = MYRB_DEVICE_DEAD;
	else if (!strncmp(buf, "online", 6))
		new_state = MYRB_DEVICE_ONLINE;
	else if (!strncmp(buf, "standby", 7))
		new_state = MYRB_DEVICE_STANDBY;
	else
		return -EINVAL;

	pdev_info = sdev->hostdata;
	if (!pdev_info) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - no physical device information\n");
		return -ENXIO;
	}
	if (!pdev_info->present) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed - device not present\n");
		return -ENXIO;
	}

	if (pdev_info->state == new_state)
		return count;

	status = myrb_set_pdev_state(cb, sdev, new_state);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		break;
	case MYRB_STATUS_START_DEVICE_FAILED:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Unable to Start Device\n");
		count = -EAGAIN;
		break;
	case MYRB_STATUS_NO_DEVICE:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - No Device at Address\n");
		count = -ENODEV;
		break;
	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Invalid Channel or Target or Modifier\n");
		count = -EINVAL;
		break;
	case MYRB_STATUS_CHANNEL_BUSY:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Channel Busy\n");
		count = -EBUSY;
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			    "Failed - Unexpected Status %04X\n", status);
		count = -EIO;
		break;
	}
	return count;
}
static DEVICE_ATTR_RW(raid_state);

static ssize_t raid_level_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	if (sdev->channel == myrb_logical_channel(sdev->host)) {
		struct myrb_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		if (!ldev_info)
			return -ENXIO;

		name = myrb_raidlevel_name(ldev_info->raid_level);
		if (!name)
			return snprintf(buf, 64, "Invalid (%02X)\n",
					ldev_info->state);
		return snprintf(buf, 64, "%s\n", name);
	}
	return snprintf(buf, 64, "Physical Drive\n");
}
static DEVICE_ATTR_RO(raid_level);

static ssize_t rebuild_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_rbld_progress rbld_buf;
	unsigned char status;

	if (sdev->channel < myrb_logical_channel(sdev->host))
		return snprintf(buf, 64, "physical device - not rebuilding\n");

	status = myrb_get_rbld_progress(cb, &rbld_buf);

	if (rbld_buf.ldev_num != sdev->id ||
	    status != MYRB_STATUS_SUCCESS)
		return snprintf(buf, 64, "not rebuilding\n");

	return snprintf(buf, 64, "rebuilding block %u of %u\n",
			rbld_buf.ldev_size - rbld_buf.blocks_left,
			rbld_buf.ldev_size);
}

static ssize_t rebuild_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_cmdblk *cmd_blk;
	union myrb_cmd_mbox *mbox;
	unsigned short status;
	int rc, start;
	const char *msg;

	rc = kstrtoint(buf, 0, &start);
	if (rc)
		return rc;

	if (sdev->channel >= myrb_logical_channel(sdev->host))
		return -ENXIO;

	status = myrb_get_rbld_progress(cb, NULL);
	if (start) {
		if (status == MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Initiated; already in progress\n");
			return -EALREADY;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
		mbox->type3D.id = MYRB_DCMD_TAG;
		mbox->type3D.channel = sdev->channel;
		mbox->type3D.target = sdev->id;
		status = myrb_exec_cmd(cb, cmd_blk);
		mutex_unlock(&cb->dcmd_mutex);
	} else {
		struct pci_dev *pdev = cb->pdev;
		unsigned char *rate;
		dma_addr_t rate_addr;

		if (status != MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Not Cancelled; not in progress\n");
			return 0;
		}

		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
					  &rate_addr, GFP_KERNEL);
		if (rate == NULL) {
			sdev_printk(KERN_INFO, sdev,
				    "Cancellation of Rebuild Failed - Out of Memory\n");
			return -ENOMEM;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
		mbox->type3R.id = MYRB_DCMD_TAG;
		mbox->type3R.rbld_rate = 0xFF;
		mbox->type3R.addr = rate_addr;
		status = myrb_exec_cmd(cb, cmd_blk);
		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
		mutex_unlock(&cb->dcmd_mutex);
	}
	if (status == MYRB_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
			    start ? "Initiated" : "Cancelled");
		return count;
	}
	if (!start) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Cancelled, status 0x%x\n",
			    status);
		return -EIO;
	}

	switch (status) {
	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
		msg = "Attempt to Rebuild Online or Unresponsive Drive";
		break;
	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
		msg = "New Disk Failed During Rebuild";
		break;
	case MYRB_STATUS_INVALID_ADDRESS:
		msg = "Invalid Device Address";
		break;
	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
		msg = "Already in Progress";
		break;
	default:
		msg = NULL;
		break;
	}
	if (msg)
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed - %s\n", msg);
	else
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Failed, status 0x%x\n", status);

	return -EIO;
}
static DEVICE_ATTR_RW(rebuild);
static ssize_t consistency_check_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_rbld_progress rbld_buf;
	struct myrb_cmdblk *cmd_blk;
	union myrb_cmd_mbox *mbox;
	unsigned short ldev_num = 0xFFFF;
	unsigned short status;
	int rc, start;
	const char *msg;

	rc = kstrtoint(buf, 0, &start);
	if (rc)
		return rc;

	if (sdev->channel < myrb_logical_channel(sdev->host))
		return -ENXIO;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (start) {
		if (status == MYRB_STATUS_SUCCESS) {
			sdev_printk(KERN_INFO, sdev,
				    "Check Consistency Not Initiated; already in progress\n");
			return -EALREADY;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
		mbox->type3C.id = MYRB_DCMD_TAG;
		mbox->type3C.ldev_num = sdev->id;
		mbox->type3C.auto_restore = true;

		status = myrb_exec_cmd(cb, cmd_blk);
		mutex_unlock(&cb->dcmd_mutex);
	} else {
		struct pci_dev *pdev = cb->pdev;
		unsigned char *rate;
		dma_addr_t rate_addr;

		if (ldev_num != sdev->id) {
			sdev_printk(KERN_INFO, sdev,
				    "Check Consistency Not Cancelled; not in progress\n");
			return count;
		}
		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
					  &rate_addr, GFP_KERNEL);
		if (rate == NULL) {
			sdev_printk(KERN_INFO, sdev,
				    "Cancellation of Check Consistency Failed - Out of Memory\n");
			return -ENOMEM;
		}
		mutex_lock(&cb->dcmd_mutex);
		cmd_blk = &cb->dcmd_blk;
		myrb_reset_cmd(cmd_blk);
		mbox = &cmd_blk->mbox;
		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
		mbox->type3R.id = MYRB_DCMD_TAG;
		mbox->type3R.rbld_rate = 0xFF;
		mbox->type3R.addr = rate_addr;
		status = myrb_exec_cmd(cb, cmd_blk);
		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
		mutex_unlock(&cb->dcmd_mutex);
	}
	if (status == MYRB_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
			    start ? "Initiated" : "Cancelled");
		return count;
	}
	if (!start) {
		sdev_printk(KERN_INFO, sdev,
			    "Check Consistency Not Cancelled, status 0x%x\n",
			    status);
		return -EIO;
	}

	switch (status) {
	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
		msg = "Dependent Physical Device is DEAD";
		break;
	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
		msg = "New Disk Failed During Rebuild";
		break;
	case MYRB_STATUS_INVALID_ADDRESS:
		msg = "Invalid or Nonredundant Logical Drive";
		break;
	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
		msg = "Already in Progress";
		break;
	default:
		msg = NULL;
		break;
	}
	if (msg)
		sdev_printk(KERN_INFO, sdev,
			    "Check Consistency Failed - %s\n", msg);
	else
		sdev_printk(KERN_INFO, sdev,
			    "Check Consistency Failed, status 0x%x\n", status);

	return -EIO;
}

static ssize_t consistency_check_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return rebuild_show(dev, attr, buf);
}
static DEVICE_ATTR_RW(consistency_check);
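/*
 * Example usage from userspace (the exact sysfs path depends on the SCSI
 * address of the logical drive; <h:c:t:l> below is illustrative):
 *
 *   echo 1 > /sys/bus/scsi/devices/<h:c:t:l>/rebuild
 *   echo 0 > /sys/bus/scsi/devices/<h:c:t:l>/consistency_check
 *
 * A non-zero value starts the operation, zero cancels it.
 */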
static ssize_t ctlr_num_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);

	return snprintf(buf, 20, "%u\n", cb->ctlr_num);
}
static DEVICE_ATTR_RO(ctlr_num);
static ssize_t firmware_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", cb->fw_version);
}
static DEVICE_ATTR_RO(firmware);
static ssize_t model_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", cb->model_name);
}
static DEVICE_ATTR_RO(model);
static ssize_t flush_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrb_hba *cb = shost_priv(shost);
	unsigned short status;

	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	if (status == MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_INFO, shost,
			     "Cache Flush Completed\n");
		return count;
	}
	shost_printk(KERN_INFO, shost,
		     "Cache Flush Failed, status %x\n", status);
	return -EIO;
}
static DEVICE_ATTR_WO(flush_cache);
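/*
 * flush_cache is a host attribute; e.g. (path illustrative):
 *
 *   echo 1 > /sys/class/scsi_host/host<n>/flush_cache
 *
 * Any write triggers the flush; the written value itself is ignored.
 */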
static struct attribute *myrb_sdev_attrs[] = {
	&dev_attr_rebuild.attr,
	&dev_attr_consistency_check.attr,
	&dev_attr_raid_state.attr,
	&dev_attr_raid_level.attr,
	NULL,
};

ATTRIBUTE_GROUPS(myrb_sdev);

static struct attribute *myrb_shost_attrs[] = {
	&dev_attr_ctlr_num.attr,
	&dev_attr_model.attr,
	&dev_attr_firmware.attr,
	&dev_attr_flush_cache.attr,
	NULL,
};

ATTRIBUTE_GROUPS(myrb_shost);
static const struct scsi_host_template myrb_template = {
	.module			= THIS_MODULE,
	.name			= "DAC960",
	.proc_name		= "myrb",
	.queuecommand		= myrb_queuecommand,
	.eh_host_reset_handler	= myrb_host_reset,
	.slave_alloc		= myrb_slave_alloc,
	.slave_configure	= myrb_slave_configure,
	.slave_destroy		= myrb_slave_destroy,
	.bios_param		= myrb_biosparam,
	.cmd_size		= sizeof(struct myrb_cmdblk),
	.shost_groups		= myrb_shost_groups,
	.sdev_groups		= myrb_sdev_groups,
	.this_id		= 7,
};
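/*
 * With .cmd_size set, the SCSI midlayer allocates a struct myrb_cmdblk
 * alongside every struct scsi_cmnd; the interrupt handlers below retrieve
 * it with scsi_cmd_priv() instead of the driver keeping its own command
 * array.
 */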
/**
 * myrb_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
 */
static int myrb_is_raid(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return sdev->channel == myrb_logical_channel(sdev->host);
}
/**
 * myrb_get_resync - get raid volume resync percent complete
 * @dev: the device struct object
 */
static void myrb_get_resync(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_rbld_progress rbld_buf;
	unsigned int percent_complete = 0;
	unsigned short status;
	unsigned int ldev_size = 0, remaining = 0;

	if (sdev->channel < myrb_logical_channel(sdev->host))
		return;
	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_STATUS_SUCCESS) {
		if (rbld_buf.ldev_num == sdev->id) {
			ldev_size = rbld_buf.ldev_size;
			remaining = rbld_buf.blocks_left;
		}
	}
	if (remaining && ldev_size)
		percent_complete = (ldev_size - remaining) * 100 / ldev_size;
	raid_set_resync(myrb_raid_template, dev, percent_complete);
}
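/*
 * percent_complete is plain integer arithmetic: with, say,
 * ldev_size = 2000000 blocks and blocks_left = 500000, we report
 * (2000000 - 500000) * 100 / 2000000 = 75 percent done.
 */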
/**
 * myrb_get_state - get raid volume status
 * @dev: the device struct object
 */
static void myrb_get_state(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrb_hba *cb = shost_priv(sdev->host);
	struct myrb_ldev_info *ldev_info = sdev->hostdata;
	enum raid_state state = RAID_STATE_UNKNOWN;
	unsigned short status;

	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
		state = RAID_STATE_UNKNOWN;
	else {
		status = myrb_get_rbld_progress(cb, NULL);
		if (status == MYRB_STATUS_SUCCESS)
			state = RAID_STATE_RESYNCING;
		else {
			switch (ldev_info->state) {
			case MYRB_DEVICE_ONLINE:
				state = RAID_STATE_ACTIVE;
				break;
			case MYRB_DEVICE_WO:
			case MYRB_DEVICE_CRITICAL:
				state = RAID_STATE_DEGRADED;
				break;
			default:
				state = RAID_STATE_OFFLINE;
			}
		}
	}
	raid_set_state(myrb_raid_template, dev, state);
}
static struct raid_function_template myrb_raid_functions = {
	.cookie		= &myrb_template,
	.is_raid	= myrb_is_raid,
	.get_resync	= myrb_get_resync,
	.get_state	= myrb_get_state,
};
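/*
 * These callbacks back the generic SCSI raid class attached in
 * myrb_init_module() below; the raid class queries them to populate the
 * per-device raid state and resync-progress attributes in sysfs.
 */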
static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
		struct scsi_cmnd *scmd)
{
	unsigned short status;

	if (!cmd_blk)
		return;

	scsi_dma_unmap(scmd);

	if (cmd_blk->dcdb) {
		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
			      cmd_blk->dcdb_addr);
		cmd_blk->dcdb = NULL;
	}
	if (cmd_blk->sgl) {
		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
		cmd_blk->sgl = NULL;
		cmd_blk->sgl_addr = 0;
	}
	status = cmd_blk->status;
	switch (status) {
	case MYRB_STATUS_SUCCESS:
	case MYRB_STATUS_DEVICE_BUSY:
		scmd->result = (DID_OK << 16) | status;
		break;
	case MYRB_STATUS_BAD_DATA:
		dev_dbg(&scmd->device->sdev_gendev,
			"Bad Data Encountered\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0);
		else
			/* Write error */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0);
		break;
	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			/* Unrecovered read error, auto-reallocation failed */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04);
		else
			/* Write error, auto-reallocation failed */
			scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02);
		break;
	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
		dev_dbg(&scmd->device->sdev_gendev,
			"Logical Drive Nonexistent or Offline");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
		dev_dbg(&scmd->device->sdev_gendev,
			"Attempt to Access Beyond End of Logical Drive");
		/* Logical block address out of range */
		scsi_build_sense(scmd, 0, NOT_READY, 0x21, 0);
		break;
	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
		scmd->result = (DID_BAD_TARGET << 16);
		break;
	default:
		scmd_printk(KERN_ERR, scmd,
			    "Unexpected Error Status %04X", status);
		scmd->result = (DID_ERROR << 16);
		break;
	}
	scsi_done(scmd);
}

static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	if (!cmd_blk)
		return;

	if (cmd_blk->completion) {
		complete(cmd_blk->completion);
		cmd_blk->completion = NULL;
	}
}
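/*
 * Two completion paths meet here: driver-internal commands carry a
 * struct completion and are finished by myrb_handle_cmdblk(), while SCSI
 * commands are mapped back to their scsi_cmnd and finished through
 * myrb_handle_scsi(), which also releases any per-command DCDB and
 * scatter/gather pool entries before calling scsi_done().
 */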
static void myrb_monitor(struct work_struct *work)
{
	struct myrb_hba *cb = container_of(work,
			struct myrb_hba, monitor_work.work);
	struct Scsi_Host *shost = cb->host;
	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;

	dev_dbg(&shost->shost_gendev, "monitor tick\n");

	if (cb->new_ev_seq > cb->old_ev_seq) {
		int event = cb->old_ev_seq;

		dev_dbg(&shost->shost_gendev,
			"get event log no %d/%d\n",
			cb->new_ev_seq, event);
		myrb_get_event(cb, event);
		cb->old_ev_seq = event + 1;
		interval = 10;
	} else if (cb->need_err_info) {
		cb->need_err_info = false;
		dev_dbg(&shost->shost_gendev, "get error table\n");
		myrb_get_errtable(cb);
		interval = 10;
	} else if (cb->need_rbld && cb->rbld_first) {
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_ldev_info) {
		cb->need_ldev_info = false;
		dev_dbg(&shost->shost_gendev,
			"get logical drive info\n");
		myrb_get_ldev_info(cb);
		interval = 10;
	} else if (cb->need_rbld) {
		cb->need_rbld = false;
		dev_dbg(&shost->shost_gendev,
			"get rebuild progress\n");
		myrb_update_rbld_progress(cb);
		interval = 10;
	} else if (cb->need_cc_status) {
		cb->need_cc_status = false;
		dev_dbg(&shost->shost_gendev,
			"get consistency check progress\n");
		myrb_get_cc_progress(cb);
		interval = 10;
	} else if (cb->need_bgi_status) {
		cb->need_bgi_status = false;
		dev_dbg(&shost->shost_gendev, "get background init status\n");
		myrb_bgi_control(cb);
		interval = 10;
	} else {
		dev_dbg(&shost->shost_gendev, "new enquiry\n");
		mutex_lock(&cb->dma_mutex);
		myrb_hba_enquiry(cb);
		mutex_unlock(&cb->dma_mutex);
		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
		    cb->need_err_info || cb->need_rbld ||
		    cb->need_ldev_info || cb->need_cc_status ||
		    cb->need_bgi_status) {
			dev_dbg(&shost->shost_gendev,
				"reschedule monitor\n");
			interval = MYRB_SECONDARY_MONITOR_INTERVAL;
		}
	}
	if (interval > 1)
		cb->primary_monitor_time = jiffies;
	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
}
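/*
 * The monitor runs exactly one job per tick, in priority order: drain the
 * event log first, then the error table, rebuild progress, logical drive
 * info, consistency-check and background-init status; only when nothing
 * is pending does it issue a fresh enquiry.  Whenever work was done (or
 * the enquiry flagged new work) the next tick is scheduled sooner than
 * the idle interval.
 */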
/**
 * myrb_err_status - reports controller BIOS messages
 *
 * Controller BIOS messages are passed through the Error Status Register
 * when the driver performs the BIOS handshaking.
 *
 * Return: true for fatal errors and false otherwise.
 */
static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
		unsigned char parm0, unsigned char parm1)
{
	struct pci_dev *pdev = cb->pdev;

	switch (error) {
	case 0x00:
		dev_info(&pdev->dev,
			 "Physical Device %d:%d Not Responding\n",
			 parm1, parm0);
		break;
	case 0x08:
		dev_notice(&pdev->dev, "Spinning Up Drives\n");
		break;
	case 0x30:
		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
		break;
	case 0x60:
		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
		break;
	case 0x70:
		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
		break;
	case 0x90:
		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
			   parm1, parm0);
		break;
	case 0xA0:
		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
		break;
	case 0xB0:
		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
		break;
	case 0xD0:
		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
		break;
	case 0xF0:
		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
		return true;
	default:
		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
			error);
		return true;
	}
	return false;
}
/*
 * Hardware-specific functions
 */

/*
 * DAC960 LA Series Controllers
 */

static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
}

static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
}

static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
}

static inline bool DAC960_LA_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);

	return !(idb & DAC960_LA_IDB_INIT_DONE);
}

static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
}

static inline void DAC960_LA_ack_intr(void __iomem *base)
{
	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_LA_ODB_OFFSET);
}

static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);

	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
}

static inline void DAC960_LA_enable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}

static inline void DAC960_LA_disable_intr(void __iomem *base)
{
	unsigned char odb = 0xFF;

	odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
}
static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}
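/*
 * Note on ordering: words[0] holds the opcode byte that marks the memory
 * mailbox entry as in use, so it is deliberately written last.  The wmb()
 * keeps the controller from observing a live entry before the other three
 * words have landed, and the trailing mb() pushes the posted writes out
 * before the doorbell is rung.
 */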
static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
}

static inline unsigned short DAC960_LA_read_status(void __iomem *base)
{
	return readw(base + DAC960_LA_STS_OFFSET);
}
static inline bool
DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);

	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_LA_ERRSTS_PENDING;

	*error = errsts;
	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
	return true;
}
static inline unsigned short
DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_LA_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_LA_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_LA_write_hw_mbox(base, mbox);
	DAC960_LA_hw_mbox_new_cmd(base);
	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_LA_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_LA_read_status(base);
	DAC960_LA_ack_hw_mbox_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);

	return status;
}
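/*
 * The hardware mailbox handshake used above (and by the PG variant below):
 * wait for the inbound mailbox to drain, write the command, ring the
 * doorbell, poll for a status, then acknowledge both the interrupt and the
 * status so the mailbox is free for the next command.  This path is only
 * used at initialisation time, before the memory mailbox interface is
 * enabled, so polling with udelay() is acceptable here.
 */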
static int DAC960_LA_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_LA_disable_intr(base);
	DAC960_LA_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_LA_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_LA_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -ENODEV;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_LA_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_LA_enable_intr(base);
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_LA_disable_intr;
	cb->reset = DAC960_LA_reset_ctrl;

	return 0;
}
static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_LA_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrb_handle_cmdblk(cb, cmd_blk);
			else
				myrb_handle_scsi(cb, cmd_blk, scmd);
		}
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
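/*
 * The two reserved tags (MYRB_DCMD_TAG and MYRB_MCMD_TAG) identify the
 * driver's internal command blocks; any other completion id belongs to a
 * SCSI command whose block-layer tag was biased by 3 when it was queued,
 * hence the scsi_host_find_tag(cb->host, id - 3) lookup above.  The
 * status mailboxes form a ring: each consumed entry is zeroed (clearing
 * its valid flag) and the cursor wraps at last_stat_mbox.
 */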
static struct myrb_privdata DAC960_LA_privdata = {
	.hw_init	= DAC960_LA_hw_init,
	.irq_handler	= DAC960_LA_intr_handler,
	.mmio_size	= DAC960_LA_mmio_size,
};
/*
 * DAC960 PG Series Controllers
 */

static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
{
	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_reset_ctrl(void __iomem *base)
{
	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
}

static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
{
	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
}

static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_HWMBOX_FULL;
}

static inline bool DAC960_PG_init_in_progress(void __iomem *base)
{
	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);

	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
}

static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
}

static inline void DAC960_PG_ack_intr(void __iomem *base)
{
	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
	       base + DAC960_PG_ODB_OFFSET);
}

static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);

	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
}

static inline void DAC960_PG_enable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}

static inline void DAC960_PG_disable_intr(void __iomem *base)
{
	unsigned int imask = (unsigned int)-1;

	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
}
static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
		union myrb_cmd_mbox *mbox)
{
	mem_mbox->words[1] = mbox->words[1];
	mem_mbox->words[2] = mbox->words[2];
	mem_mbox->words[3] = mbox->words[3];
	/* Memory barrier to prevent reordering */
	wmb();
	mem_mbox->words[0] = mbox->words[0];
	/* Memory barrier to force PCI access */
	mb();
}
static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
}

static inline unsigned short
DAC960_PG_read_status(void __iomem *base)
{
	return readw(base + DAC960_PG_STS_OFFSET);
}
static inline bool
DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PG_ERRSTS_PENDING;

	*error = errsts;
	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
	return true;
}
static inline unsigned short
DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	unsigned short status;
	int timeout = 0;

	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (!DAC960_PG_hw_mbox_is_full(base))
			break;
		udelay(10);
		timeout++;
	}
	if (DAC960_PG_hw_mbox_is_full(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for empty mailbox\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	DAC960_PG_write_hw_mbox(base, mbox);
	DAC960_PG_hw_mbox_new_cmd(base);

	timeout = 0;
	while (timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_hw_mbox_status_available(base))
			break;
		udelay(10);
		timeout++;
	}
	if (!DAC960_PG_hw_mbox_status_available(base)) {
		dev_err(&pdev->dev,
			"Timeout waiting for mailbox status\n");
		return MYRB_STATUS_SUBSYS_TIMEOUT;
	}
	status = DAC960_PG_read_status(base);
	DAC960_PG_ack_hw_mbox_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);

	return status;
}
static int DAC960_PG_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	DAC960_PG_disable_intr(base);
	DAC960_PG_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PG_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PG_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PG_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PG_enable_intr(base);
	cb->qcmd = myrb_qcmd;
	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
	if (cb->dual_mode_interface)
		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
	else
		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
	cb->disable_intr = DAC960_PG_disable_intr;
	cb->reset = DAC960_PG_reset_ctrl;

	return 0;
}
static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	struct myrb_stat_mbox *next_stat_mbox;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	DAC960_PG_ack_intr(base);
	next_stat_mbox = cb->next_stat_mbox;
	while (next_stat_mbox->valid) {
		unsigned char id = next_stat_mbox->id;
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = next_stat_mbox->status;
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
		if (++next_stat_mbox > cb->last_stat_mbox)
			next_stat_mbox = cb->first_stat_mbox;

		if (cmd_blk) {
			if (id < 3)
				myrb_handle_cmdblk(cb, cmd_blk);
			else
				myrb_handle_scsi(cb, cmd_blk, scmd);
		}
	}
	cb->next_stat_mbox = next_stat_mbox;
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
static struct myrb_privdata DAC960_PG_privdata = {
	.hw_init	= DAC960_PG_hw_init,
	.irq_handler	= DAC960_PG_intr_handler,
	.mmio_size	= DAC960_PG_mmio_size,
};
/*
 * DAC960 PD Series Controllers
 */

static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
}

static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
{
	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
}

static inline void DAC960_PD_reset_ctrl(void __iomem *base)
{
	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
}

static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_HWMBOX_FULL;
}

static inline bool DAC960_PD_init_in_progress(void __iomem *base)
{
	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);

	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
}

static inline void DAC960_PD_ack_intr(void __iomem *base)
{
	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
}

static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
{
	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);

	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
}

static inline void DAC960_PD_enable_intr(void __iomem *base)
{
	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
}

static inline void DAC960_PD_disable_intr(void __iomem *base)
{
	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
}
static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
		union myrb_cmd_mbox *mbox)
{
	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
}

static inline unsigned char
DAC960_PD_read_status_cmd_ident(void __iomem *base)
{
	return readb(base + DAC960_PD_STSID_OFFSET);
}

static inline unsigned short
DAC960_PD_read_status(void __iomem *base)
{
	return readw(base + DAC960_PD_STS_OFFSET);
}
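/*
 * The PD (and P) series have no memory mailbox: commands are written
 * straight to the hardware mailbox registers and completions are picked
 * up one at a time from the status-id/status registers in the interrupt
 * handler.  This is why DAC960_PD_hw_init() calls myrb_enable_mmio()
 * with a NULL mailbox-init callback below.
 */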
static inline bool
DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
		unsigned char *param0, unsigned char *param1)
{
	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);

	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
		return false;
	errsts &= ~DAC960_PD_ERRSTS_PENDING;

	*error = errsts;
	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
	return true;
}
static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
static int DAC960_PD_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_PD_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}
static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
static struct myrb_privdata DAC960_PD_privdata = {
	.hw_init	= DAC960_PD_hw_init,
	.irq_handler	= DAC960_PD_intr_handler,
	.mmio_size	= DAC960_PD_mmio_size,
};
/*
 * DAC960 P Series Controllers
 *
 * Similar to the DAC960 PD Series Controllers, but some commands have
 * to be translated.
 */

static inline void myrb_translate_enquiry(void *enq)
{
	memcpy(enq + 132, enq + 36, 64);
	memset(enq + 36, 0, 96);
}

static inline void myrb_translate_devstate(void *state)
{
	memcpy(state + 2, state + 3, 1);
	memmove(state + 4, state + 5, 2);
	memmove(state + 6, state + 8, 4);
}

static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->type5.ld.ldev_num;

	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= mbox->bytes[7] << 6;
	mbox->bytes[7] = ldev_num;
}

static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->bytes[7];

	mbox->bytes[7] = mbox->bytes[3] >> 6;
	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= ldev_num << 3;
}
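/*
 * Mechanically, myrb_translate_to_rw_command() stashes what mailbox byte 7
 * held in the top two bits of byte 3 and moves the logical drive number
 * into byte 7, which is where the old-style firmware commands expect it;
 * myrb_translate_from_rw_command() undoes the shuffle once the command
 * completes, so the rest of the driver only ever sees the new layout.
 */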
static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	switch (mbox->common.opcode) {
	case MYRB_CMD_ENQUIRY:
		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
		break;
	case MYRB_CMD_GET_DEVICE_STATE:
		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
		break;
	case MYRB_CMD_READ:
		mbox->common.opcode = MYRB_CMD_READ_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE:
		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_READ_SG:
		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE_SG:
		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	default:
		break;
	}
	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
static int DAC960_P_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EAGAIN;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to allocate DMA mapped memory\n");
		DAC960_PD_reset_ctrl(base);
		return -ETIMEDOUT;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_P_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}
static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;
		union myrb_cmd_mbox *mbox;
		enum myrb_cmd_opcode op;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		if (!cmd_blk)
			continue;

		mbox = &cmd_blk->mbox;
		op = mbox->common.opcode;
		switch (op) {
		case MYRB_CMD_ENQUIRY_OLD:
			mbox->common.opcode = MYRB_CMD_ENQUIRY;
			myrb_translate_enquiry(cb->enquiry);
			break;
		case MYRB_CMD_READ_OLD:
			mbox->common.opcode = MYRB_CMD_READ;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_READ_SG_OLD:
			mbox->common.opcode = MYRB_CMD_READ_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_SG_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		default:
			break;
		}
		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
static struct myrb_privdata DAC960_P_privdata = {
	.hw_init	= DAC960_P_hw_init,
	.irq_handler	= DAC960_P_intr_handler,
	.mmio_size	= DAC960_PD_mmio_size,
};
static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrb_privdata *privdata =
		(struct myrb_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct Scsi_Host *shost;
	struct myrb_hba *cb = NULL;

	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
	if (!shost) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	shost->max_cmd_len = 12;
	shost->max_lun = 256;
	cb = shost_priv(shost);
	mutex_init(&cb->dcmd_mutex);
	mutex_init(&cb->dma_mutex);
	cb->pdev = pdev;
	cb->host = shost;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		scsi_host_put(shost);
		return NULL;
	}

	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else
		cb->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cb);
	spin_lock_init(&cb->queue_lock);
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
	if (cb->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto failure;
	}

	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cb, cb->io_base))
		goto failure;

	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto failure;
	}
	cb->irq = pdev->irq;
	return cb;

failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrb_cleanup(cb);
	return NULL;
}
static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
{
	struct myrb_hba *cb;
	int ret;

	cb = myrb_detect(dev, entry);
	if (!cb)
		return -ENODEV;

	ret = myrb_get_hba_config(cb);
	if (ret < 0) {
		myrb_cleanup(cb);
		return ret;
	}

	if (!myrb_create_mempools(dev, cb)) {
		ret = -ENOMEM;
		goto failed;
	}

	ret = scsi_add_host(cb->host, &dev->dev);
	if (ret) {
		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
		myrb_destroy_mempools(cb);
		goto failed;
	}
	scsi_scan_host(cb->host);
	return 0;
failed:
	myrb_cleanup(cb);
	return ret;
}

static void myrb_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	myrb_cleanup(cb);
	myrb_destroy_mempools(cb);
}
static const struct pci_device_id myrb_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
			       PCI_DEVICE_ID_DEC_21285,
			       PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
		.driver_data	= (unsigned long) &DAC960_LA_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, myrb_id_table);
static struct pci_driver myrb_pci_driver = {
	.name		= "myrb",
	.id_table	= myrb_id_table,
	.probe		= myrb_probe,
	.remove		= myrb_remove,
};

static int __init myrb_init_module(void)
{
	int ret;

	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
	if (!myrb_raid_template)
		return -ENODEV;

	ret = pci_register_driver(&myrb_pci_driver);
	if (ret)
		raid_class_release(myrb_raid_template);

	return ret;
}

static void __exit myrb_cleanup_module(void)
{
	pci_unregister_driver(&myrb_pci_driver);
	raid_class_release(myrb_raid_template);
}

module_init(myrb_init_module);
module_exit(myrb_cleanup_module);

MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
MODULE_LICENSE("GPL");