/*
 *    Disk Array driver for HP Smart Array controllers.
 *    (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *    General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 *    02111-1307, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/major.h>
#include <linux/bio.h>
#include <linux/blkpg.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <asm/uaccess.h>

#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/completion.h>
#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <linux/cdrom.h>
#include <linux/scatterlist.h>
#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
#define DRIVER_NAME "HP CISS Driver (v 3.6.20)"
#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20)
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
			" SA6i P600 P800 P400 P400i E200 E200i E500 P700m"
			" Smart Array G2 Series SAS/SATA Controllers");
MODULE_VERSION("3.6.20");
MODULE_LICENSE("GPL");
#include "cciss_cmd.h"

#include <linux/cciss_ioctl.h>
/* define the PCI info for the cards we can control */
static const struct pci_device_id cciss_pci_device_id[] = {
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,  0x0E11, 0x4070},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSA,     0x103C, 0x3225},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3223},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3234},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3235},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3211},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3212},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3213},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3214},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3215},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3237},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x323D},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
	{PCI_VENDOR_ID_HP,     PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 *  nr_cmds = Number of commands supported by controller
 */
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5_access, 512},
	{0x40800E11, "Smart Array 5i", &SA5B_access, 512},
	{0x40820E11, "Smart Array 532", &SA5B_access, 512},
	{0x40830E11, "Smart Array 5312", &SA5B_access, 512},
	{0x409A0E11, "Smart Array 641", &SA5_access, 512},
	{0x409B0E11, "Smart Array 642", &SA5_access, 512},
	{0x409C0E11, "Smart Array 6400", &SA5_access, 512},
	{0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
	{0x40910E11, "Smart Array 6i", &SA5_access, 512},
	{0x3225103C, "Smart Array P600", &SA5_access, 512},
	{0x3223103C, "Smart Array P800", &SA5_access, 512},
	{0x3234103C, "Smart Array P400", &SA5_access, 512},
	{0x3235103C, "Smart Array P400i", &SA5_access, 512},
	{0x3211103C, "Smart Array E200i", &SA5_access, 120},
	{0x3212103C, "Smart Array E200", &SA5_access, 120},
	{0x3213103C, "Smart Array E200i", &SA5_access, 120},
	{0x3214103C, "Smart Array E200i", &SA5_access, 120},
	{0x3215103C, "Smart Array E200i", &SA5_access, 120},
	{0x3237103C, "Smart Array E500", &SA5_access, 512},
	{0x323D103C, "Smart Array P700m", &SA5_access, 512},
	{0x3241103C, "Smart Array P212", &SA5_access, 384},
	{0x3243103C, "Smart Array P410", &SA5_access, 384},
	{0x3245103C, "Smart Array P410i", &SA5_access, 384},
	{0x3247103C, "Smart Array P411", &SA5_access, 384},
	{0x3249103C, "Smart Array P812", &SA5_access, 384},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
};
/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Originally the cciss driver only supported 8 major numbers */
#define MAX_CTLR_ORIG	8
static ctlr_info_t *hba[MAX_CTLR];

static void do_cciss_request(struct request_queue *q);
static irqreturn_t do_cciss_intr(int irq, void *dev_id);
static int cciss_open(struct inode *inode, struct file *filep);
static int cciss_release(struct inode *inode, struct file *filep);
static int cciss_ioctl(struct inode *inode, struct file *filep,
		       unsigned int cmd, unsigned long arg);
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);

static int cciss_revalidate(struct gendisk *disk);
static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
			   int clear_all);

static void cciss_read_capacity(int ctlr, int logvol, int withirq,
				sector_t *total_size, unsigned int *block_size);
static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
				   sector_t *total_size, unsigned int *block_size);
static void cciss_geometry_inquiry(int ctlr, int logvol,
				   int withirq, sector_t total_size,
				   unsigned int block_size,
				   InquiryData_struct *inq_buff,
				   drive_info_struct *drv);
static void cciss_getgeometry(int cntl_num);
static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
					   __u32);
static void start_io(ctlr_info_t *h);
static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
		   unsigned int use_unit_num, unsigned int log_unit,
		   __u8 page_code, unsigned char *scsi3addr, int cmd_type);
static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
			   unsigned int use_unit_num, unsigned int log_unit,
			   __u8 page_code, int cmd_type);

static void fail_all_cmds(unsigned long ctlr);

#ifdef CONFIG_PROC_FS
static void cciss_procinit(int i);
#else
static void cciss_procinit(int i)
{
}
#endif				/* CONFIG_PROC_FS */

static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
static struct block_device_operations cciss_fops = {
	.owner = THIS_MODULE,
	.release = cciss_release,
	.ioctl = cciss_ioctl,
	.getgeo = cciss_getgeo,
	.compat_ioctl = cciss_compat_ioctl,
	.revalidate_disk = cciss_revalidate,
};
/*
 * Enqueuing and dequeuing functions for cmdlists.
 */
static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
{
	if (*Qptr == NULL) {
		*Qptr = c;
		c->next = c->prev = c;
	} else {
		c->prev = (*Qptr)->prev;
		c->next = (*Qptr);
		(*Qptr)->prev->next = c;
		(*Qptr)->prev = c;
	}
}

static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
					  CommandList_struct *c)
{
	if (c && c->next != c) {
		if (*Qptr == c)
			*Qptr = c->next;
		c->prev->next = c->next;
		c->next->prev = c->prev;
	} else {
		*Qptr = NULL;
	}
	return c;
}
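/*
 * Note on the helpers above: addQ() and removeQ() keep commands on a
 * circular doubly-linked list headed by a single pointer (for example
 * h->reqQ for commands awaiting submission).  An empty queue is a NULL
 * head, and a single element points at itself.  The ioctl and
 * sendcmd_withirq() paths below use addQ(&host->reqQ, c) followed by
 * start_io() to hand commands to the controller.
 */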
#include "cciss_scsi.c"		/* For SCSI tape support */

#define RAID_UNKNOWN 6

#ifdef CONFIG_PROC_FS

/*
 * Report information about this controller.
 */
#define ENG_GIG 1000000000
#define ENG_GIG_FACTOR (ENG_GIG/512)
#define ENGAGE_SCSI	"engage scsi"
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"UNKNOWN"
};

static struct proc_dir_entry *proc_cciss;
static void cciss_seq_show_header(struct seq_file *seq)
{
	ctlr_info_t *h = seq->private;

	seq_printf(seq, "%s: HP %s Controller\n"
		"Board ID: 0x%08lx\n"
		"Firmware Version: %c%c%c%c\n"
		"IRQ: %d\n"
		"Logical drives: %d\n"
		"Current Q depth: %d\n"
		"Current # commands on controller: %d\n"
		"Max Q depth since init: %d\n"
		"Max # commands on controller since init: %d\n"
		"Max SG entries since init: %d\n",
		h->devname,
		h->product_name,
		(unsigned long)h->board_id,
		h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
		h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
		h->num_luns,
		h->Qdepth, h->commands_outstanding,
		h->maxQsinceinit, h->max_outstanding, h->maxSG);

#ifdef CONFIG_CISS_SCSI_TAPE
	cciss_seq_tape_report(seq, h->ctlr);
#endif /* CONFIG_CISS_SCSI_TAPE */
}
static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
{
	ctlr_info_t *h = seq->private;
	unsigned ctlr = h->ctlr;
	unsigned long flags;

	/* prevent displaying bogus info during configuration
	 * or deconfiguration of a logical volume
	 */
	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
		return ERR_PTR(-EBUSY);
	}
	h->busy_configuring = 1;
	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);

	if (*pos == 0)
		cciss_seq_show_header(seq);

	return pos;
}
static int cciss_seq_show(struct seq_file *seq, void *v)
{
	sector_t vol_sz, vol_sz_frac;
	ctlr_info_t *h = seq->private;
	unsigned ctlr = h->ctlr;
	loff_t *pos = v;
	drive_info_struct *drv = &h->drv[*pos];

	if (*pos > h->highest_lun)
		return 0;

	if (drv->heads == 0)
		return 0;

	vol_sz = drv->nr_blocks;
	vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
	vol_sz_frac *= 100;
	sector_div(vol_sz_frac, ENG_GIG_FACTOR);

	if (drv->raid_level > 5)
		drv->raid_level = RAID_UNKNOWN;
	seq_printf(seq, "cciss/c%dd%d:"
			"\t%4u.%02uGB\tRAID %s\n",
			ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
			raid_label[drv->raid_level]);
	return 0;
}
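/*
 * Size arithmetic used above, as a worked example: nr_blocks counts 512-byte
 * sectors and ENG_GIG_FACTOR is (10^9 / 512) = 1953125.  For a volume of
 * 146800640 sectors, the first sector_div() leaves vol_sz = 75 and returns
 * the remainder 316265; scaling by 100 and dividing again gives 16, so the
 * line is printed as "75.16GB".
 */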
static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	ctlr_info_t *h = seq->private;

	if (*pos > h->highest_lun)
		return NULL;
	*pos += 1;

	return pos;
}
static void cciss_seq_stop(struct seq_file *seq, void *v)
{
	ctlr_info_t *h = seq->private;

	/* Only reset h->busy_configuring if we succeeded in setting
	 * it during cciss_seq_start. */
	if (v == ERR_PTR(-EBUSY))
		return;

	h->busy_configuring = 0;
}
static struct seq_operations cciss_seq_ops = {
	.start = cciss_seq_start,
	.show  = cciss_seq_show,
	.next  = cciss_seq_next,
	.stop  = cciss_seq_stop,
};
static int cciss_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &cciss_seq_ops);
	struct seq_file *seq = file->private_data;

	if (!ret)
		seq->private = PDE(inode)->data;

	return ret;
}
static ssize_t
cciss_proc_write(struct file *file, const char __user *buf,
		 size_t length, loff_t *ppos)
{
	int err;
	char *buffer;

#ifndef CONFIG_CISS_SCSI_TAPE
	return -EINVAL;
#endif

	if (!buf || length > PAGE_SIZE - 1)
		return -EINVAL;

	buffer = (char *)__get_free_page(GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(buffer, buf, length))
		goto out;
	buffer[length] = '\0';

#ifdef CONFIG_CISS_SCSI_TAPE
	if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
		struct seq_file *seq = file->private_data;
		ctlr_info_t *h = seq->private;
		int rc;

		rc = cciss_engage_scsi(h->ctlr);
		if (rc != 0)
			err = -rc;
		else
			err = length;
	} else
#endif /* CONFIG_CISS_SCSI_TAPE */
		err = -EINVAL;
	/* might be nice to have "disengage" too, but it's not
	   safely possible. (only 1 module use count, lock issues.) */

out:
	free_page((unsigned long)buffer);
	return err;
}
static struct file_operations cciss_proc_fops = {
	.owner	 = THIS_MODULE,
	.open    = cciss_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
	.write	 = cciss_proc_write,
};
static void __devinit cciss_procinit(int i)
{
	struct proc_dir_entry *pde;

	if (proc_cciss == NULL)
		proc_cciss = proc_mkdir("driver/cciss", NULL);
	if (!proc_cciss)
		return;
	pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
			       S_IROTH, proc_cciss,
			       &cciss_proc_fops, hba[i]);
}
#endif				/* CONFIG_PROC_FS */
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  For operations that can wait for kmalloc
 * to possibly sleep, this routine can be called with get_from_pool set to 0.
 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc was.
 */
static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
{
	CommandList_struct *c;
	int i;
	u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	if (!get_from_pool) {
		c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
			sizeof(CommandList_struct), &cmd_dma_handle);
		if (c == NULL)
			return NULL;
		memset(c, 0, sizeof(CommandList_struct));

		c->err_info = (ErrorInfo_struct *)
		    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
					 &err_dma_handle);

		if (c->err_info == NULL) {
			pci_free_consistent(h->pdev,
				sizeof(CommandList_struct), c, cmd_dma_handle);
			return NULL;
		}
		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
	} else {		/* get it out of the controller's pool */
		do {
			i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
			if (i == h->nr_cmds)
				return NULL;
		} while (test_and_set_bit
			 (i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
#ifdef CCISS_DEBUG
		printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
#endif
		c = h->cmd_pool + i;
		memset(c, 0, sizeof(CommandList_struct));
		cmd_dma_handle = h->cmd_pool_dhandle
		    + i * sizeof(CommandList_struct);
		c->err_info = h->errinfo_pool + i;
		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
		err_dma_handle = h->errinfo_pool_dhandle
		    + i * sizeof(ErrorInfo_struct);
		h->nr_allocs++;
	}

	c->busaddr = (__u32) cmd_dma_handle;
	temp64.val = (__u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(ErrorInfo_struct);

	c->ctlr = h->ctlr;
	return c;
}
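/*
 * Allocation summary for cmd_alloc()/cmd_free(): with get_from_pool == 0 the
 * command and its ErrorInfo_struct come straight from pci_alloc_consistent()
 * and the DMA handles are kept in c->busaddr and c->ErrDesc so they can be
 * returned later; with get_from_pool != 0 a free slot is claimed atomically
 * from h->cmd_pool_bits with test_and_set_bit() and the handles are computed
 * from the pool base addresses.  Either way the caller must hand the matching
 * got_from_pool value back to cmd_free().
 */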
/*
 * Frees a command block that was previously allocated with cmd_alloc().
 */
static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
{
	int i;
	u64bit temp64;

	if (!got_from_pool) {
		temp64.val32.lower = c->ErrDesc.Addr.lower;
		temp64.val32.upper = c->ErrDesc.Addr.upper;
		pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
				    c->err_info, (dma_addr_t) temp64.val);
		pci_free_consistent(h->pdev, sizeof(CommandList_struct),
				    c, (dma_addr_t) c->busaddr);
	} else {
		i = c - h->cmd_pool;
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
		h->nr_frees++;
	}
}
static inline ctlr_info_t *get_host(struct gendisk *disk)
{
	return disk->queue->queuedata;
}

static inline drive_info_struct *get_drv(struct gendisk *disk)
{
	return disk->private_data;
}
/*
 * Open.  Make sure the device is really there.
 */
static int cciss_open(struct inode *inode, struct file *filep)
{
	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
	drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
#endif				/* CCISS_DEBUG */

	if (host->busy_initializing || drv->busy_configuring)
		return -EBUSY;
	/*
	 * Root is allowed to open raw volume zero even if it's not configured
	 * so array config can still work.  Root is also allowed to open any
	 * volume that has a LUN ID, so it can issue IOCTL to reread the
	 * disk information.  I don't think I really like this
	 * but I'm already using way too many device nodes to claim another one
	 * for "raw controller".
	 */
	if (drv->heads == 0) {
		if (iminor(inode) != 0) {	/* not node 0? */
			/* if not node 0 make sure it is a partition = 0 */
			if (iminor(inode) & 0x0f) {
				return -ENXIO;
				/* if it is, make sure we have a LUN ID */
			} else if (drv->LunID == 0) {
				return -ENXIO;
			}
		}
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}
	drv->usage_count++;
	host->usage_count++;
	return 0;
}
static int cciss_release(struct inode *inode, struct file *filep)
{
	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
	drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss_release %s\n",
	       inode->i_bdev->bd_disk->disk_name);
#endif				/* CCISS_DEBUG */

	drv->usage_count--;
	host->usage_count--;
	return 0;
}
static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
{
	int ret;

	ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
	return ret;
}
static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
				  unsigned long arg);
static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
				      unsigned long arg);

static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return do_ioctl(f, cmd, arg);

	case CCISS_PASSTHRU32:
		return cciss_ioctl32_passthru(f, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return cciss_ioctl32_big_passthru(f, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
				  unsigned long arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			      sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			      sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			      sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));
	if (err)
		return -EFAULT;

	err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			    sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
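/*
 * The 32-bit passthru conversion above stages a native IOCTL_Command_struct
 * in user space via compat_alloc_user_space(), widens the 32-bit buffer
 * pointer with compat_ptr(), and then reuses the normal CCISS_PASSTHRU path
 * through do_ioctl().  Only error_info has to be copied back into the 32-bit
 * structure afterwards.  The big-passthru variant below follows the same
 * pattern with the extra malloc_size field.
 */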
static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
				      unsigned long arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			      sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			      sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			      sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));
	if (err)
		return -EFAULT;

	err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			    sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	drive_info_struct *drv = get_drv(bdev->bd_disk);

	if (!drv->cylinders)
		return -ENXIO;

	geo->heads = drv->heads;
	geo->sectors = drv->sectors;
	geo->cylinders = drv->cylinders;
	return 0;
}
static int cciss_ioctl(struct inode *inode, struct file *filep,
		       unsigned int cmd, unsigned long arg)
{
	struct block_device *bdev = inode->i_bdev;
	struct gendisk *disk = bdev->bd_disk;
	ctlr_info_t *host = get_host(disk);
	drive_info_struct *drv = get_drv(disk);
	int ctlr = host->ctlr;
	void __user *argp = (void __user *)arg;

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
#endif				/* CCISS_DEBUG */

	switch (cmd) {
	case CCISS_GETPCIINFO:
		{
			cciss_pci_info_struct pciinfo;

			if (!arg)
				return -EINVAL;
			pciinfo.domain = pci_domain_nr(host->pdev->bus);
			pciinfo.bus = host->pdev->bus->number;
			pciinfo.dev_fn = host->pdev->devfn;
			pciinfo.board_id = host->board_id;
			if (copy_to_user
			    (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
				return -EFAULT;
			return 0;
		}
	case CCISS_GETINTINFO:
		{
			cciss_coalint_struct intinfo;

			if (!arg)
				return -EINVAL;
			intinfo.delay =
			    readl(&host->cfgtable->HostWrite.CoalIntDelay);
			intinfo.count =
			    readl(&host->cfgtable->HostWrite.CoalIntCount);
			if (copy_to_user
			    (argp, &intinfo, sizeof(cciss_coalint_struct)))
				return -EFAULT;
			return 0;
		}
	case CCISS_SETINTINFO:
		{
			cciss_coalint_struct intinfo;
			unsigned long flags;
			int i;

			if (!arg)
				return -EINVAL;
			if (!capable(CAP_SYS_ADMIN))
				return -EPERM;
			if (copy_from_user
			    (&intinfo, argp, sizeof(cciss_coalint_struct)))
				return -EFAULT;
			if ((intinfo.delay == 0) && (intinfo.count == 0)) {
				/* printk("cciss_ioctl: delay and count cannot be 0\n"); */
				return -EINVAL;
			}
			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
			/* Update the field, and then ring the doorbell */
			writel(intinfo.delay,
			       &(host->cfgtable->HostWrite.CoalIntDelay));
			writel(intinfo.count,
			       &(host->cfgtable->HostWrite.CoalIntCount));
			writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);

			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
				if (!(readl(host->vaddr + SA5_DOORBELL)
				      & CFGTBL_ChangeReq))
					break;
				/* delay and try again */
				udelay(1000);
			}
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
			if (i >= MAX_IOCTL_CONFIG_WAIT)
				return -EAGAIN;
			return 0;
		}
	case CCISS_GETNODENAME:
		{
			NodeName_type NodeName;
			int i;

			if (!arg)
				return -EINVAL;
			for (i = 0; i < 16; i++)
				NodeName[i] =
				    readb(&host->cfgtable->ServerName[i]);
			if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
				return -EFAULT;
			return 0;
		}
	case CCISS_SETNODENAME:
		{
			NodeName_type NodeName;
			unsigned long flags;
			int i;

			if (!arg)
				return -EINVAL;
			if (!capable(CAP_SYS_ADMIN))
				return -EPERM;

			if (copy_from_user
			    (NodeName, argp, sizeof(NodeName_type)))
				return -EFAULT;

			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);

			/* Update the field, and then ring the doorbell */
			for (i = 0; i < 16; i++)
				writeb(NodeName[i],
				       &host->cfgtable->ServerName[i]);

			writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);

			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
				if (!(readl(host->vaddr + SA5_DOORBELL)
				      & CFGTBL_ChangeReq))
					break;
				/* delay and try again */
				udelay(1000);
			}
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
			if (i >= MAX_IOCTL_CONFIG_WAIT)
				return -EAGAIN;
			return 0;
		}

	case CCISS_GETHEARTBEAT:
		{
			Heartbeat_type heartbeat;

			if (!arg)
				return -EINVAL;
			heartbeat = readl(&host->cfgtable->HeartBeat);
			if (copy_to_user
			    (argp, &heartbeat, sizeof(Heartbeat_type)))
				return -EFAULT;
			return 0;
		}
	case CCISS_GETBUSTYPES:
		{
			BusTypes_type BusTypes;

			if (!arg)
				return -EINVAL;
			BusTypes = readl(&host->cfgtable->BusTypes);
			if (copy_to_user
			    (argp, &BusTypes, sizeof(BusTypes_type)))
				return -EFAULT;
			return 0;
		}
	case CCISS_GETFIRMVER:
		{
			FirmwareVer_type firmware;

			if (!arg)
				return -EINVAL;
			memcpy(firmware, host->firm_ver, 4);

			if (copy_to_user
			    (argp, firmware, sizeof(FirmwareVer_type)))
				return -EFAULT;
			return 0;
		}
	case CCISS_GETDRIVVER:
		{
			DriverVer_type DriverVer = DRIVER_VERSION;

			if (!arg)
				return -EINVAL;
			if (copy_to_user
			    (argp, &DriverVer, sizeof(DriverVer_type)))
				return -EFAULT;
			return 0;
		}

	case CCISS_REVALIDVOLS:
		return rebuild_lun_table(host, NULL);

	case CCISS_GETLUNINFO:{
			LogvolInfo_struct luninfo;

			luninfo.LunID = drv->LunID;
			luninfo.num_opens = drv->usage_count;
			luninfo.num_parts = 0;
			if (copy_to_user(argp, &luninfo,
					 sizeof(LogvolInfo_struct)))
				return -EFAULT;
			return 0;
		}
	case CCISS_DEREGDISK:
		return rebuild_lun_table(host, disk);

	case CCISS_REGNEWD:
		return rebuild_lun_table(host, NULL);

	case CCISS_PASSTHRU:
		{
			IOCTL_Command_struct iocommand;
			CommandList_struct *c;
			char *buff = NULL;
			u64bit temp64;
			unsigned long flags;
			DECLARE_COMPLETION_ONSTACK(wait);

			if (!arg)
				return -EINVAL;

			if (!capable(CAP_SYS_RAWIO))
				return -EPERM;

			if (copy_from_user
			    (&iocommand, argp, sizeof(IOCTL_Command_struct)))
				return -EFAULT;
			if ((iocommand.buf_size < 1) &&
			    (iocommand.Request.Type.Direction != XFER_NONE)) {
				return -EINVAL;
			}
#if 0				/* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
			/* Check kmalloc limits */
			if (iocommand.buf_size > 128000)
				return -EINVAL;
#endif
			if (iocommand.buf_size > 0) {
				buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
				if (buff == NULL)
					return -ENOMEM;
			}
			if (iocommand.Request.Type.Direction == XFER_WRITE) {
				/* Copy the data into the buffer we created */
				if (copy_from_user
				    (buff, iocommand.buf, iocommand.buf_size)) {
					kfree(buff);
					return -EFAULT;
				}
			} else {
				memset(buff, 0, iocommand.buf_size);
			}
			if ((c = cmd_alloc(host, 0)) == NULL) {
				kfree(buff);
				return -ENOMEM;
			}
			/* Fill in the command type */
			c->cmd_type = CMD_IOCTL_PEND;
			/* Fill in Command Header */
			c->Header.ReplyQueue = 0;	/* unused in simple mode */
			if (iocommand.buf_size > 0) {	/* buffer to fill */
				c->Header.SGList = 1;
				c->Header.SGTotal = 1;
			} else {	/* no buffers to fill */
				c->Header.SGList = 0;
				c->Header.SGTotal = 0;
			}
			c->Header.LUN = iocommand.LUN_info;
			/* use the kernel address of the cmd block for tag */
			c->Header.Tag.lower = c->busaddr;

			/* Fill in Request block */
			c->Request = iocommand.Request;

			/* Fill in the scatter gather information */
			if (iocommand.buf_size > 0) {
				temp64.val = pci_map_single(host->pdev, buff,
						iocommand.buf_size,
						PCI_DMA_BIDIRECTIONAL);
				c->SG[0].Addr.lower = temp64.val32.lower;
				c->SG[0].Addr.upper = temp64.val32.upper;
				c->SG[0].Len = iocommand.buf_size;
				c->SG[0].Ext = 0;	/* we are not chaining */
			}
			c->waiting = &wait;

			/* Put the request on the tail of the request queue */
			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
			addQ(&host->reqQ, c);
			host->Qdepth++;
			start_io(host);
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);

			wait_for_completion(&wait);

			/* unlock the buffers from DMA */
			temp64.val32.lower = c->SG[0].Addr.lower;
			temp64.val32.upper = c->SG[0].Addr.upper;
			pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
					 iocommand.buf_size,
					 PCI_DMA_BIDIRECTIONAL);

			/* Copy the error information out */
			iocommand.error_info = *(c->err_info);
			if (copy_to_user
			    (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
				kfree(buff);
				cmd_free(host, c, 0);
				return -EFAULT;
			}

			if (iocommand.Request.Type.Direction == XFER_READ) {
				/* Copy the data out of the buffer we created */
				if (copy_to_user
				    (iocommand.buf, buff, iocommand.buf_size)) {
					kfree(buff);
					cmd_free(host, c, 0);
					return -EFAULT;
				}
			}
			kfree(buff);
			cmd_free(host, c, 0);
			return 0;
		}
	case CCISS_BIG_PASSTHRU:{
			BIG_IOCTL_Command_struct *ioc;
			CommandList_struct *c;
			unsigned char **buff = NULL;
			int *buff_size = NULL;
			u64bit temp64;
			unsigned long flags;
			BYTE sg_used = 0;
			int status = 0;
			int i;
			DECLARE_COMPLETION_ONSTACK(wait);
			__u32 left;
			__u32 sz;
			BYTE __user *data_ptr;

			if (!arg)
				return -EINVAL;
			if (!capable(CAP_SYS_RAWIO))
				return -EPERM;
			ioc = (BIG_IOCTL_Command_struct *)
			    kmalloc(sizeof(*ioc), GFP_KERNEL);
			if (!ioc) {
				status = -ENOMEM;
				goto cleanup1;
			}
			if (copy_from_user(ioc, argp, sizeof(*ioc))) {
				status = -EFAULT;
				goto cleanup1;
			}
			if ((ioc->buf_size < 1) &&
			    (ioc->Request.Type.Direction != XFER_NONE)) {
				status = -EINVAL;
				goto cleanup1;
			}
			/* Check kmalloc limits using all SGs */
			if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
				status = -EINVAL;
				goto cleanup1;
			}
			if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
				status = -EINVAL;
				goto cleanup1;
			}
			buff =
			    kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
			if (!buff) {
				status = -ENOMEM;
				goto cleanup1;
			}
			buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
					    GFP_KERNEL);
			if (!buff_size) {
				status = -ENOMEM;
				goto cleanup1;
			}
			left = ioc->buf_size;
			data_ptr = ioc->buf;
			while (left) {
				sz = (left >
				      ioc->malloc_size) ? ioc->
				    malloc_size : left;
				buff_size[sg_used] = sz;
				buff[sg_used] = kmalloc(sz, GFP_KERNEL);
				if (buff[sg_used] == NULL) {
					status = -ENOMEM;
					goto cleanup1;
				}
				if (ioc->Request.Type.Direction == XFER_WRITE) {
					if (copy_from_user
					    (buff[sg_used], data_ptr, sz)) {
						status = -EFAULT;
						goto cleanup1;
					}
				} else {
					memset(buff[sg_used], 0, sz);
				}
				left -= sz;
				data_ptr += sz;
				sg_used++;
			}
			if ((c = cmd_alloc(host, 0)) == NULL) {
				status = -ENOMEM;
				goto cleanup1;
			}
			c->cmd_type = CMD_IOCTL_PEND;
			c->Header.ReplyQueue = 0;

			if (ioc->buf_size > 0) {
				c->Header.SGList = sg_used;
				c->Header.SGTotal = sg_used;
			} else {
				c->Header.SGList = 0;
				c->Header.SGTotal = 0;
			}
			c->Header.LUN = ioc->LUN_info;
			c->Header.Tag.lower = c->busaddr;

			c->Request = ioc->Request;
			if (ioc->buf_size > 0) {
				for (i = 0; i < sg_used; i++) {
					temp64.val =
					    pci_map_single(host->pdev, buff[i],
						   buff_size[i],
						   PCI_DMA_BIDIRECTIONAL);
					c->SG[i].Addr.lower =
					    temp64.val32.lower;
					c->SG[i].Addr.upper =
					    temp64.val32.upper;
					c->SG[i].Len = buff_size[i];
					c->SG[i].Ext = 0;	/* we are not chaining */
				}
			}
			c->waiting = &wait;
			/* Put the request on the tail of the request queue */
			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
			addQ(&host->reqQ, c);
			host->Qdepth++;
			start_io(host);
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
			wait_for_completion(&wait);
			/* unlock the buffers from DMA */
			for (i = 0; i < sg_used; i++) {
				temp64.val32.lower = c->SG[i].Addr.lower;
				temp64.val32.upper = c->SG[i].Addr.upper;
				pci_unmap_single(host->pdev,
					(dma_addr_t) temp64.val, buff_size[i],
					PCI_DMA_BIDIRECTIONAL);
			}
			/* Copy the error information out */
			ioc->error_info = *(c->err_info);
			if (copy_to_user(argp, ioc, sizeof(*ioc))) {
				cmd_free(host, c, 0);
				status = -EFAULT;
				goto cleanup1;
			}
			if (ioc->Request.Type.Direction == XFER_READ) {
				/* Copy the data out of the buffer we created */
				BYTE __user *ptr = ioc->buf;
				for (i = 0; i < sg_used; i++) {
					if (copy_to_user
					    (ptr, buff[i], buff_size[i])) {
						cmd_free(host, c, 0);
						status = -EFAULT;
						goto cleanup1;
					}
					ptr += buff_size[i];
				}
			}
			cmd_free(host, c, 0);
			status = 0;
cleanup1:
			if (buff) {
				for (i = 0; i < sg_used; i++)
					kfree(buff[i]);
				kfree(buff);
			}
			kfree(buff_size);
			kfree(ioc);
			return status;
		}

		/* scsi_cmd_ioctl handles these, below, though some are not */
		/* very meaningful for cciss.  SG_IO is the main one people want. */

	case SG_GET_VERSION_NUM:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SG_IO:
	case SCSI_IOCTL_SEND_COMMAND:
		return scsi_cmd_ioctl(filep, disk->queue, disk, cmd, argp);

		/* scsi_cmd_ioctl would normally handle these, below, but */
		/* they aren't a good fit for cciss, as CD-ROMs are */
		/* not supported, and we don't have any bus/target/lun */
		/* which we present to the kernel. */

	case CDROM_SEND_PACKET:
	case CDROMCLOSETRAY:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	default:
		return -ENOTTY;
	}
}
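/*
 * Both passthru cases above follow the same pattern: copy the user request,
 * allocate a command with cmd_alloc(host, 0), describe the data with one SG
 * entry per kernel bounce buffer mapped via pci_map_single(), queue it with
 * addQ(&host->reqQ, c) under CCISS_LOCK and start_io(), then sleep on the
 * on-stack completion pointed to by c->waiting until the command finishes.
 * The buffers are unmapped and error information copied back to user space
 * before the command block is freed.
 */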
static void cciss_check_queues(ctlr_info_t *h)
{
	int start_queue = h->next_to_run;
	int i;

	/* check to see if we have maxed out the number of commands that can
	 * be placed on the queue.  If so then exit.  We do this check here
	 * in case the interrupt we serviced was from an ioctl and did not
	 * free any new commands.
	 */
	if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
		return;

	/* We have room on the queue for more commands.  Now we need to queue
	 * them up.  We will also keep track of the next queue to run so
	 * that every queue gets a chance to be started first.
	 */
	for (i = 0; i < h->highest_lun + 1; i++) {
		int curr_queue = (start_queue + i) % (h->highest_lun + 1);
		/* make sure the disk has been added and the drive is real
		 * because this can be called from the middle of init_one.
		 */
		if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
			continue;
		blk_start_queue(h->gendisk[curr_queue]->queue);

		/* check to see if we have maxed out the number of commands
		 * that can be placed on the queue.
		 */
		if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
			if (curr_queue == start_queue) {
				h->next_to_run =
				    (start_queue + 1) % (h->highest_lun + 1);
				break;
			} else {
				h->next_to_run = curr_queue;
				break;
			}
		} else {
			curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
		}
	}
}
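/*
 * cciss_check_queues() restarts the per-drive block queues once command
 * slots free up, walking the drives round-robin starting at h->next_to_run
 * so that no single logical volume can starve the others when the
 * controller's command pool is exhausted.
 */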
static void cciss_softirq_done(struct request *rq)
{
	CommandList_struct *cmd = rq->completion_data;
	ctlr_info_t *h = hba[cmd->ctlr];
	unsigned long flags;
	u64bit temp64;
	int i, ddir;

	if (cmd->Request.Type.Direction == XFER_READ)
		ddir = PCI_DMA_FROMDEVICE;
	else
		ddir = PCI_DMA_TODEVICE;

	/* command did not need to be retried */
	/* unmap the DMA mapping for all the scatter gather elements */
	for (i = 0; i < cmd->Header.SGList; i++) {
		temp64.val32.lower = cmd->SG[i].Addr.lower;
		temp64.val32.upper = cmd->SG[i].Addr.upper;
		pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
	}

#ifdef CCISS_DEBUG
	printk("Done with %p\n", rq);
#endif				/* CCISS_DEBUG */

	if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, blk_rq_bytes(rq)))
		BUG();

	spin_lock_irqsave(&h->lock, flags);
	cmd_free(h, cmd, 1);
	cciss_check_queues(h);
	spin_unlock_irqrestore(&h->lock, flags);
}
/* This function will check the usage_count of the drive to be updated/added.
 * If the usage_count is zero then the drive information will be updated and
 * the disk will be re-registered with the kernel.  If not then it will be
 * left alone for the next reboot.  The exception to this is disk 0 which
 * will always be left registered with the kernel since it is also the
 * controller node.  Any changes to disk 0 will show up on the next
 * reboot.
 */
static void cciss_update_drive_info(int ctlr, int drv_index)
{
	ctlr_info_t *h = hba[ctlr];
	struct gendisk *disk;
	InquiryData_struct *inq_buff = NULL;
	unsigned int block_size;
	sector_t total_size;
	unsigned long flags = 0;
	int ret = 0;

	/* if the disk already exists then deregister it before proceeding */
	if (h->drv[drv_index].raid_level != -1) {
		spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
		h->drv[drv_index].busy_configuring = 1;
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);

		/* deregister_disk sets h->drv[drv_index].queue = NULL */
		/* which keeps the interrupt handler from starting */
		/* the queue. */
		ret = deregister_disk(h->gendisk[drv_index],
				      &h->drv[drv_index], 0);
		h->drv[drv_index].busy_configuring = 0;
	}

	/* If the disk is in use return */
	if (ret)
		return;

	/* Get information about the disk and modify the driver structure */
	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	if (inq_buff == NULL)
		goto mem_msg;

	/* testing to see if 16-byte CDBs are already being used */
	if (h->cciss_read == CCISS_READ_16) {
		cciss_read_capacity_16(h->ctlr, drv_index, 1,
				       &total_size, &block_size);
	} else {
		cciss_read_capacity(ctlr, drv_index, 1,
				    &total_size, &block_size);

		/* if read_capacity returns all F's this volume is >2TB in */
		/* size so we switch to 16-byte CDB's for all read/write ops */
		if (total_size == 0xFFFFFFFFULL) {
			cciss_read_capacity_16(ctlr, drv_index, 1,
					       &total_size, &block_size);
			h->cciss_read = CCISS_READ_16;
			h->cciss_write = CCISS_WRITE_16;
		} else {
			h->cciss_read = CCISS_READ_10;
			h->cciss_write = CCISS_WRITE_10;
		}
	}

	cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
			       inq_buff, &h->drv[drv_index]);

	disk = h->gendisk[drv_index];
	set_capacity(disk, h->drv[drv_index].nr_blocks);

	/* if it's the controller it's already added */
	if (drv_index) {
		disk->queue = blk_init_queue(do_cciss_request, &h->lock);
		sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
		disk->major = h->major;
		disk->first_minor = drv_index << NWD_SHIFT;
		disk->fops = &cciss_fops;
		disk->private_data = &h->drv[drv_index];

		/* Set up queue information */
		blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);

		/* This is a hardware imposed limit. */
		blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);

		/* This is a limit in the driver and could be eliminated. */
		blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);

		blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);

		blk_queue_softirq_done(disk->queue, cciss_softirq_done);

		disk->queue->queuedata = hba[ctlr];

		blk_queue_hardsect_size(disk->queue,
					hba[ctlr]->drv[drv_index].block_size);

		/* Make sure all queue data is written out before */
		/* setting h->drv[drv_index].queue, as setting this */
		/* allows the interrupt handler to start the queue */
		wmb();
		h->drv[drv_index].queue = disk->queue;
		add_disk(disk);
	}

freeret:
	kfree(inq_buff);
	return;
mem_msg:
	printk(KERN_ERR "cciss: out of memory\n");
	goto freeret;
}
/* This function will find the first index of the controller's drive array
 * that has a -1 for the raid_level and will return that index.  This is
 * where new drives will be added.  If the index to be returned is greater
 * than the highest_lun index for the controller then highest_lun is set
 * to this new index.  If there are no available indexes then -1 is returned.
 */
static int cciss_find_free_drive_index(int ctlr)
{
	int i;

	for (i = 0; i < CISS_MAX_LUN; i++) {
		if (hba[ctlr]->drv[i].raid_level == -1) {
			if (i > hba[ctlr]->highest_lun)
				hba[ctlr]->highest_lun = i;
			return i;
		}
	}
	return -1;
}
/* This function will add and remove logical drives from the Logical
 * drive array of the controller and maintain persistence of ordering
 * so that mount points are preserved until the next reboot.  This allows
 * for the removal of logical drives in the middle of the drive array
 * without a re-ordering of those drives.
 * INPUT
 * h        = The controller to perform the operations on
 * del_disk = The disk to remove if specified.  If the value given
 *            is NULL then no disk is removed.
 */
static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
{
	int ctlr = h->ctlr;
	int num_luns;
	ReportLunData_struct *ld_buff = NULL;
	drive_info_struct *drv = NULL;
	int return_code;
	int listlength = 0;
	int i;
	int drv_found;
	int drv_index = 0;
	__u32 lunid = 0;
	unsigned long flags;

	/* Set busy_configuring flag for this operation */
	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		return -EBUSY;
	}
	h->busy_configuring = 1;

	/* if del_disk is NULL then we are being called to add a new disk
	 * and update the logical drive table.  If it is not NULL then
	 * we will check if the disk is in use or not.
	 */
	if (del_disk != NULL) {
		drv = get_drv(del_disk);
		drv->busy_configuring = 1;
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		return_code = deregister_disk(del_disk, drv, 1);
		drv->busy_configuring = 0;
		h->busy_configuring = 0;
		return return_code;
	} else {
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;

		ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
		if (ld_buff == NULL)
			goto mem_msg;

		return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
					      sizeof(ReportLunData_struct), 0,
					      0, 0, TYPE_CMD);

		if (return_code == IO_OK) {
			listlength =
			    be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
		} else {	/* reading number of logical volumes failed */
			printk(KERN_WARNING "cciss: report logical volume"
			       " command failed\n");
			listlength = 0;
			goto freeret;
		}

		num_luns = listlength / 8;	/* 8 bytes per entry */
		if (num_luns > CISS_MAX_LUN) {
			num_luns = CISS_MAX_LUN;
			printk(KERN_WARNING "cciss: more luns configured"
			       " on controller than can be handled by"
			       " this driver.\n");
		}

		/* Compare controller drive array to driver's drive array.
		 * Check for updates in the drive information and any new
		 * drives on the controller.
		 */
		for (i = 0; i < num_luns; i++) {
			int j;

			drv_found = 0;

			lunid = (0xff &
				 (unsigned int)(ld_buff->LUN[i][3])) << 24;
			lunid |= (0xff &
				  (unsigned int)(ld_buff->LUN[i][2])) << 16;
			lunid |= (0xff &
				  (unsigned int)(ld_buff->LUN[i][1])) << 8;
			lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);

			/* Find if the LUN is already in the drive array
			 * of the controller.  If so then update its info
			 * if not in use.  If it does not exist then find
			 * the first free index and add it.
			 */
			for (j = 0; j <= h->highest_lun; j++) {
				if (h->drv[j].LunID == lunid) {
					drv_index = j;
					drv_found = 1;
				}
			}

			/* check if the drive was found already in the array */
			if (!drv_found) {
				drv_index = cciss_find_free_drive_index(ctlr);
				if (drv_index == -1)
					goto freeret;

				/* Check if the gendisk needs to be allocated */
				if (!h->gendisk[drv_index]) {
					h->gendisk[drv_index] =
					    alloc_disk(1 << NWD_SHIFT);
					if (!h->gendisk[drv_index]) {
						printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
						goto mem_msg;
					}
				}
			}
			h->drv[drv_index].LunID = lunid;
			cciss_update_drive_info(ctlr, drv_index);
		}		/* end for */
	}			/* end else */

freeret:
	kfree(ld_buff);
	h->busy_configuring = 0;
	/* We return -1 here to tell the ACU that we have registered/updated
	 * all of the drives that we can and to keep it from calling us
	 * additional times.
	 */
	return -1;
mem_msg:
	printk(KERN_ERR "cciss: out of memory\n");
	goto freeret;
}
/* This function will deregister the disk and its queue from the
 * kernel.  It must be called with the controller lock held and the
 * drv structure's busy_configuring flag set.  Its parameters are:
 *
 * disk = This is the disk to be deregistered
 * drv  = This is the drive_info_struct associated with the disk to be
 *        deregistered.  It contains information about the disk used
 *        by the driver.
 * clear_all = This flag determines whether or not the disk information
 *             is going to be completely cleared out and the highest_lun
 *             reset.  Sometimes we want to clear out information about
 *             the disk in preparation for re-adding it.  In this case
 *             the highest_lun should be left unchanged and the LunID
 *             should not be cleared.
 */
static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
			   int clear_all)
{
	int i;
	ctlr_info_t *h = get_host(disk);

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/* make sure logical volume is NOT in use */
	if (clear_all || (h->gendisk[0] == disk)) {
		if (drv->usage_count > 1)
			return -EBUSY;
	} else if (drv->usage_count > 0)
		return -EBUSY;

	/* invalidate the devices and deregister the disk.  If it is disk
	 * zero do not deregister it but just zero out its values.  This
	 * allows us to delete disk zero but keep the controller registered.
	 */
	if (h->gendisk[0] != disk) {
		struct request_queue *q = disk->queue;
		if (disk->flags & GENHD_FL_UP)
			del_gendisk(disk);
		if (q) {
			blk_cleanup_queue(q);
			/* Set drv->queue to NULL so that we do not try
			 * to call blk_start_queue on this queue in the
			 * interrupt handler.
			 */
			drv->queue = NULL;
		}
		/* If clear_all is set then we are deleting the logical
		 * drive, not just refreshing its info.  For drives
		 * other than disk 0 we will call put_disk.  We do not
		 * do this for disk 0 as we need it to be able to
		 * configure the controller.
		 */
		if (clear_all) {
			/* This isn't pretty, but we need to find the
			 * disk in our array and NULL out the pointer.
			 * This is so that we will call alloc_disk if
			 * this index is used again later.
			 */
			for (i = 0; i < CISS_MAX_LUN; i++) {
				if (h->gendisk[i] == disk) {
					h->gendisk[i] = NULL;
					break;
				}
			}
			put_disk(disk);
		}
	} else {
		set_capacity(disk, 0);
	}

	/* zero out the disk size info */
	drv->nr_blocks = 0;
	drv->block_size = 0;
	drv->heads = 0;
	drv->sectors = 0;
	drv->cylinders = 0;
	drv->raid_level = -1;	/* This can be used as a flag variable to
				 * indicate that this element of the drive
				 * array is free.
				 */

	if (clear_all) {
		/* check to see if it was the last disk */
		if (drv == h->drv + h->highest_lun) {
			/* if so, find the new highest lun */
			int i, newhighest = -1;
			for (i = 0; i < h->highest_lun; i++) {
				/* if the disk has size > 0, it is available */
				if (h->drv[i].heads)
					newhighest = i;
			}
			h->highest_lun = newhighest;
		}

		drv->LunID = 0;
	}
	return 0;
}
static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
	size_t size, unsigned int use_unit_num,	/* 0: address the controller,
						   1: address logical volume log_unit,
						   2: periph device address is scsi3addr */
	unsigned int log_unit, __u8 page_code,
	unsigned char *scsi3addr, int cmd_type)
{
	ctlr_info_t *h = hba[ctlr];
	u64bit buff_dma_handle;
	int status = IO_OK;

	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	if (buff != NULL) {
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	c->Header.Tag.lower = c->busaddr;

	c->Request.Type.Type = cmd_type;
	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case CISS_INQUIRY:
			/* If the logical unit number is 0 then this is going
			   to the controller, so it's a physical command:
			   mode = 0 target = 0.  So we have nothing to write.
			   Otherwise, if use_unit_num == 1,
			   mode = 1 (volume set addressing) target = LUNID;
			   otherwise, if use_unit_num == 2,
			   mode = 0 (periph dev addr) target = scsi3addr. */
			if (use_unit_num == 1) {
				c->Header.LUN.LogDev.VolId =
				    h->drv[log_unit].LunID;
				c->Header.LUN.LogDev.Mode = 1;
			} else if (use_unit_num == 2) {
				memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
				       8);
				c->Header.LUN.LogDev.Mode = 0;
			}
			/* are we trying to read a vital product page */
			if (page_code != 0) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = page_code;
			}
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = CISS_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case CISS_REPORT_LOG:
		case CISS_REPORT_PHYS:
			/* Talking to controller so it's a physical command:
			   mode = 00 target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF;	/* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;

		case CCISS_READ_CAPACITY:
			c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
			c->Header.LUN.LogDev.Mode = 1;
			c->Request.CDBLen = 10;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			break;
		case CCISS_READ_CAPACITY_16:
			c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
			c->Header.LUN.LogDev.Mode = 1;
			c->Request.CDBLen = 16;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = 0x10;
			c->Request.CDB[10] = (size >> 24) & 0xFF;
			c->Request.CDB[11] = (size >> 16) & 0xFF;
			c->Request.CDB[12] = (size >> 8) & 0xFF;
			c->Request.CDB[13] = size & 0xFF;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			break;
		case CCISS_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			break;
		default:
			printk(KERN_WARNING
			       "cciss%d:  Unknown Command 0x%c\n", ctlr, cmd);
			return IO_ERROR;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {
		case 0:	/* ABORT message */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;	/* abort */
			c->Request.CDB[1] = 0;	/* abort a command */
			/* buff contains the tag of the command to abort */
			memcpy(&c->Request.CDB[4], buff, 8);
			break;
		case 1:	/* RESET message */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;	/* reset */
			c->Request.CDB[1] = 0x04;	/* reset a LUN */
			break;
		case 3:	/* No-Op message */
			c->Request.CDBLen = 1;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			break;
		default:
			printk(KERN_WARNING
			       "cciss%d: unknown message type %d\n", ctlr, cmd);
			return IO_ERROR;
		}
	} else {
		printk(KERN_WARNING
		       "cciss%d: unknown command type %d\n", ctlr, cmd_type);
		return IO_ERROR;
	}
	/* Fill in the scatter gather information */
	if (size > 0) {
		buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
						buff, size,
						PCI_DMA_BIDIRECTIONAL);
		c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
		c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
		c->SG[0].Len = size;
		c->SG[0].Ext = 0;	/* we are not chaining */
	}
	return status;
}
static int sendcmd_withirq(__u8 cmd,
			   int ctlr,
			   void *buff,
			   size_t size,
			   unsigned int use_unit_num,
			   unsigned int log_unit, __u8 page_code, int cmd_type)
{
	ctlr_info_t *h = hba[ctlr];
	CommandList_struct *c;
	u64bit buff_dma_handle;
	unsigned long flags;
	int return_status;
	DECLARE_COMPLETION_ONSTACK(wait);

	if ((c = cmd_alloc(h, 0)) == NULL)
		return -ENOMEM;
	return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
				 log_unit, page_code, NULL, cmd_type);
	if (return_status != IO_OK) {
		cmd_free(h, c, 0);
		return return_status;
	}
resend_cmd2:
	c->waiting = &wait;

	/* Put the request on the tail of the queue and send it */
	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h);
	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);

	wait_for_completion(&wait);

	if (c->err_info->CommandStatus != 0) {	/* an error has occurred */
		switch (c->err_info->CommandStatus) {
		case CMD_TARGET_STATUS:
			printk(KERN_WARNING "cciss: cmd %p has "
			       "completed with errors\n", c);
			if (c->err_info->ScsiStatus) {
				printk(KERN_WARNING "cciss: cmd %p "
				       "has SCSI Status = %x\n",
				       c, c->err_info->ScsiStatus);
			}
			break;
		case CMD_DATA_UNDERRUN:
		case CMD_DATA_OVERRUN:
			/* expected for inquiry and report lun commands */
			break;
		case CMD_INVALID:
			printk(KERN_WARNING "cciss: Cmd %p is "
			       "reported invalid\n", c);
			return_status = IO_ERROR;
			break;
		case CMD_PROTOCOL_ERR:
			printk(KERN_WARNING "cciss: cmd %p has "
			       "protocol error\n", c);
			return_status = IO_ERROR;
			break;
		case CMD_HARDWARE_ERR:
			printk(KERN_WARNING "cciss: cmd %p had "
			       "hardware error\n", c);
			return_status = IO_ERROR;
			break;
		case CMD_CONNECTION_LOST:
			printk(KERN_WARNING "cciss: cmd %p had "
			       "connection lost\n", c);
			return_status = IO_ERROR;
			break;
		case CMD_ABORTED:
			printk(KERN_WARNING "cciss: cmd %p was "
			       "aborted\n", c);
			return_status = IO_ERROR;
			break;
		case CMD_ABORT_FAILED:
			printk(KERN_WARNING "cciss: cmd %p reports "
			       "abort failed\n", c);
			return_status = IO_ERROR;
			break;
		case CMD_UNSOLICITED_ABORT:
			printk(KERN_WARNING
			       "cciss%d: unsolicited abort %p\n", ctlr, c);
			if (c->retry_count < MAX_CMD_RETRIES) {
				printk(KERN_WARNING
				       "cciss%d: retrying %p\n", ctlr, c);
				c->retry_count++;
				/* erase the old error information */
				memset(c->err_info, 0,
				       sizeof(ErrorInfo_struct));
				return_status = IO_OK;
				INIT_COMPLETION(wait);
				goto resend_cmd2;
			}
			return_status = IO_ERROR;
			break;
		default:
			printk(KERN_WARNING "cciss: cmd %p returned "
			       "unknown status %x\n", c,
			       c->err_info->CommandStatus);
			return_status = IO_ERROR;
		}
	}
	/* unlock the buffers from DMA */
	buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
	buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
	pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
			 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
	cmd_free(h, c, 0);
	return return_status;
}
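/*
 * Error handling note for sendcmd_withirq(): most controller error statuses
 * are mapped to IO_ERROR, but CMD_UNSOLICITED_ABORT is retried up to
 * MAX_CMD_RETRIES times by reinitializing the on-stack completion with
 * INIT_COMPLETION() and jumping back to resend_cmd2, since the command block
 * and its DMA mapping are still valid.
 */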
static void cciss_geometry_inquiry(int ctlr, int logvol,
				   int withirq, sector_t total_size,
				   unsigned int block_size,
				   InquiryData_struct *inq_buff,
				   drive_info_struct *drv)
{
	int return_code;
	unsigned long t;

	memset(inq_buff, 0, sizeof(InquiryData_struct));
	if (withirq)
		return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
					      inq_buff, sizeof(*inq_buff), 1,
					      logvol, 0xC1, TYPE_CMD);
	else
		return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
				      sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
				      TYPE_CMD);
	if (return_code == IO_OK) {
		if (inq_buff->data_byte[8] == 0xFF) {
			printk(KERN_WARNING
			       "cciss: reading geometry failed, volume "
			       "does not support reading geometry\n");
			drv->heads = 255;
			drv->sectors = 32;	/* Sectors per track */
			drv->cylinders = total_size + 1;
			drv->raid_level = RAID_UNKNOWN;
		} else {
			drv->heads = inq_buff->data_byte[6];
			drv->sectors = inq_buff->data_byte[7];
			drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
			drv->cylinders += inq_buff->data_byte[5];
			drv->raid_level = inq_buff->data_byte[8];
		}
		drv->block_size = block_size;
		drv->nr_blocks = total_size + 1;
		t = drv->heads * drv->sectors;
		if (t > 1) {
			sector_t real_size = total_size + 1;
			unsigned long rem = sector_div(real_size, t);
			if (rem)
				real_size++;
			drv->cylinders = real_size;
		}
	} else {		/* Get geometry failed */
		printk(KERN_WARNING "cciss: reading geometry failed\n");
	}
	printk(KERN_INFO "      heads=%d, sectors=%d, cylinders=%d\n\n",
	       drv->heads, drv->sectors, drv->cylinders);
}
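/*
 * Geometry example for the code above: with heads = 255 and sectors = 32,
 * t = 8160 sectors per cylinder.  A volume with nr_blocks = 143305920 gives
 * real_size = 143305920 / 8160 = 17562 with remainder 0, so cylinders is
 * 17562; any nonzero remainder rounds the cylinder count up by one.
 */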
static void
cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
		    unsigned int *block_size)
{
	ReadCapdata_struct *buf;
	int return_code;

	buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
	if (buf == NULL) {
		printk(KERN_WARNING "cciss: out of memory\n");
		return;
	}

	if (withirq)
		return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
				ctlr, buf, sizeof(ReadCapdata_struct),
				1, logvol, 0, TYPE_CMD);
	else
		return_code = sendcmd(CCISS_READ_CAPACITY,
				ctlr, buf, sizeof(ReadCapdata_struct),
				1, logvol, 0, NULL, TYPE_CMD);
	if (return_code == IO_OK) {
		*total_size = be32_to_cpu(*(__be32 *) buf->total_size);
		*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
	} else {		/* read capacity command failed */
		printk(KERN_WARNING "cciss: read capacity failed\n");
		*total_size = 0;
		*block_size = BLOCK_SIZE;
	}
	if (*total_size != 0)
		printk(KERN_INFO "      blocks= %llu block_size= %d\n",
		       (unsigned long long)*total_size+1, *block_size);
	kfree(buf);
}
static void
cciss_read_capacity_16(int ctlr, int logvol, int withirq,
		       sector_t *total_size, unsigned int *block_size)
{
	ReadCapdata_struct_16 *buf;
	int return_code;

	buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
	if (buf == NULL) {
		printk(KERN_WARNING "cciss: out of memory\n");
		return;
	}

	if (withirq)
		return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
				ctlr, buf, sizeof(ReadCapdata_struct_16),
				1, logvol, 0, TYPE_CMD);
	else
		return_code = sendcmd(CCISS_READ_CAPACITY_16,
				ctlr, buf, sizeof(ReadCapdata_struct_16),
				1, logvol, 0, NULL, TYPE_CMD);
	if (return_code == IO_OK) {
		*total_size = be64_to_cpu(*(__be64 *) buf->total_size);
		*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
	} else {		/* read capacity command failed */
		printk(KERN_WARNING "cciss: read capacity failed\n");
		*total_size = 0;
		*block_size = BLOCK_SIZE;
	}
	printk(KERN_INFO "      blocks= %llu block_size= %d\n",
	       (unsigned long long)*total_size+1, *block_size);
	kfree(buf);
}
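/*
 * Callers treat a 10-byte READ CAPACITY result of all F's (0xFFFFFFFF) as
 * "larger than 2TB" and re-issue the 16-byte variant above, switching the
 * drive over to CCISS_READ_16/CCISS_WRITE_16 CDBs for subsequent I/O (see
 * cciss_update_drive_info()).
 */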
static int cciss_revalidate(struct gendisk *disk)
{
	ctlr_info_t *h = get_host(disk);
	drive_info_struct *drv = get_drv(disk);
	int logvol;
	int FOUND = 0;
	unsigned int block_size;
	sector_t total_size;
	InquiryData_struct *inq_buff = NULL;

	for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
		if (h->drv[logvol].LunID == drv->LunID) {
			FOUND = 1;
			break;
		}
	}

	if (!FOUND)
		return 1;

	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	if (inq_buff == NULL) {
		printk(KERN_WARNING "cciss: out of memory\n");
		return 1;
	}
	if (h->cciss_read == CCISS_READ_10) {
		cciss_read_capacity(h->ctlr, logvol, 1,
				    &total_size, &block_size);
	} else {
		cciss_read_capacity_16(h->ctlr, logvol, 1,
				       &total_size, &block_size);
	}
	cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
			       inq_buff, drv);

	blk_queue_hardsect_size(drv->queue, drv->block_size);
	set_capacity(disk, drv->nr_blocks);

	kfree(inq_buff);
	return 0;
}
/*
 * Wait polling for a command to complete.
 * The memory mapped FIFO is polled for the completion.
 * Used only at init time, interrupts from the HBA are disabled.
 */
static unsigned long pollcomplete(int ctlr)
{
	unsigned long done;
	int i;

	/* Wait (up to 20 seconds) for a command to complete */

	for (i = 20 * HZ; i > 0; i--) {
		done = hba[ctlr]->access.command_completed(hba[ctlr]);
		if (done == FIFO_EMPTY)
			schedule_timeout_uninterruptible(1);
		else
			return done;
	}
	/* Invalid address to tell caller we ran out of time */
	return 1;
}
static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
{
	/* We get in here if sendcmd() is polling for completions
	   and gets some command back that it wasn't expecting --
	   something other than that which it just sent down.
	   Ordinarily, that shouldn't happen, but it can happen when
	   the scsi tape stuff gets into error handling mode, and
	   starts using sendcmd() to try to abort commands and
	   reset tape drives.  In that case, sendcmd may pick up
	   completions of commands that were sent to logical drives
	   through the block i/o system, or cciss ioctls completing, etc.
	   In that case, we need to save those completions for later
	   processing by the interrupt handler.
	 */

#ifdef CONFIG_CISS_SCSI_TAPE
	struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;

	/* If it's not the scsi tape stuff doing error handling, (abort */
	/* or reset) then we don't expect anything weird. */
	if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
#endif
		printk(KERN_WARNING "cciss cciss%d: SendCmd "
		       "Invalid command list address returned! (%lx)\n",
		       ctlr, complete);
		/* not much we can do. */
#ifdef CONFIG_CISS_SCSI_TAPE
		return 1;
	}

	/* We've sent down an abort or reset, but something else
	   has completed */
	if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
		/* Uh oh.  No room to save it for later... */
		printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
		       "reject list overflow, command lost!\n", ctlr);
		return 1;
	}
	/* Save it for later */
	srl->complete[srl->ncompletions] = complete;
	srl->ncompletions++;
#endif
	return 0;
}
/*
 * Send a command to the controller, and wait for it to complete.
 * Only used at init time.
 */
static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
		   unsigned int use_unit_num,	/* 0: address the controller,
						   1: address logical volume log_unit,
						   2: periph device address is scsi3addr */
		   unsigned int log_unit,
		   __u8 page_code, unsigned char *scsi3addr, int cmd_type)
{
	CommandList_struct *c;
	int i;
	unsigned long complete;
	ctlr_info_t *info_p = hba[ctlr];
	u64bit buff_dma_handle;
	int status, done = 0;

	if ((c = cmd_alloc(info_p, 1)) == NULL) {
		printk(KERN_WARNING "cciss: unable to get memory");
		return IO_ERROR;
	}
	status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
			  log_unit, page_code, scsi3addr, cmd_type);
	if (status != IO_OK) {
		cmd_free(info_p, c, 1);
		return status;
	}
resend_cmd1:
	/*
	 * Disable interrupts before submitting; completions are polled.
	 */
#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss: turning intr off\n");
#endif				/* CCISS_DEBUG */
	info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);

	/* Make sure there is room in the command FIFO */
	/* Actually it should be completely empty at this time */
	/* unless we are in here doing error handling for the scsi */
	/* tape side of the driver. */
	for (i = 200000; i > 0; i--) {
		/* if fifo isn't full go */
		if (!(info_p->access.fifo_full(info_p)))
			break;
		udelay(10);
		printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
		       " waiting!\n", ctlr);
	}
	info_p->access.submit_command(info_p, c);
	done = 0;
	do {
		complete = pollcomplete(ctlr);

#ifdef CCISS_DEBUG
		printk(KERN_DEBUG "cciss: command completed\n");
#endif				/* CCISS_DEBUG */

		if (complete == 1) {
			printk(KERN_WARNING
			       "cciss cciss%d: SendCmd Timeout out, "
			       "No command list address returned!\n", ctlr);
			status = IO_ERROR;
			done = 1;
			break;
		}

		/* This will need to change for direct lookup completions */
		if ((complete & CISS_ERROR_BIT)
		    && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
			/* if data overrun or underun on Report command
			   ignore it */
			if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
			     (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
			     (c->Request.CDB[0] == CISS_INQUIRY)) &&
			    ((c->err_info->CommandStatus ==
			      CMD_DATA_OVERRUN) ||
			     (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
			    )) {
				complete = c->busaddr;
			} else {
				if (c->err_info->CommandStatus ==
				    CMD_UNSOLICITED_ABORT) {
					printk(KERN_WARNING "cciss%d: "
					       "unsolicited abort %p\n",
					       ctlr, c);
					if (c->retry_count < MAX_CMD_RETRIES) {
						printk(KERN_WARNING
						       "cciss%d: retrying %p\n",
						       ctlr, c);
						c->retry_count++;
						/* erase the old error */
						/* information */
						memset(c->err_info, 0,
						       sizeof(ErrorInfo_struct));
						goto resend_cmd1;
					} else {
						printk(KERN_WARNING
						       "cciss%d: retried %p too "
						       "many times\n", ctlr, c);
						status = IO_ERROR;
						goto cleanup1;
					}
				} else if (c->err_info->CommandStatus ==
					   CMD_UNABORTABLE) {
					printk(KERN_WARNING
					       "cciss%d: command could not be aborted.\n",
					       ctlr);
					status = IO_ERROR;
					goto cleanup1;
				}
				printk(KERN_WARNING "ciss ciss%d: sendcmd"
				       " Error %x \n", ctlr,
				       c->err_info->CommandStatus);
				printk(KERN_WARNING "ciss ciss%d: sendcmd"
				       " offensive info\n"
				       "  size %x\n   num %x   value %x\n",
				       ctlr,
				       c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
				       c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
				       c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
				status = IO_ERROR;
				goto cleanup1;
			}
		}
		/* This will need changing for direct lookup completions */
		if (complete != c->busaddr) {
			if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
				BUG();	/* we are pretty much hosed if we get here. */
			}
			continue;
		} else
			done = 1;
	} while (!done);

cleanup1:
	/* unlock the data buffer from DMA */
	buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
	buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
	pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
			 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
#ifdef CONFIG_CISS_SCSI_TAPE
	/* if we saved some commands for later, process them now. */
	if (info_p->scsi_rejects.ncompletions > 0)
		do_cciss_intr(0, info_p);
#endif
	cmd_free(info_p, c, 1);
	return status;
}
/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap(page_base, page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}
/*
 * Takes jobs of the Q and sends them to the hardware, then puts it on
 * the Q to wait for completion.
 */
static void start_io(ctlr_info_t *h)
{
	CommandList_struct *c;

	while ((c = h->reqQ) != NULL) {
		/* can't do anything if fifo is full */
		if ((h->access.fifo_full(h))) {
			printk(KERN_WARNING "cciss: fifo full\n");
			break;
		}

		/* Get the first entry from the Request Q */
		removeQ(&(h->reqQ), c);
		h->Qdepth--;

		/* Tell the controller execute command */
		h->access.submit_command(h, c);

		/* Put job onto the completed Q */
		addQ(&(h->cmpQ), c);
	}
}
/* Assumes that CCISS_LOCK(h->ctlr) is held. */
/* Zeros out the error record and then resends the command back */
/* to the controller */
static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
{
	/* erase the old error information */
	memset(c->err_info, 0, sizeof(ErrorInfo_struct));

	/* add it to software queue and then send it to the controller */
	addQ(&(h->reqQ), c);
	h->Qdepth++;
	if (h->Qdepth > h->maxQsinceinit)
		h->maxQsinceinit = h->Qdepth;

	start_io(h);
}
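/* Pack the SCSI status, message, host and driver bytes back into the single
 * 32-bit error word the block layer expects; this is the inverse of the
 * status-extraction macros in scsi.h. */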
static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
					     unsigned int msg_byte, unsigned int host_byte,
					     unsigned int driver_byte)
{
	/* inverse of macros in scsi.h */
	return (scsi_status_byte & 0xff) |
	       ((msg_byte & 0xff) << 8) |
	       ((host_byte & 0xff) << 16) |
	       ((driver_byte & 0xff) << 24);
}
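/* Decode a CMD_TARGET_STATUS completion: build the error word for the
 * request, decode the sense key on CHECK CONDITION, and copy sense data back
 * to the request for SG_IO-style (blk_pc) commands. */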
static inline int evaluate_target_status(CommandList_struct *cmd)
{
	unsigned char sense_key;
	unsigned char status_byte, msg_byte, host_byte, driver_byte;
	int error_value;

	/* If we get in here, it means we got "target status", that is, scsi status */
	status_byte = cmd->err_info->ScsiStatus;
	driver_byte = DRIVER_OK;
	msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */

	if (blk_pc_request(cmd->rq))
		host_byte = DID_PASSTHROUGH;
	else
		host_byte = DID_OK;

	error_value = make_status_bytes(status_byte, msg_byte,
					host_byte, driver_byte);

	if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
		if (!blk_pc_request(cmd->rq))
			printk(KERN_WARNING "cciss: cmd %p "
			       "has SCSI Status 0x%x\n",
			       cmd, cmd->err_info->ScsiStatus);
		return error_value;
	}

	/* check the sense key */
	sense_key = 0xf & cmd->err_info->SenseInfo[2];
	/* no status or recovered error */
	if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
		error_value = 0;

	if (!blk_pc_request(cmd->rq)) {	/* Not SG_IO or similar? */
		if (error_value != 0)
			printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
			       " sense key = 0x%x\n", cmd, sense_key);
		return error_value;
	}

	/* SG_IO or similar, copy sense data back */
	if (cmd->rq->sense) {
		if (cmd->rq->sense_len > cmd->err_info->SenseLen)
			cmd->rq->sense_len = cmd->err_info->SenseLen;
		memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
		       cmd->rq->sense_len);
	} else
		cmd->rq->sense_len = 0;

	return error_value;
}
/* checks the status of the job and calls complete buffers to mark all
 * buffers for the completed job. Note that this function does not need
 * to hold the hba/queue lock.
 */
static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
				    int timeout)
{
	int retry_cmd = 0;
	struct request *rq = cmd->rq;

	rq->errors = 0;

	if (timeout)
		rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);

	if (cmd->err_info->CommandStatus == 0)	/* no error has occurred */
		goto after_error_processing;

	switch (cmd->err_info->CommandStatus) {
	case CMD_TARGET_STATUS:
		rq->errors = evaluate_target_status(cmd);
		break;
	case CMD_DATA_UNDERRUN:
		if (blk_fs_request(cmd->rq)) {
			printk(KERN_WARNING "cciss: cmd %p has"
			       " completed with data underrun "
			       "reported\n", cmd);
			cmd->rq->data_len = cmd->err_info->ResidualCnt;
		}
		break;
	case CMD_DATA_OVERRUN:
		if (blk_fs_request(cmd->rq))
			printk(KERN_WARNING "cciss: cmd %p has"
			       " completed with data overrun "
			       "reported\n", cmd);
		break;
	case CMD_INVALID:
		printk(KERN_WARNING "cciss: cmd %p is "
		       "reported invalid\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_PROTOCOL_ERR:
		printk(KERN_WARNING "cciss: cmd %p has "
		       "protocol error \n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_HARDWARE_ERR:
		printk(KERN_WARNING "cciss: cmd %p had "
		       " hardware error\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_CONNECTION_LOST:
		printk(KERN_WARNING "cciss: cmd %p had "
		       "connection lost\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_ABORTED:
		printk(KERN_WARNING "cciss: cmd %p was "
		       "aborted\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
		break;
	case CMD_ABORT_FAILED:
		printk(KERN_WARNING "cciss: cmd %p reports "
		       "abort failed\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
		break;
	case CMD_UNSOLICITED_ABORT:
		printk(KERN_WARNING "cciss%d: unsolicited "
		       "abort %p\n", h->ctlr, cmd);
		if (cmd->retry_count < MAX_CMD_RETRIES) {
			retry_cmd = 1;
			printk(KERN_WARNING
			       "cciss%d: retrying %p\n", h->ctlr, cmd);
			cmd->retry_count++;
		} else
			printk(KERN_WARNING
			       "cciss%d: %p retried too "
			       "many times\n", h->ctlr, cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
		break;
	case CMD_TIMEOUT:
		printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
		break;
	default:
		printk(KERN_WARNING "cciss: cmd %p returned "
		       "unknown status %x\n", cmd,
		       cmd->err_info->CommandStatus);
		rq->errors = make_status_bytes(SAM_STAT_GOOD,
			cmd->err_info->CommandStatus, DRIVER_OK,
			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
	}

after_error_processing:

	/* We need to return this command */
	if (retry_cmd) {
		resend_cciss_cmd(h, cmd);
		return;
	}
	cmd->rq->completion_data = cmd;
	blk_complete_request(cmd->rq);
}
/*
 * Get a request and submit it to the controller.
 */
static void do_cciss_request(struct request_queue *q)
{
	ctlr_info_t *h = q->queuedata;
	CommandList_struct *c;
	sector_t start_blk;
	int seg;
	struct request *creq;
	u64bit temp64;
	struct scatterlist tmp_sg[MAXSGENTRIES];
	drive_info_struct *drv;
	int i, dir;

	/* We call start_io here in case there is a command waiting on the
	 * queue that has not been sent.
	 */
	if (blk_queue_plugged(q))
		goto startio;

queue:
	creq = elv_next_request(q);
	if (!creq)
		goto startio;

	BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);

	if ((c = cmd_alloc(h, 1)) == NULL)
		goto full;

	blkdev_dequeue_request(creq);

	spin_unlock_irq(q->queue_lock);

	c->cmd_type = CMD_RWREQ;
	c->rq = creq;

	/* fill in the request */
	drv = creq->rq_disk->private_data;
	c->Header.ReplyQueue = 0;	// unused in simple mode
	/* got command from pool, so use the command block index instead */
	/* for direct lookups. */
	/* The first 2 bits are reserved for controller error reporting. */
	c->Header.Tag.lower = (c->cmdindex << 3);
	c->Header.Tag.lower |= 0x04;	/* flag for direct lookup. */
	c->Header.LUN.LogDev.VolId = drv->LunID;
	c->Header.LUN.LogDev.Mode = 1;
	c->Request.CDBLen = 10;	// 12 byte commands not in FW yet;
	c->Request.Type.Type = TYPE_CMD;	// It is a command.
	c->Request.Type.Attribute = ATTR_SIMPLE;
	c->Request.Type.Direction =
	    (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
	c->Request.Timeout = 0;	// Don't time out
	c->Request.CDB[0] =
	    (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
	start_blk = creq->sector;
#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
	       (int)creq->nr_sectors);
#endif				/* CCISS_DEBUG */

	sg_init_table(tmp_sg, MAXSGENTRIES);
	seg = blk_rq_map_sg(q, creq, tmp_sg);

	/* get the DMA records for the setup */
	if (c->Request.Type.Direction == XFER_READ)
		dir = PCI_DMA_FROMDEVICE;
	else
		dir = PCI_DMA_TODEVICE;

	for (i = 0; i < seg; i++) {
		c->SG[i].Len = tmp_sg[i].length;
		temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
						  tmp_sg[i].offset,
						  tmp_sg[i].length, dir);
		c->SG[i].Addr.lower = temp64.val32.lower;
		c->SG[i].Addr.upper = temp64.val32.upper;
		c->SG[i].Ext = 0;	// we are not chaining
	}
	/* track how many SG entries we are using */
	if (seg > h->maxSG)
		h->maxSG = seg;

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
	       creq->nr_sectors, seg);
#endif				/* CCISS_DEBUG */

	c->Header.SGList = c->Header.SGTotal = seg;
	if (likely(blk_fs_request(creq))) {
		if (h->cciss_read == CCISS_READ_10) {
			c->Request.CDB[1] = 0;
			c->Request.CDB[2] = (start_blk >> 24) & 0xff;	//MSB
			c->Request.CDB[3] = (start_blk >> 16) & 0xff;
			c->Request.CDB[4] = (start_blk >> 8) & 0xff;
			c->Request.CDB[5] = start_blk & 0xff;
			c->Request.CDB[6] = 0;	// (sect >> 24) & 0xff; MSB
			c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
			c->Request.CDB[8] = creq->nr_sectors & 0xff;
			c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
		} else {
			u32 upper32 = upper_32_bits(start_blk);

			c->Request.CDBLen = 16;
			c->Request.CDB[1] = 0;
			c->Request.CDB[2] = (upper32 >> 24) & 0xff;	//MSB
			c->Request.CDB[3] = (upper32 >> 16) & 0xff;
			c->Request.CDB[4] = (upper32 >> 8) & 0xff;
			c->Request.CDB[5] = upper32 & 0xff;
			c->Request.CDB[6] = (start_blk >> 24) & 0xff;
			c->Request.CDB[7] = (start_blk >> 16) & 0xff;
			c->Request.CDB[8] = (start_blk >> 8) & 0xff;
			c->Request.CDB[9] = start_blk & 0xff;
			c->Request.CDB[10] = (creq->nr_sectors >> 24) & 0xff;
			c->Request.CDB[11] = (creq->nr_sectors >> 16) & 0xff;
			c->Request.CDB[12] = (creq->nr_sectors >> 8) & 0xff;
			c->Request.CDB[13] = creq->nr_sectors & 0xff;
			c->Request.CDB[14] = c->Request.CDB[15] = 0;
		}
	} else if (blk_pc_request(creq)) {
		c->Request.CDBLen = creq->cmd_len;
		memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
	} else {
		printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
		BUG();
	}

	spin_lock_irq(q->queue_lock);

	addQ(&(h->reqQ), c);
	h->Qdepth++;
	if (h->Qdepth > h->maxQsinceinit)
		h->maxQsinceinit = h->Qdepth;

	goto queue;
full:
	blk_stop_queue(q);
startio:
	/* We will already have the driver lock here so not need
	 * to lock it.
	 */
	start_io(h);
}
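/* Return the tag of the next completed command: completions that sendcmd()
 * set aside on the reject list are drained first, then the controller's
 * completion FIFO is read. */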
static inline unsigned long get_next_completion(ctlr_info_t *h)
{
#ifdef CONFIG_CISS_SCSI_TAPE
	/* Any rejects from sendcmd() lying around? Process them first */
	if (h->scsi_rejects.ncompletions == 0)
		return h->access.command_completed(h);
	else {
		struct sendcmd_reject_list *srl;
		int n;

		srl = &h->scsi_rejects;
		n = --srl->ncompletions;
		/* printk("cciss%d: processing saved reject\n", h->ctlr); */
		return srl->complete[n];
	}
#else
	return h->access.command_completed(h);
#endif
}
static inline int interrupt_pending(ctlr_info_t *h)
{
#ifdef CONFIG_CISS_SCSI_TAPE
	return (h->access.intr_pending(h)
		|| (h->scsi_rejects.ncompletions > 0));
#else
	return h->access.intr_pending(h);
#endif
}
static inline long interrupt_not_for_us(ctlr_info_t *h)
{
#ifdef CONFIG_CISS_SCSI_TAPE
	return (((h->access.intr_pending(h) == 0) ||
		 (h->interrupts_enabled == 0))
		&& (h->scsi_rejects.ncompletions == 0));
#else
	return (((h->access.intr_pending(h) == 0) ||
		 (h->interrupts_enabled == 0)));
#endif
}
static irqreturn_t do_cciss_intr(int irq, void *dev_id)
{
	ctlr_info_t *h = dev_id;
	CommandList_struct *c;
	unsigned long flags;
	__u32 a, a1, a2;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	/*
	 * If there are completed commands in the completion queue,
	 * we had better do something about it.
	 */
	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
	while (interrupt_pending(h)) {
		while ((a = get_next_completion(h)) != FIFO_EMPTY) {
			a1 = a;
			if ((a & 0x04)) {
				a2 = (a >> 3);
				if (a2 >= h->nr_cmds) {
					printk(KERN_WARNING
					       "cciss: controller cciss%d failed, stopping.\n",
					       h->ctlr);
					fail_all_cmds(h->ctlr);
					return IRQ_HANDLED;
				}

				c = h->cmd_pool + a2;
				a = c->busaddr;

			} else {
				a &= ~3;
				if ((c = h->cmpQ) == NULL) {
					printk(KERN_WARNING
					       "cciss: Completion of %08x ignored\n",
					       a1);
					continue;
				}
				while (c->busaddr != a) {
					c = c->next;
					if (c == h->cmpQ)
						break;
				}
			}
			/*
			 * If we've found the command, take it off the
			 * completion Q and free it
			 */
			if (c->busaddr == a) {
				removeQ(&h->cmpQ, c);
				if (c->cmd_type == CMD_RWREQ) {
					complete_command(h, c, 0);
				} else if (c->cmd_type == CMD_IOCTL_PEND) {
					complete(c->waiting);
				}
#ifdef CONFIG_CISS_SCSI_TAPE
				else if (c->cmd_type == CMD_SCSI)
					complete_scsi_command(c, 0, a1);
#endif
				continue;
			}
		}
	}

	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
	return IRQ_HANDLED;
}
/*
 * We cannot read the structure directly, for portability we must use
 * the io functions.
 * This is for debug only.
 */
#ifdef CCISS_DEBUG
static void print_cfg_table(CfgTable_struct *tb)
{
	int i;
	char temp_name[17];

	printk("Controller Configuration information\n");
	printk("------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	printk("   Signature = %s\n", temp_name);
	printk("   Spec Number = %d\n", readl(&(tb->SpecValence)));
	printk("   Transport methods supported = 0x%x\n",
	       readl(&(tb->TransportSupport)));
	printk("   Transport methods active = 0x%x\n",
	       readl(&(tb->TransportActive)));
	printk("   Requested transport Method = 0x%x\n",
	       readl(&(tb->HostWrite.TransportRequest)));
	printk("   Coalesce Interrupt Delay = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntDelay)));
	printk("   Coalesce Interrupt Count = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntCount)));
	printk("   Max outstanding commands = 0x%d\n",
	       readl(&(tb->CmdsOutMax)));
	printk("   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	printk("   Server Name = %s\n", temp_name);
	printk("   Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
}
#endif				/* CCISS_DEBUG */
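/* Map a PCI config-space base-address register offset (as reported by the
 * controller) to the index of the matching PCI resource, accounting for
 * 64-bit memory BARs that occupy two register slots; -1 if none matches. */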
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				printk(KERN_WARNING
				       "Base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use IO-APIC mode.
 */
static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
					   struct pci_dev *pdev, __u32 board_id)
{
#ifdef CONFIG_PCI_MSI
	int err;
	struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
	{0, 2}, {0, 3}
	};

	/* Some boards advertise MSI but don't really support it */
	if ((board_id == 0x40700E11) ||
	    (board_id == 0x40800E11) ||
	    (board_id == 0x40820E11) || (board_id == 0x40830E11))
		goto default_int_mode;

	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
		err = pci_enable_msix(pdev, cciss_msix_entries, 4);
		if (!err) {
			c->intr[0] = cciss_msix_entries[0].vector;
			c->intr[1] = cciss_msix_entries[1].vector;
			c->intr[2] = cciss_msix_entries[2].vector;
			c->intr[3] = cciss_msix_entries[3].vector;
			c->msix_vector = 1;
			return;
		}
		if (err > 0) {
			printk(KERN_WARNING "cciss: only %d MSI-X vectors "
			       "available\n", err);
			goto default_int_mode;
		} else {
			printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
			       err);
			goto default_int_mode;
		}
	}
	if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
		if (!pci_enable_msi(pdev))
			c->msi_vector = 1;
		else
			printk(KERN_WARNING "cciss: MSI init failed\n");
	}
default_int_mode:
#endif				/* CONFIG_PCI_MSI */
	/* if we get here we're going to use the default interrupt mode */
	c->intr[SIMPLE_MODE_INT] = pdev->irq;
	return;
}
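/* One-time PCI bring-up for a controller: enable the device, claim its
 * regions, map the registers and config table, wait for the firmware to
 * report ready, identify the board, and switch it into simple transport
 * mode. */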
static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
{
	ushort subsystem_vendor_id, subsystem_device_id, command;
	__u32 board_id, scratchpad = 0;
	__u64 cfg_offset;
	__u32 cfg_base_addr;
	__u64 cfg_base_addr_index;
	int i, err;

	/* check to see if controller has been disabled */
	/* BEFORE trying to enable it */
	(void)pci_read_config_word(pdev, PCI_COMMAND, &command);
	if (!(command & 0x02)) {
		printk(KERN_WARNING
		       "cciss: controller appears to be disabled\n");
		return -ENODEV;
	}

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, "cciss");
	if (err) {
		printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
		       "aborting\n");
		return err;
	}

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id);

#ifdef CCISS_DEBUG
	printk("command = %x\n", command);
	printk("irq = %x\n", pdev->irq);
	printk("board_id = %x\n", board_id);
#endif				/* CCISS_DEBUG */

	/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
	 * else we use the IO-APIC interrupt assigned to us by system ROM.
	 */
	cciss_interrupt_mode(c, pdev, board_id);

	/*
	 * Memory base addr is first addr, the second points to the config
	 * table.
	 */
	c->paddr = pci_resource_start(pdev, 0);	/* addressing mode bits already removed */
#ifdef CCISS_DEBUG
	printk("address 0 = %x\n", c->paddr);
#endif				/* CCISS_DEBUG */
	c->vaddr = remap_pci_mem(c->paddr, 0x250);

	/* Wait for the board to become ready. (PCI hotplug needs this.)
	 * We poll for up to 120 secs, once per 100ms. */
	for (i = 0; i < 1200; i++) {
		scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
		if (scratchpad == CCISS_FIRMWARE_READY)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);	/* wait 100ms */
	}
	if (scratchpad != CCISS_FIRMWARE_READY) {
		printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
		err = -ENODEV;
		goto err_out_free_res;
	}

	/* get the address index number */
	cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
	cfg_base_addr &= (__u32) 0x0000ffff;
#ifdef CCISS_DEBUG
	printk("cfg base address = %x\n", cfg_base_addr);
#endif				/* CCISS_DEBUG */
	cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
#ifdef CCISS_DEBUG
	printk("cfg base address index = %x\n", cfg_base_addr_index);
#endif				/* CCISS_DEBUG */
	if (cfg_base_addr_index == -1) {
		printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
		err = -ENODEV;
		goto err_out_free_res;
	}

	cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
#ifdef CCISS_DEBUG
	printk("cfg offset = %x\n", cfg_offset);
#endif				/* CCISS_DEBUG */
	c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
						       cfg_base_addr_index) +
				    cfg_offset, sizeof(CfgTable_struct));
	c->board_id = board_id;

#ifdef CCISS_DEBUG
	print_cfg_table(c->cfgtable);
#endif				/* CCISS_DEBUG */

	for (i = 0; i < ARRAY_SIZE(products); i++) {
		if (board_id == products[i].board_id) {
			c->product_name = products[i].product_name;
			c->access = *(products[i].access);
			c->nr_cmds = products[i].nr_cmds;
			break;
		}
	}
	if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
	    (readb(&c->cfgtable->Signature[1]) != 'I') ||
	    (readb(&c->cfgtable->Signature[2]) != 'S') ||
	    (readb(&c->cfgtable->Signature[3]) != 'S')) {
		printk("Does not appear to be a valid CISS config table\n");
		err = -ENODEV;
		goto err_out_free_res;
	}
	/* We didn't find the controller in our list. We know the
	 * signature is valid. If it's an HP device let's try to
	 * bind to the device and fire it up. Otherwise we bail.
	 */
	if (i == ARRAY_SIZE(products)) {
		if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
			c->product_name = products[i-1].product_name;
			c->access = *(products[i-1].access);
			c->nr_cmds = products[i-1].nr_cmds;
			printk(KERN_WARNING "cciss: This is an unknown "
			       "Smart Array controller.\n"
			       "cciss: Please update to the latest driver "
			       "available from www.hp.com.\n");
		} else {
			printk(KERN_WARNING "cciss: Sorry, I don't know how"
			       " to access the Smart Array controller %08lx\n",
			       (unsigned long)board_id);
			err = -ENODEV;
			goto err_out_free_res;
		}
	}
#ifdef CONFIG_X86
	{
		/* Need to enable prefetch in the SCSI core for 6400 in x86 */
		__u32 prefetch;
		prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
		prefetch |= 0x100;
		writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
	}
#endif

	/* Disabling DMA prefetch and refetch for the P600.
	 * An ASIC bug may result in accesses to invalid memory addresses.
	 * We've disabled prefetch for some time now. Testing with XEN
	 * kernels revealed a bug in the refetch if dom0 resides on a P600.
	 */
	if (board_id == 0x3225103C) {
		__u32 dma_prefetch;
		__u32 dma_refetch;
		dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
		dma_prefetch |= 0x8000;
		writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
		pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
		dma_refetch |= 0x1;
		pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
	}

#ifdef CCISS_DEBUG
	printk("Trying to put board into Simple mode\n");
#endif				/* CCISS_DEBUG */
	c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);

	/* under certain very rare conditions, this can take awhile.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.) */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
			break;
		/* delay and try again */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(10);
	}

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "I counter got to %d %x\n", i,
	       readl(c->vaddr + SA5_DOORBELL));
	print_cfg_table(c->cfgtable);
#endif				/* CCISS_DEBUG */

	if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
		printk(KERN_WARNING "cciss: unable to get board into"
		       " simple mode\n");
		err = -ENODEV;
		goto err_out_free_res;
	}
	return 0;

err_out_free_res:
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(pdev);
	return err;
}
/*
 * Gets information about the local volumes attached to the controller.
 */
static void cciss_getgeometry(int cntl_num)
{
	ReportLunData_struct *ld_buff;
	InquiryData_struct *inq_buff;
	int return_code;
	int i;
	int listlength = 0;
	__u32 lunid = 0;
	unsigned block_size;
	sector_t total_size;

	ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
	if (ld_buff == NULL) {
		printk(KERN_ERR "cciss: out of memory\n");
		return;
	}
	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	if (inq_buff == NULL) {
		printk(KERN_ERR "cciss: out of memory\n");
		kfree(ld_buff);
		return;
	}
	/* Get the firmware version */
	return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
			      sizeof(InquiryData_struct), 0, 0, 0, NULL,
			      TYPE_CMD);
	if (return_code == IO_OK) {
		hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
		hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
		hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
		hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
	} else {	/* send command failed */
		printk(KERN_WARNING "cciss: unable to determine firmware"
		       " version of controller\n");
	}
	/* Get the number of logical volumes */
	return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
			      sizeof(ReportLunData_struct), 0, 0, 0, NULL,
			      TYPE_CMD);
	if (return_code == IO_OK) {
#ifdef CCISS_DEBUG
		printk("LUN Data\n--------------------------\n");
#endif				/* CCISS_DEBUG */
		listlength |=
		    (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
		listlength |=
		    (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
		listlength |=
		    (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
		listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
	} else {	/* reading number of logical volumes failed */
		printk(KERN_WARNING "cciss: report logical volume"
		       " command failed\n");
		listlength = 0;
	}
	hba[cntl_num]->num_luns = listlength / 8;	// 8 bytes pre entry
	if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
		printk(KERN_ERR
		       "ciss:  only %d number of logical volumes supported\n",
		       CISS_MAX_LUN);
		hba[cntl_num]->num_luns = CISS_MAX_LUN;
	}
#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
	       ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
	       ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
	       hba[cntl_num]->num_luns);
#endif				/* CCISS_DEBUG */

	hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
	for (i = 0; i < CISS_MAX_LUN; i++) {
		if (i < hba[cntl_num]->num_luns) {
			lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
			lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
			lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
			lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);

			hba[cntl_num]->drv[i].LunID = lunid;

#ifdef CCISS_DEBUG
			printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
			       ld_buff->LUN[i][0], ld_buff->LUN[i][1],
			       ld_buff->LUN[i][2], ld_buff->LUN[i][3],
			       hba[cntl_num]->drv[i].LunID);
#endif				/* CCISS_DEBUG */

			/* testing to see if 16-byte CDBs are already being used */
			if (hba[cntl_num]->cciss_read == CCISS_READ_16) {
				cciss_read_capacity_16(cntl_num, i, 0,
						       &total_size, &block_size);
				goto geo_inq;
			}
			cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);

			/* If read_capacity returns all F's the logical is >2TB */
			/* so we switch to 16-byte CDBs for all read/write ops */
			if (total_size == 0xFFFFFFFFULL) {
				cciss_read_capacity_16(cntl_num, i, 0,
						       &total_size, &block_size);
				hba[cntl_num]->cciss_read = CCISS_READ_16;
				hba[cntl_num]->cciss_write = CCISS_WRITE_16;
			} else {
				hba[cntl_num]->cciss_read = CCISS_READ_10;
				hba[cntl_num]->cciss_write = CCISS_WRITE_10;
			}
geo_inq:
			cciss_geometry_inquiry(cntl_num, i, 0, total_size,
					       block_size, inq_buff,
					       &hba[cntl_num]->drv[i]);
		} else {
			/* initialize raid_level to indicate a free space */
			hba[cntl_num]->drv[i].raid_level = -1;
		}
	}
	kfree(ld_buff);
	kfree(inq_buff);
}
/* Function to find the first free pointer into our hba[] array */
/* Returns -1 if no free entries are left. */
static int alloc_cciss_hba(void)
{
	int i;

	for (i = 0; i < MAX_CTLR; i++) {
		if (!hba[i]) {
			ctlr_info_t *p;

			p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
			if (!p)
				goto Enomem;
			p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
			if (!p->gendisk[0]) {
				kfree(p);
				goto Enomem;
			}
			hba[i] = p;
			return i;
		}
	}
	printk(KERN_WARNING "cciss: This driver supports a maximum"
	       " of %d controllers.\n", MAX_CTLR);
	return -1;
Enomem:
	printk(KERN_ERR "cciss: out of memory.\n");
	return -1;
}
static void free_hba(int i)
{
	ctlr_info_t *p = hba[i];
	int n;

	for (n = 0; n < CISS_MAX_LUN; n++)
		put_disk(p->gendisk[n]);
	kfree(p);
	hba[i] = NULL;
}
/*
 *  This is it.  Find all the controllers and register them.  I really hate
 *  stealing all these major device numbers.
 *  returns the number of block devices registered.
 */
static int __devinit cciss_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int i;
	int j = 0;
	int rc;
	int dac;

	i = alloc_cciss_hba();
	if (i < 0)
		return -1;

	hba[i]->busy_initializing = 1;

	if (cciss_pci_init(hba[i], pdev) != 0)
		goto clean1;

	sprintf(hba[i]->devname, "cciss%d", i);
	hba[i]->ctlr = i;
	hba[i]->pdev = pdev;

	/* configure PCI DMA stuff */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		dac = 1;
	else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		dac = 0;
	else {
		printk(KERN_ERR "cciss: no suitable DMA available\n");
		goto clean1;
	}

	/*
	 * register with the major number, or get a dynamic major number
	 * by passing 0 as argument.  This is done for greater than
	 * 8 controller support.
	 */
	if (i < MAX_CTLR_ORIG)
		hba[i]->major = COMPAQ_CISS_MAJOR + i;
	rc = register_blkdev(hba[i]->major, hba[i]->devname);
	if (rc == -EBUSY || rc == -EINVAL) {
		printk(KERN_ERR
		       "cciss: Unable to get major number %d for %s "
		       "on hba %d\n", hba[i]->major, hba[i]->devname, i);
		goto clean1;
	} else {
		if (i >= MAX_CTLR_ORIG)
			hba[i]->major = rc;
	}

	/* make sure the board interrupts are off */
	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
	if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
			IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
		printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
		       hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
		goto clean2;
	}

	printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
	       hba[i]->devname, pdev->device, pci_name(pdev),
	       hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");

	hba[i]->cmd_pool_bits =
	    kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
		      1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
	hba[i]->cmd_pool = (CommandList_struct *)
	    pci_alloc_consistent(hba[i]->pdev,
				 hba[i]->nr_cmds * sizeof(CommandList_struct),
				 &(hba[i]->cmd_pool_dhandle));
	hba[i]->errinfo_pool = (ErrorInfo_struct *)
	    pci_alloc_consistent(hba[i]->pdev,
				 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
				 &(hba[i]->errinfo_pool_dhandle));
	if ((hba[i]->cmd_pool_bits == NULL)
	    || (hba[i]->cmd_pool == NULL)
	    || (hba[i]->errinfo_pool == NULL)) {
		printk(KERN_ERR "cciss: out of memory");
		goto clean4;
	}
#ifdef CONFIG_CISS_SCSI_TAPE
	hba[i]->scsi_rejects.complete =
	    kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
		    (hba[i]->nr_cmds + 5), GFP_KERNEL);
	if (hba[i]->scsi_rejects.complete == NULL) {
		printk(KERN_ERR "cciss: out of memory");
		goto clean4;
	}
#endif
	spin_lock_init(&hba[i]->lock);

	/* Initialize the pdev driver private data.
	   have it point to hba[i]. */
	pci_set_drvdata(pdev, hba[i]);
	/* command and error info recs zeroed out before
	   they are used */
	memset(hba[i]->cmd_pool_bits, 0,
	       ((hba[i]->nr_cmds + BITS_PER_LONG -
		 1) / BITS_PER_LONG) * sizeof(unsigned long));

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
#endif				/* CCISS_DEBUG */

	cciss_getgeometry(i);

	cciss_scsi_setup(i);

	/* Turn the interrupts on so we can service requests */
	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);

	hba[i]->cciss_max_sectors = 2048;

	hba[i]->busy_initializing = 0;

	do {
		drive_info_struct *drv = &(hba[i]->drv[j]);
		struct gendisk *disk = hba[i]->gendisk[j];
		struct request_queue *q;

		/* Check if the disk was allocated already */
		if (!disk) {
			hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
			disk = hba[i]->gendisk[j];
		}

		/* Check that the disk was able to be allocated */
		if (!disk) {
			printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
			goto clean4;
		}

		q = blk_init_queue(do_cciss_request, &hba[i]->lock);
		if (!q) {
			printk(KERN_ERR
			       "cciss: unable to allocate queue for disk %d\n",
			       j);
			goto clean4;
		}
		drv->queue = q;

		blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);

		/* This is a hardware imposed limit. */
		blk_queue_max_hw_segments(q, MAXSGENTRIES);

		/* This is a limit in the driver and could be eliminated. */
		blk_queue_max_phys_segments(q, MAXSGENTRIES);

		blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);

		blk_queue_softirq_done(q, cciss_softirq_done);

		q->queuedata = hba[i];
		sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
		disk->major = hba[i]->major;
		disk->first_minor = j << NWD_SHIFT;
		disk->fops = &cciss_fops;
		disk->queue = q;
		disk->private_data = drv;
		disk->driverfs_dev = &pdev->dev;
		/* we must register the controller even if no disks exist */
		/* this is for the online array utilities */
		if (!drv->heads && j) {
			j++;
			continue;
		}
		blk_queue_hardsect_size(q, drv->block_size);
		set_capacity(disk, drv->nr_blocks);
		j++;
	} while (j <= hba[i]->highest_lun);

	/* Make sure all queue data is written out before */
	/* interrupt handler, triggered by add_disk, */
	/* is allowed to start them. */
	wmb();

	for (j = 0; j <= hba[i]->highest_lun; j++)
		add_disk(hba[i]->gendisk[j]);

	return 1;

clean4:
#ifdef CONFIG_CISS_SCSI_TAPE
	kfree(hba[i]->scsi_rejects.complete);
#endif
	kfree(hba[i]->cmd_pool_bits);
	if (hba[i]->cmd_pool)
		pci_free_consistent(hba[i]->pdev,
				    hba[i]->nr_cmds * sizeof(CommandList_struct),
				    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
	if (hba[i]->errinfo_pool)
		pci_free_consistent(hba[i]->pdev,
				    hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
				    hba[i]->errinfo_pool,
				    hba[i]->errinfo_pool_dhandle);
	free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
clean2:
	unregister_blkdev(hba[i]->major, hba[i]->devname);
clean1:
	hba[i]->busy_initializing = 0;
	/* cleanup any queues that may have been initialized */
	for (j = 0; j <= hba[i]->highest_lun; j++) {
		drive_info_struct *drv = &(hba[i]->drv[j]);
		if (drv->queue)
			blk_cleanup_queue(drv->queue);
	}
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_hba(i);
	return -1;
}
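/* PCI shutdown hook: flush the controller's battery-backed write cache to
 * disk and release the interrupt before the system goes down. */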
static void cciss_shutdown(struct pci_dev *pdev)
{
	ctlr_info_t *tmp_ptr;
	int i;
	char flush_buf[4];
	int return_code;

	tmp_ptr = pci_get_drvdata(pdev);
	if (tmp_ptr == NULL)
		return;
	i = tmp_ptr->ctlr;
	if (hba[i] == NULL)
		return;

	/* Turn board interrupts off and send the flush cache command */
	/* sendcmd will turn off interrupt, and send the flush...
	 * To write all data in the battery backed cache to disks */
	memset(flush_buf, 0, 4);
	return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
			      TYPE_CMD);
	if (return_code == IO_OK) {
		printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
	} else {
		printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
	}
	free_irq(hba[i]->intr[2], hba[i]);
}
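/* PCI remove hook: tear down the gendisks and request queues, unhook the
 * SCSI tape support, flush the cache via cciss_shutdown(), and free all
 * per-controller resources. */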
static void __devexit cciss_remove_one(struct pci_dev *pdev)
{
	ctlr_info_t *tmp_ptr;
	int i, j;

	if (pci_get_drvdata(pdev) == NULL) {
		printk(KERN_ERR "cciss: Unable to remove device \n");
		return;
	}
	tmp_ptr = pci_get_drvdata(pdev);
	i = tmp_ptr->ctlr;
	if (hba[i] == NULL) {
		printk(KERN_ERR "cciss: device appears to "
		       "already be removed \n");
		return;
	}

	remove_proc_entry(hba[i]->devname, proc_cciss);
	unregister_blkdev(hba[i]->major, hba[i]->devname);

	/* remove it from the disk list */
	for (j = 0; j < CISS_MAX_LUN; j++) {
		struct gendisk *disk = hba[i]->gendisk[j];
		if (disk) {
			struct request_queue *q = disk->queue;

			if (disk->flags & GENHD_FL_UP)
				del_gendisk(disk);
			if (q)
				blk_cleanup_queue(q);
		}
	}

#ifdef CONFIG_CISS_SCSI_TAPE
	cciss_unregister_scsi(i);	/* unhook from SCSI subsystem */
#endif

	cciss_shutdown(pdev);

#ifdef CONFIG_PCI_MSI
	if (hba[i]->msix_vector)
		pci_disable_msix(hba[i]->pdev);
	else if (hba[i]->msi_vector)
		pci_disable_msi(hba[i]->pdev);
#endif				/* CONFIG_PCI_MSI */

	iounmap(hba[i]->vaddr);

	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
			    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
			    hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
	kfree(hba[i]->cmd_pool_bits);
#ifdef CONFIG_CISS_SCSI_TAPE
	kfree(hba[i]->scsi_rejects.complete);
#endif
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_hba(i);
}
static struct pci_driver cciss_pci_driver = {
	.name = "cciss",
	.probe = cciss_init_one,
	.remove = __devexit_p(cciss_remove_one),
	.id_table = cciss_pci_device_id,	/* id_table */
	.shutdown = cciss_shutdown,
};
/*
 *  This is it.  Register the PCI driver information for the cards we control
 *  the OS will call our registered routines when it finds one of our cards.
 */
static int __init cciss_init(void)
{
	printk(KERN_INFO DRIVER_NAME "\n");

	/* Register for our PCI devices */
	return pci_register_driver(&cciss_pci_driver);
}
static void __exit cciss_cleanup(void)
{
	int i;

	pci_unregister_driver(&cciss_pci_driver);
	/* double check that all controller entrys have been removed */
	for (i = 0; i < MAX_CTLR; i++) {
		if (hba[i] != NULL) {
			printk(KERN_WARNING "cciss: had to remove"
			       " controller %d\n", i);
			cciss_remove_one(hba[i]->pdev);
		}
	}
	remove_proc_entry("driver/cciss", NULL);
}
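/* Called when the controller stops responding: mark the board dead, disable
 * the PCI device, and fail every queued and in-flight command back to its
 * originator with a hardware error. */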
static void fail_all_cmds(unsigned long ctlr)
{
	/* If we get here, the board is apparently dead. */
	ctlr_info_t *h = hba[ctlr];
	CommandList_struct *c;
	unsigned long flags;

	printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
	h->alive = 0;	/* the controller apparently died... */

	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);

	pci_disable_device(h->pdev);	/* Make sure it is really dead. */

	/* move everything off the request queue onto the completed queue */
	while ((c = h->reqQ) != NULL) {
		removeQ(&(h->reqQ), c);
		h->Qdepth--;
		addQ(&(h->cmpQ), c);
	}

	/* Now, fail everything on the completed queue with a HW error */
	while ((c = h->cmpQ) != NULL) {
		removeQ(&h->cmpQ, c);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		if (c->cmd_type == CMD_RWREQ) {
			complete_command(h, c, 0);
		} else if (c->cmd_type == CMD_IOCTL_PEND)
			complete(c->waiting);
#ifdef CONFIG_CISS_SCSI_TAPE
		else if (c->cmd_type == CMD_SCSI)
			complete_scsi_command(c, 0, 0);
#endif
	}
	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
	return;
}

module_init(cciss_init);
module_exit(cciss_cleanup);