/*
 *    Disk Array driver for HP SA 5xxx and 6xxx Controllers
 *    Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/major.h>
#include <linux/bio.h>
#include <linux/blkpg.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/completion.h>
#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
#define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 14)
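/* For example, CCISS_DRIVER_VERSION(3, 6, 14) packs to
 * (3 << 16) | (6 << 8) | 14 == 0x3060E; this packed value is what the
 * CCISS_GETDRIVVER ioctl below returns as DRIVER_VERSION.
 */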
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
			" SA6i P600 P800 P400 P400i E200 E200i E500");
MODULE_VERSION("3.6.14");
MODULE_LICENSE("GPL");

#include "cciss_cmd.h"
#include <linux/cciss_ioctl.h>
/* define the PCI info for the cards we can control */
static const struct pci_device_id cciss_pci_device_id[] = {
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 *  nr_cmds = Number of commands supported by controller
 */
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5_access, 512},
	{0x40800E11, "Smart Array 5i", &SA5B_access, 512},
	{0x40820E11, "Smart Array 532", &SA5B_access, 512},
	{0x40830E11, "Smart Array 5312", &SA5B_access, 512},
	{0x409A0E11, "Smart Array 641", &SA5_access, 512},
	{0x409B0E11, "Smart Array 642", &SA5_access, 512},
	{0x409C0E11, "Smart Array 6400", &SA5_access, 512},
	{0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
	{0x40910E11, "Smart Array 6i", &SA5_access, 512},
	{0x3225103C, "Smart Array P600", &SA5_access, 512},
	{0x3223103C, "Smart Array P800", &SA5_access, 512},
	{0x3234103C, "Smart Array P400", &SA5_access, 512},
	{0x3235103C, "Smart Array P400i", &SA5_access, 512},
	{0x3211103C, "Smart Array E200i", &SA5_access, 120},
	{0x3212103C, "Smart Array E200", &SA5_access, 120},
	{0x3213103C, "Smart Array E200i", &SA5_access, 120},
	{0x3214103C, "Smart Array E200i", &SA5_access, 120},
	{0x3215103C, "Smart Array E200i", &SA5_access, 120},
	{0x3237103C, "Smart Array E500", &SA5_access, 512},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
};
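/* Each board_id above is the PCI subsystem device ID in the upper 16 bits
 * and the subsystem vendor ID in the lower 16 bits; e.g. 0x3225103C is
 * subsystem device 0x3225 from vendor 0x103C (HP), matching the
 * {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225} entry in
 * cciss_pci_device_id[] above.
 */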
/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/*define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

#define READ_AHEAD 1024

/* Originally cciss driver only supports 8 major numbers */
#define MAX_CTLR_ORIG	8

static ctlr_info_t *hba[MAX_CTLR];
static void do_cciss_request(request_queue_t *q);
static irqreturn_t do_cciss_intr(int irq, void *dev_id);
static int cciss_open(struct inode *inode, struct file *filep);
static int cciss_release(struct inode *inode, struct file *filep);
static int cciss_ioctl(struct inode *inode, struct file *filep,
		       unsigned int cmd, unsigned long arg);
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);

static int cciss_revalidate(struct gendisk *disk);
static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
			   int clear_all);

static void cciss_read_capacity(int ctlr, int logvol, int withirq,
				sector_t *total_size, unsigned int *block_size);
static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
				   sector_t *total_size, unsigned int *block_size);
static void cciss_geometry_inquiry(int ctlr, int logvol,
				   int withirq, sector_t total_size,
				   unsigned int block_size, InquiryData_struct *inq_buff,
				   drive_info_struct *drv);
static void cciss_getgeometry(int cntl_num);
static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
					   __u32);

static void start_io(ctlr_info_t *h);
static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
		   unsigned int use_unit_num, unsigned int log_unit,
		   __u8 page_code, unsigned char *scsi3addr, int cmd_type);
static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
			   unsigned int use_unit_num, unsigned int log_unit,
			   __u8 page_code, int cmd_type);

static void fail_all_cmds(unsigned long ctlr);

#ifdef CONFIG_PROC_FS
static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
			       int length, int *eof, void *data);
static void cciss_procinit(int i);
#else
static void cciss_procinit(int i)
{
}
#endif				/* CONFIG_PROC_FS */

#ifdef CONFIG_COMPAT
static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
#endif
static struct block_device_operations cciss_fops = {
	.owner = THIS_MODULE,
	.open = cciss_open,
	.release = cciss_release,
	.ioctl = cciss_ioctl,
	.getgeo = cciss_getgeo,
#ifdef CONFIG_COMPAT
	.compat_ioctl = cciss_compat_ioctl,
#endif
	.revalidate_disk = cciss_revalidate,
};
/*
 * Enqueuing and dequeuing functions for cmdlists.
 */
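/* reqQ and cmpQ are kept as circular doubly-linked lists threaded through
 * the next/prev pointers of the commands themselves, with *Qptr pointing at
 * the head.  addQ() links a command in just before the head (the tail of
 * the ring); removeQ() unlinks it and updates *Qptr when the head itself is
 * removed or the ring becomes empty.
 */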
static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
{
	if (*Qptr == NULL) {
		*Qptr = c;
		c->next = c->prev = c;
	} else {
		c->prev = (*Qptr)->prev;
		c->next = (*Qptr);
		(*Qptr)->prev->next = c;
		(*Qptr)->prev = c;
	}
}

static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
					  CommandList_struct *c)
{
	if (c && c->next != c) {
		if (*Qptr == c)
			*Qptr = c->next;
		c->prev->next = c->next;
		c->next->prev = c->prev;
	} else {
		*Qptr = NULL;
	}
	return c;
}
#include "cciss_scsi.c"		/* For SCSI tape support */

#define RAID_UNKNOWN 6

#ifdef CONFIG_PROC_FS

/*
 * Report information about this controller.
 */
#define ENG_GIG 1000000000
#define ENG_GIG_FACTOR (ENG_GIG/512)
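/* ENG_GIG_FACTOR is 10^9 / 512 = 1953125, so dividing a size expressed in
 * 512-byte sectors by it yields decimal gigabytes; e.g. 2930277168 sectors
 * / 1953125 is roughly 1500 GB.  cciss_proc_get_info() below uses
 * sector_div() with this factor to print the "%4u.%02uGB" column.
 */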
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"UNKNOWN"
};

static struct proc_dir_entry *proc_cciss;
static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
			       int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t len = 0;
	int size, i, ctlr;
	ctlr_info_t *h = (ctlr_info_t *) data;
	drive_info_struct *drv;
	unsigned long flags;
	sector_t vol_sz, vol_sz_frac;

	ctlr = h->ctlr;

	/* prevent displaying bogus info during configuration
	 * or deconfiguration of a logical volume
	 */
	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
		return -EBUSY;
	}
	h->busy_configuring = 1;
	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);

	size = sprintf(buffer, "%s: HP %s Controller\n"
		       "Board ID: 0x%08lx\n"
		       "Firmware Version: %c%c%c%c\n"
		       "IRQ: %d\n"
		       "Logical drives: %d\n"
		       "Max sectors: %d\n"
		       "Current Q depth: %d\n"
		       "Current # commands on controller: %d\n"
		       "Max Q depth since init: %d\n"
		       "Max # commands on controller since init: %d\n"
		       "Max SG entries since init: %d\n\n",
		       h->devname,
		       h->product_name,
		       (unsigned long)h->board_id,
		       h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
		       h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
		       h->num_luns,
		       h->cciss_max_sectors,
		       h->Qdepth, h->commands_outstanding,
		       h->maxQsinceinit, h->max_outstanding, h->maxSG);

	pos += size;
	len += size;
	cciss_proc_tape_report(ctlr, buffer, &pos, &len);
	for (i = 0; i <= h->highest_lun; i++) {

		drv = &h->drv[i];
		if (drv->heads == 0)
			continue;

		vol_sz = drv->nr_blocks;
		vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
		vol_sz_frac *= 100;
		sector_div(vol_sz_frac, ENG_GIG_FACTOR);

		if (drv->raid_level > 5)
			drv->raid_level = RAID_UNKNOWN;
		size = sprintf(buffer + len, "cciss/c%dd%d:"
			       "\t%4u.%02uGB\tRAID %s\n",
			       ctlr, i, (int)vol_sz, (int)vol_sz_frac,
			       raid_label[drv->raid_level]);
		pos += size;
		len += size;
	}

	*eof = 1;
	*start = buffer + offset;
	len -= offset;
	if (len > length)
		len = length;
	h->busy_configuring = 0;
	return len;
}
static int
cciss_proc_write(struct file *file, const char __user *buffer,
		 unsigned long count, void *data)
{
	unsigned char cmd[80];
	int len;
#ifdef CONFIG_CISS_SCSI_TAPE
	ctlr_info_t *h = (ctlr_info_t *) data;
	int rc;
#endif

	if (count > sizeof(cmd) - 1)
		return -EINVAL;
	if (copy_from_user(cmd, buffer, count))
		return -EFAULT;
	cmd[count] = '\0';
	len = strlen(cmd);	// above 3 lines ensure safety
	if (len && cmd[len - 1] == '\n')
		cmd[--len] = '\0';
#ifdef CONFIG_CISS_SCSI_TAPE
	if (strcmp("engage scsi", cmd) == 0) {
		rc = cciss_engage_scsi(h->ctlr);
		if (rc != 0)
			return -rc;
		return count;
	}
#endif
	/* might be nice to have "disengage" too, but it's not
	   safely possible. (only 1 module use count, lock issues.) */
	return -EINVAL;
}
/*
 * Get us a file in /proc/cciss that says something about each controller.
 * Create /proc/cciss if it doesn't exist yet.
 */
static void __devinit cciss_procinit(int i)
{
	struct proc_dir_entry *pde;

	if (proc_cciss == NULL) {
		proc_cciss = proc_mkdir("cciss", proc_root_driver);
		if (!proc_cciss)
			return;
	}

	pde = create_proc_read_entry(hba[i]->devname,
				     S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
				     proc_cciss, cciss_proc_get_info, hba[i]);
	pde->write_proc = cciss_proc_write;
}
#endif				/* CONFIG_PROC_FS */
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  For operations that can wait for kmalloc
 * to possible sleep, this routine can be called with get_from_pool set to 0.
 * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
 */
static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
{
	CommandList_struct *c;
	int i;
	u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	if (!get_from_pool) {
		c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
			sizeof(CommandList_struct), &cmd_dma_handle);
		if (c == NULL)
			return NULL;
		memset(c, 0, sizeof(CommandList_struct));

		c->err_info = (ErrorInfo_struct *)
		    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
					 &err_dma_handle);

		if (c->err_info == NULL) {
			pci_free_consistent(h->pdev,
				sizeof(CommandList_struct), c, cmd_dma_handle);
			return NULL;
		}
		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
	} else {		/* get it out of the controllers pool */
		do {
			i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
			if (i == h->nr_cmds)
				return NULL;
		} while (test_and_set_bit
			 (i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
#ifdef CCISS_DEBUG
		printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
#endif
		c = h->cmd_pool + i;
		memset(c, 0, sizeof(CommandList_struct));
		cmd_dma_handle = h->cmd_pool_dhandle
		    + i * sizeof(CommandList_struct);
		c->err_info = h->errinfo_pool + i;
		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
		err_dma_handle = h->errinfo_pool_dhandle
		    + i * sizeof(ErrorInfo_struct);
		h->nr_allocs++;
	}

	c->busaddr = (__u32) cmd_dma_handle;
	temp64.val = (__u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(ErrorInfo_struct);

	return c;
}
/*
 * Frees a command block that was previously allocated with cmd_alloc().
 */
static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
{
	int i;
	u64bit temp64;

	if (!got_from_pool) {
		temp64.val32.lower = c->ErrDesc.Addr.lower;
		temp64.val32.upper = c->ErrDesc.Addr.upper;
		pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
				    c->err_info, (dma_addr_t) temp64.val);
		pci_free_consistent(h->pdev, sizeof(CommandList_struct),
				    c, (dma_addr_t) c->busaddr);
	} else {
		i = c - h->cmd_pool;
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
		h->nr_frees++;
	}
}
static inline ctlr_info_t *get_host(struct gendisk *disk)
{
	return disk->queue->queuedata;
}

static inline drive_info_struct *get_drv(struct gendisk *disk)
{
	return disk->private_data;
}
/*
 * Open.  Make sure the device is really there.
 */
static int cciss_open(struct inode *inode, struct file *filep)
{
	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
	drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
#endif				/* CCISS_DEBUG */

	if (host->busy_initializing || drv->busy_configuring)
		return -EBUSY;
	/*
	 * Root is allowed to open raw volume zero even if it's not configured
	 * so array config can still work. Root is also allowed to open any
	 * volume that has a LUN ID, so it can issue IOCTL to reread the
	 * disk information.  I don't think I really like this
	 * but I'm already using way to many device nodes to claim another one
	 * for "raw controller".
	 */
	if (drv->heads == 0) {
		if (iminor(inode) != 0) {	/* not node 0? */
			/* if not node 0 make sure it is a partition = 0 */
			if (iminor(inode) & 0x0f) {
				return -ENXIO;
				/* if it is, make sure we have a LUN ID */
			} else if (drv->LunID == 0) {
				return -ENXIO;
			}
		}
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}
	drv->usage_count++;
	host->usage_count++;
	return 0;
}
static int cciss_release(struct inode *inode, struct file *filep)
{
	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
	drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss_release %s\n",
	       inode->i_bdev->bd_disk->disk_name);
#endif				/* CCISS_DEBUG */

	drv->usage_count--;
	host->usage_count--;
	return 0;
}
#ifdef CONFIG_COMPAT

static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
{
	int ret;
	lock_kernel();
	ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
	unlock_kernel();
	return ret;
}
static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
				  unsigned long arg);
static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
				      unsigned long arg);
static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return do_ioctl(f, cmd, arg);

	case CCISS_PASSTHRU32:
		return cciss_ioctl32_passthru(f, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return cciss_ioctl32_big_passthru(f, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
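/* The two handlers below translate the 32-bit passthru layouts into the
 * native IOCTL_Command_struct/BIG_IOCTL_Command_struct: the repacked copy is
 * staged in user-accessible memory via compat_alloc_user_space(), the 32-bit
 * buffer pointer is widened with compat_ptr(), and the request is forwarded
 * through do_ioctl() to the regular CCISS_PASSTHRU/CCISS_BIG_PASSTHRU paths.
 * Only error_info is copied back into the caller's 32-bit structure.
 */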
static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
				  unsigned long arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	err = 0;
	err |=
	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |=
	    copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |=
	    copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |=
	    copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
				      unsigned long arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	err = 0;
	err |=
	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |=
	    copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |=
	    copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |=
	    copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
#endif				/* CONFIG_COMPAT */
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	drive_info_struct *drv = get_drv(bdev->bd_disk);

	if (!drv->cylinders)
		return -ENXIO;

	geo->heads = drv->heads;
	geo->sectors = drv->sectors;
	geo->cylinders = drv->cylinders;
	return 0;
}
static int cciss_ioctl(struct inode *inode, struct file *filep,
		       unsigned int cmd, unsigned long arg)
{
	struct block_device *bdev = inode->i_bdev;
	struct gendisk *disk = bdev->bd_disk;
	ctlr_info_t *host = get_host(disk);
	drive_info_struct *drv = get_drv(disk);
	int ctlr = host->ctlr;
	void __user *argp = (void __user *)arg;

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
#endif				/* CCISS_DEBUG */

	switch (cmd) {
	case CCISS_GETPCIINFO:
		{
			cciss_pci_info_struct pciinfo;

			if (!arg)
				return -EINVAL;
			pciinfo.domain = pci_domain_nr(host->pdev->bus);
			pciinfo.bus = host->pdev->bus->number;
			pciinfo.dev_fn = host->pdev->devfn;
			pciinfo.board_id = host->board_id;
			if (copy_to_user
			    (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
				return -EFAULT;
			return 0;
		}
	case CCISS_GETINTINFO:
		{
			cciss_coalint_struct intinfo;
			if (!arg)
				return -EINVAL;
			intinfo.delay =
			    readl(&host->cfgtable->HostWrite.CoalIntDelay);
			intinfo.count =
			    readl(&host->cfgtable->HostWrite.CoalIntCount);
			if (copy_to_user
			    (argp, &intinfo, sizeof(cciss_coalint_struct)))
				return -EFAULT;
			return 0;
		}
	case CCISS_SETINTINFO:
		{
			cciss_coalint_struct intinfo;
			unsigned long flags;
			int i;

			if (!arg)
				return -EINVAL;
			if (!capable(CAP_SYS_ADMIN))
				return -EPERM;
			if (copy_from_user
			    (&intinfo, argp, sizeof(cciss_coalint_struct)))
				return -EFAULT;
			if ((intinfo.delay == 0) && (intinfo.count == 0))
			{
//                      printk("cciss_ioctl: delay and count cannot be 0\n");
				return -EINVAL;
			}
			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
			/* Update the field, and then ring the doorbell */
			writel(intinfo.delay,
			       &(host->cfgtable->HostWrite.CoalIntDelay));
			writel(intinfo.count,
			       &(host->cfgtable->HostWrite.CoalIntCount));
			writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);

			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
				if (!(readl(host->vaddr + SA5_DOORBELL)
				      & CFGTBL_ChangeReq))
					break;
				/* delay and try again */
				udelay(1000);
			}
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
			if (i >= MAX_IOCTL_CONFIG_WAIT)
				return -EAGAIN;
			return 0;
		}
	case CCISS_GETNODENAME:
		{
			NodeName_type NodeName;
			int i;

			if (!arg)
				return -EINVAL;
			for (i = 0; i < 16; i++)
				NodeName[i] =
				    readb(&host->cfgtable->ServerName[i]);
			if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
				return -EFAULT;
			return 0;
		}
	case CCISS_SETNODENAME:
		{
			NodeName_type NodeName;
			unsigned long flags;
			int i;

			if (!arg)
				return -EINVAL;
			if (!capable(CAP_SYS_ADMIN))
				return -EPERM;

			if (copy_from_user
			    (NodeName, argp, sizeof(NodeName_type)))
				return -EFAULT;

			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);

			/* Update the field, and then ring the doorbell */
			for (i = 0; i < 16; i++)
				writeb(NodeName[i],
				       &host->cfgtable->ServerName[i]);

			writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);

			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
				if (!(readl(host->vaddr + SA5_DOORBELL)
				      & CFGTBL_ChangeReq))
					break;
				/* delay and try again */
				udelay(1000);
			}
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
			if (i >= MAX_IOCTL_CONFIG_WAIT)
				return -EAGAIN;
			return 0;
		}

	case CCISS_GETHEARTBEAT:
		{
			Heartbeat_type heartbeat;

			if (!arg)
				return -EINVAL;
			heartbeat = readl(&host->cfgtable->HeartBeat);
			if (copy_to_user
			    (argp, &heartbeat, sizeof(Heartbeat_type)))
				return -EFAULT;
			return 0;
		}
	case CCISS_GETBUSTYPES:
		{
			BusTypes_type BusTypes;

			if (!arg)
				return -EINVAL;
			BusTypes = readl(&host->cfgtable->BusTypes);
			if (copy_to_user
			    (argp, &BusTypes, sizeof(BusTypes_type)))
				return -EFAULT;
			return 0;
		}
	case CCISS_GETFIRMVER:
		{
			FirmwareVer_type firmware;

			if (!arg)
				return -EINVAL;
			memcpy(firmware, host->firm_ver, 4);

			if (copy_to_user
			    (argp, firmware, sizeof(FirmwareVer_type)))
				return -EFAULT;
			return 0;
		}
	case CCISS_GETDRIVVER:
		{
			DriverVer_type DriverVer = DRIVER_VERSION;

			if (!arg)
				return -EINVAL;

			if (copy_to_user
			    (argp, &DriverVer, sizeof(DriverVer_type)))
				return -EFAULT;
			return 0;
		}

	case CCISS_REVALIDVOLS:
		return rebuild_lun_table(host, NULL);

	case CCISS_GETLUNINFO:{
			LogvolInfo_struct luninfo;

			luninfo.LunID = drv->LunID;
			luninfo.num_opens = drv->usage_count;
			luninfo.num_parts = 0;
			if (copy_to_user(argp, &luninfo,
					 sizeof(LogvolInfo_struct)))
				return -EFAULT;
			return 0;
		}
	case CCISS_DEREGDISK:
		return rebuild_lun_table(host, disk);

	case CCISS_REGNEWD:
		return rebuild_lun_table(host, NULL);

	case CCISS_PASSTHRU:
		{
			IOCTL_Command_struct iocommand;
			CommandList_struct *c;
			char *buff = NULL;
			u64bit temp64;
			unsigned long flags;
			DECLARE_COMPLETION_ONSTACK(wait);

			if (!arg)
				return -EINVAL;

			if (!capable(CAP_SYS_RAWIO))
				return -EPERM;

			if (copy_from_user
			    (&iocommand, argp, sizeof(IOCTL_Command_struct)))
				return -EFAULT;
			if ((iocommand.buf_size < 1) &&
			    (iocommand.Request.Type.Direction != XFER_NONE)) {
				return -EINVAL;
			}
#if 0				/* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
			/* Check kmalloc limits */
			if (iocommand.buf_size > 128000)
				return -EINVAL;
#endif
			if (iocommand.buf_size > 0) {
				buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
				if (buff == NULL)
					return -EFAULT;
			}
			if (iocommand.Request.Type.Direction == XFER_WRITE) {
				/* Copy the data into the buffer we created */
				if (copy_from_user
				    (buff, iocommand.buf, iocommand.buf_size)) {
					kfree(buff);
					return -EFAULT;
				}
			} else {
				memset(buff, 0, iocommand.buf_size);
			}
			if ((c = cmd_alloc(host, 0)) == NULL) {
				kfree(buff);
				return -ENOMEM;
			}
			// Fill in the command type
			c->cmd_type = CMD_IOCTL_PEND;
			// Fill in Command Header
			c->Header.ReplyQueue = 0;	// unused in simple mode
			if (iocommand.buf_size > 0)	// buffer to fill
			{
				c->Header.SGList = 1;
				c->Header.SGTotal = 1;
			} else	// no buffers to fill
			{
				c->Header.SGList = 0;
				c->Header.SGTotal = 0;
			}
			c->Header.LUN = iocommand.LUN_info;
			c->Header.Tag.lower = c->busaddr;	// use the kernel address the cmd block for tag

			// Fill in Request block
			c->Request = iocommand.Request;

			// Fill in the scatter gather information
			if (iocommand.buf_size > 0) {
				temp64.val = pci_map_single(host->pdev, buff,
					iocommand.buf_size,
					PCI_DMA_BIDIRECTIONAL);
				c->SG[0].Addr.lower = temp64.val32.lower;
				c->SG[0].Addr.upper = temp64.val32.upper;
				c->SG[0].Len = iocommand.buf_size;
				c->SG[0].Ext = 0;	// we are not chaining
			}
			c->waiting = &wait;

			/* Put the request on the tail of the request queue */
			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
			addQ(&host->reqQ, c);
			host->Qdepth++;
			start_io(host);
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);

			wait_for_completion(&wait);

			/* unlock the buffers from DMA */
			temp64.val32.lower = c->SG[0].Addr.lower;
			temp64.val32.upper = c->SG[0].Addr.upper;
			pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
					 iocommand.buf_size,
					 PCI_DMA_BIDIRECTIONAL);

			/* Copy the error information out */
			iocommand.error_info = *(c->err_info);
			if (copy_to_user
			    (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
				kfree(buff);
				cmd_free(host, c, 0);
				return -EFAULT;
			}

			if (iocommand.Request.Type.Direction == XFER_READ) {
				/* Copy the data out of the buffer we created */
				if (copy_to_user
				    (iocommand.buf, buff, iocommand.buf_size)) {
					kfree(buff);
					cmd_free(host, c, 0);
					return -EFAULT;
				}
			}
			kfree(buff);
			cmd_free(host, c, 0);
			return 0;
		}
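	/* CCISS_BIG_PASSTHRU differs from CCISS_PASSTHRU in that the user
	 * buffer may be larger than a single kmalloc: it is split into at
	 * most MAXSGENTRIES chunks of no more than ioc->malloc_size bytes,
	 * each chunk getting its own bounce buffer and scatter-gather entry,
	 * hence the ioc->buf_size > ioc->malloc_size * MAXSGENTRIES check
	 * below.
	 */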
	case CCISS_BIG_PASSTHRU:{
			BIG_IOCTL_Command_struct *ioc;
			CommandList_struct *c;
			unsigned char **buff = NULL;
			int *buff_size = NULL;
			u64bit temp64;
			unsigned long flags;
			BYTE sg_used = 0;
			int status = 0;
			int i;
			DECLARE_COMPLETION_ONSTACK(wait);
			__u32 left;
			__u32 sz;
			BYTE __user *data_ptr;

			if (!arg)
				return -EINVAL;
			if (!capable(CAP_SYS_RAWIO))
				return -EPERM;
			ioc = (BIG_IOCTL_Command_struct *)
			    kmalloc(sizeof(*ioc), GFP_KERNEL);
			if (!ioc) {
				status = -ENOMEM;
				goto cleanup1;
			}
			if (copy_from_user(ioc, argp, sizeof(*ioc))) {
				status = -EFAULT;
				goto cleanup1;
			}
			if ((ioc->buf_size < 1) &&
			    (ioc->Request.Type.Direction != XFER_NONE)) {
				status = -EINVAL;
				goto cleanup1;
			}
			/* Check kmalloc limits  using all SGs */
			if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
				status = -EINVAL;
				goto cleanup1;
			}
			if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
				status = -EINVAL;
				goto cleanup1;
			}
			buff =
			    kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
			if (!buff) {
				status = -ENOMEM;
				goto cleanup1;
			}
			buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
					    GFP_KERNEL);
			if (!buff_size) {
				status = -ENOMEM;
				goto cleanup1;
			}
			left = ioc->buf_size;
			data_ptr = ioc->buf;
			while (left) {
				sz = (left >
				      ioc->malloc_size) ? ioc->
				    malloc_size : left;
				buff_size[sg_used] = sz;
				buff[sg_used] = kmalloc(sz, GFP_KERNEL);
				if (buff[sg_used] == NULL) {
					status = -ENOMEM;
					goto cleanup1;
				}
				if (ioc->Request.Type.Direction == XFER_WRITE) {
					if (copy_from_user
					    (buff[sg_used], data_ptr, sz)) {
						status = -ENOMEM;
						goto cleanup1;
					}
				} else {
					memset(buff[sg_used], 0, sz);
				}
				left -= sz;
				data_ptr += sz;
				sg_used++;
			}
			if ((c = cmd_alloc(host, 0)) == NULL) {
				status = -ENOMEM;
				goto cleanup1;
			}
			c->cmd_type = CMD_IOCTL_PEND;
			c->Header.ReplyQueue = 0;

			if (ioc->buf_size > 0) {
				c->Header.SGList = sg_used;
				c->Header.SGTotal = sg_used;
			} else {
				c->Header.SGList = 0;
				c->Header.SGTotal = 0;
			}
			c->Header.LUN = ioc->LUN_info;
			c->Header.Tag.lower = c->busaddr;

			c->Request = ioc->Request;
			if (ioc->buf_size > 0) {
				for (i = 0; i < sg_used; i++) {
					temp64.val =
					    pci_map_single(host->pdev, buff[i],
						    buff_size[i],
						    PCI_DMA_BIDIRECTIONAL);
					c->SG[i].Addr.lower =
					    temp64.val32.lower;
					c->SG[i].Addr.upper =
					    temp64.val32.upper;
					c->SG[i].Len = buff_size[i];
					c->SG[i].Ext = 0;	/* we are not chaining */
				}
			}
			c->waiting = &wait;
			/* Put the request on the tail of the request queue */
			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
			addQ(&host->reqQ, c);
			host->Qdepth++;
			start_io(host);
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
			wait_for_completion(&wait);
			/* unlock the buffers from DMA */
			for (i = 0; i < sg_used; i++) {
				temp64.val32.lower = c->SG[i].Addr.lower;
				temp64.val32.upper = c->SG[i].Addr.upper;
				pci_unmap_single(host->pdev,
					(dma_addr_t) temp64.val, buff_size[i],
					PCI_DMA_BIDIRECTIONAL);
			}
			/* Copy the error information out */
			ioc->error_info = *(c->err_info);
			if (copy_to_user(argp, ioc, sizeof(*ioc))) {
				cmd_free(host, c, 0);
				status = -EFAULT;
				goto cleanup1;
			}
			if (ioc->Request.Type.Direction == XFER_READ) {
				/* Copy the data out of the buffer we created */
				BYTE __user *ptr = ioc->buf;
				for (i = 0; i < sg_used; i++) {
					if (copy_to_user
					    (ptr, buff[i], buff_size[i])) {
						cmd_free(host, c, 0);
						status = -EFAULT;
						goto cleanup1;
					}
					ptr += buff_size[i];
				}
			}
			cmd_free(host, c, 0);
			status = 0;
		      cleanup1:
			if (buff) {
				for (i = 0; i < sg_used; i++)
					kfree(buff[i]);
				kfree(buff);
			}
			kfree(buff_size);
			kfree(ioc);
			return status;
		}
	default:
		return -ENOTTY;
	}
}
static inline void complete_buffers(struct bio *bio, int status)
{
	while (bio) {
		struct bio *xbh = bio->bi_next;
		int nr_sectors = bio_sectors(bio);

		bio->bi_next = NULL;
		bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
		bio = xbh;
	}
}
static void cciss_check_queues(ctlr_info_t *h)
{
	int start_queue = h->next_to_run;
	int i;

	/* check to see if we have maxed out the number of commands that can
	 * be placed on the queue.  If so then exit.  We do this check here
	 * in case the interrupt we serviced was from an ioctl and did not
	 * free any new commands.
	 */
	if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
		return;

	/* We have room on the queue for more commands.  Now we need to queue
	 * them up.  We will also keep track of the next queue to run so
	 * that every queue gets a chance to be started first.
	 */
	for (i = 0; i < h->highest_lun + 1; i++) {
		int curr_queue = (start_queue + i) % (h->highest_lun + 1);
		/* make sure the disk has been added and the drive is real
		 * because this can be called from the middle of init_one.
		 */
		if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
			continue;
		blk_start_queue(h->gendisk[curr_queue]->queue);

		/* check to see if we have maxed out the number of commands
		 * that can be placed on the queue.
		 */
		if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
			if (curr_queue == start_queue) {
				h->next_to_run =
				    (start_queue + 1) % (h->highest_lun + 1);
				break;
			} else {
				h->next_to_run = curr_queue;
				break;
			}
		} else {
			curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
		}
	}
}
static void cciss_softirq_done(struct request *rq)
{
	CommandList_struct *cmd = rq->completion_data;
	ctlr_info_t *h = hba[cmd->ctlr];
	unsigned long flags;
	u64bit temp64;
	int i, ddir;

	if (cmd->Request.Type.Direction == XFER_READ)
		ddir = PCI_DMA_FROMDEVICE;
	else
		ddir = PCI_DMA_TODEVICE;

	/* command did not need to be retried */
	/* unmap the DMA mapping for all the scatter gather elements */
	for (i = 0; i < cmd->Header.SGList; i++) {
		temp64.val32.lower = cmd->SG[i].Addr.lower;
		temp64.val32.upper = cmd->SG[i].Addr.upper;
		pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
	}

	complete_buffers(rq->bio, rq->errors);

	if (blk_fs_request(rq)) {
		const int rw = rq_data_dir(rq);

		disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
	}

#ifdef CCISS_DEBUG
	printk("Done with %p\n", rq);
#endif				/* CCISS_DEBUG */

	add_disk_randomness(rq->rq_disk);
	spin_lock_irqsave(&h->lock, flags);
	end_that_request_last(rq, rq->errors);
	cmd_free(h, cmd, 1);
	cciss_check_queues(h);
	spin_unlock_irqrestore(&h->lock, flags);
}
/* This function will check the usage_count of the drive to be updated/added.
 * If the usage_count is zero then the drive information will be updated and
 * the disk will be re-registered with the kernel.  If not then it will be
 * left alone for the next reboot.  The exception to this is disk 0 which
 * will always be left registered with the kernel since it is also the
 * controller node.  Any changes to disk 0 will show up on the next
 * reboot.
 */
static void cciss_update_drive_info(int ctlr, int drv_index)
{
	ctlr_info_t *h = hba[ctlr];
	struct gendisk *disk;
	InquiryData_struct *inq_buff = NULL;
	unsigned int block_size;
	sector_t total_size;
	unsigned long flags = 0;
	int ret = 0;

	/* if the disk already exists then deregister it before proceeding */
	if (h->drv[drv_index].raid_level != -1) {
		spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
		h->drv[drv_index].busy_configuring = 1;
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		ret = deregister_disk(h->gendisk[drv_index],
				      &h->drv[drv_index], 0);
		h->drv[drv_index].busy_configuring = 0;
	}

	/* If the disk is in use return */
	if (ret)
		return;

	/* Get information about the disk and modify the driver structure */
	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	if (inq_buff == NULL)
		goto mem_msg;

	/* testing to see if 16-byte CDBs are already being used */
	if (h->cciss_read == CCISS_READ_16) {
		cciss_read_capacity_16(h->ctlr, drv_index, 1,
				       &total_size, &block_size);
		goto geo_inq;
	}

	cciss_read_capacity(ctlr, drv_index, 1,
			    &total_size, &block_size);

	/* if read_capacity returns all F's this volume is >2TB in size */
	/* so we switch to 16-byte CDB's for all read/write ops */
	if (total_size == 0xFFFFFFFFULL) {
		cciss_read_capacity_16(ctlr, drv_index, 1,
				       &total_size, &block_size);
		h->cciss_read = CCISS_READ_16;
		h->cciss_write = CCISS_WRITE_16;
	} else {
		h->cciss_read = CCISS_READ_10;
		h->cciss_write = CCISS_WRITE_10;
	}
geo_inq:
	cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
			       inq_buff, &h->drv[drv_index]);

	++h->num_luns;
	disk = h->gendisk[drv_index];
	set_capacity(disk, h->drv[drv_index].nr_blocks);

	/* if it's the controller it's already added */
	if (drv_index) {
		disk->queue = blk_init_queue(do_cciss_request, &h->lock);
		sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
		disk->major = h->major;
		disk->first_minor = drv_index << NWD_SHIFT;
		disk->fops = &cciss_fops;
		disk->private_data = &h->drv[drv_index];

		/* Set up queue information */
		disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
		blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);

		/* This is a hardware imposed limit. */
		blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);

		/* This is a limit in the driver and could be eliminated. */
		blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);

		blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);

		blk_queue_softirq_done(disk->queue, cciss_softirq_done);

		disk->queue->queuedata = hba[ctlr];

		blk_queue_hardsect_size(disk->queue,
					hba[ctlr]->drv[drv_index].block_size);

		h->drv[drv_index].queue = disk->queue;
		add_disk(disk);
	}

freeret:
	kfree(inq_buff);
	return;
mem_msg:
	printk(KERN_ERR "cciss: out of memory\n");
	goto freeret;
}
/* This function will find the first index of the controllers drive array
 * that has a -1 for the raid_level and will return that index.  This is
 * where new drives will be added.  If the index to be returned is greater
 * than the highest_lun index for the controller then highest_lun is set
 * to this new index.  If there are no available indexes then -1 is returned.
 */
static int cciss_find_free_drive_index(int ctlr)
{
	int i;

	for (i = 0; i < CISS_MAX_LUN; i++) {
		if (hba[ctlr]->drv[i].raid_level == -1) {
			if (i > hba[ctlr]->highest_lun)
				hba[ctlr]->highest_lun = i;
			return i;
		}
	}
	return -1;
}
/* This function will add and remove logical drives from the Logical
 * drive array of the controller and maintain persistency of ordering
 * so that mount points are preserved until the next reboot.  This allows
 * for the removal of logical drives in the middle of the drive array
 * without a re-ordering of those drives.
 * INPUT
 * h		= The controller to perform the operations on
 * del_disk	= The disk to remove if specified.  If the value given
 *		  is NULL then no disk is removed.
 */
static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
{
	int ctlr = h->ctlr;
	int num_luns;
	ReportLunData_struct *ld_buff = NULL;
	drive_info_struct *drv = NULL;
	int return_code;
	int listlength = 0;
	int i;
	int drv_found;
	int drv_index = 0;
	__u32 lunid = 0;
	unsigned long flags;

	/* Set busy_configuring flag for this operation */
	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		return -EBUSY;
	}
	h->busy_configuring = 1;

	/* if del_disk is NULL then we are being called to add a new disk
	 * and update the logical drive table.  If it is not NULL then
	 * we will check if the disk is in use or not.
	 */
	if (del_disk != NULL) {
		drv = get_drv(del_disk);
		drv->busy_configuring = 1;
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		return_code = deregister_disk(del_disk, drv, 1);
		drv->busy_configuring = 0;
		h->busy_configuring = 0;
		return return_code;
	} else {
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;

		ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
		if (ld_buff == NULL)
			goto mem_msg;

		return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
					      sizeof(ReportLunData_struct), 0,
					      0, 0, TYPE_CMD);

		if (return_code == IO_OK) {
			listlength =
			    be32_to_cpu(*(__u32 *) ld_buff->LUNListLength);
		} else {	/* reading number of logical volumes failed */
			printk(KERN_WARNING "cciss: report logical volume"
			       " command failed\n");
			listlength = 0;
			goto freeret;
		}

		num_luns = listlength / 8;	/* 8 bytes per entry */
		if (num_luns > CISS_MAX_LUN) {
			num_luns = CISS_MAX_LUN;
			printk(KERN_WARNING "cciss: more luns configured"
			       " on controller than can be handled by"
			       " this driver.\n");
		}

		/* Compare controller drive array to drivers drive array.
		 * Check for updates in the drive information and any new drives
		 * on the controller.
		 */
		for (i = 0; i < num_luns; i++) {
			int j;

			drv_found = 0;

			lunid = (0xff &
				 (unsigned int)(ld_buff->LUN[i][3])) << 24;
			lunid |= (0xff &
				  (unsigned int)(ld_buff->LUN[i][2])) << 16;
			lunid |= (0xff &
				  (unsigned int)(ld_buff->LUN[i][1])) << 8;
			lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);

			/* Find if the LUN is already in the drive array
			 * of the controller.  If so then update its info
			 * if not is use.  If it does not exist then find
			 * the first free index and add it.
			 */
			for (j = 0; j <= h->highest_lun; j++) {
				if (h->drv[j].LunID == lunid) {
					drv_index = j;
					drv_found = 1;
				}
			}

			/* check if the drive was found already in the array */
			if (!drv_found) {
				drv_index = cciss_find_free_drive_index(ctlr);
				if (drv_index == -1)
					goto freeret;

				/*Check if the gendisk needs to be allocated */
				if (!h->gendisk[drv_index]){
					h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
					if (!h->gendisk[drv_index]){
						printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
						goto mem_msg;
					}
				}
			}
			h->drv[drv_index].LunID = lunid;
			cciss_update_drive_info(ctlr, drv_index);
		}		/* end for */
	}			/* end else */

freeret:
	kfree(ld_buff);
	h->busy_configuring = 0;
	/* We return -1 here to tell the ACU that we have registered/updated
	 * all of the drives that we can and to keep it from calling us
	 * additional times.
	 */
	return -1;
mem_msg:
	printk(KERN_ERR "cciss: out of memory\n");
	goto freeret;
}
/* This function will deregister the disk and it's queue from the
 * kernel.  It must be called with the controller lock held and the
 * drv structures busy_configuring flag set.  It's parameters are:
 *
 * disk = This is the disk to be deregistered
 * drv  = This is the drive_info_struct associated with the disk to be
 *        deregistered.  It contains information about the disk used
 *        by the driver.
 * clear_all = This flag determines whether or not the disk information
 *             is going to be completely cleared out and the highest_lun
 *             reset.  Sometimes we want to clear out information about
 *             the disk in preparation for re-adding it.  In this case
 *             the highest_lun should be left unchanged and the LunID
 *             should not be cleared.
 */
static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
			   int clear_all)
{
	int i;
	ctlr_info_t *h = get_host(disk);

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/* make sure logical volume is NOT is use */
	if (clear_all || (h->gendisk[0] == disk)) {
		if (drv->usage_count > 1)
			return -EBUSY;
	} else if (drv->usage_count > 0)
		return -EBUSY;

	/* invalidate the devices and deregister the disk.  If it is disk
	 * zero do not deregister it but just zero out it's values.  This
	 * allows us to delete disk zero but keep the controller registered.
	 */
	if (h->gendisk[0] != disk) {
		if (disk) {
			request_queue_t *q = disk->queue;
			if (disk->flags & GENHD_FL_UP)
				del_gendisk(disk);
			if (q) {
				blk_cleanup_queue(q);
				/* Set drv->queue to NULL so that we do not try
				 * to call blk_start_queue on this queue in the
				 * interrupt handler
				 */
				drv->queue = NULL;
			}
			/* If clear_all is set then we are deleting the logical
			 * drive, not just refreshing its info.  For drives
			 * other than disk 0 we will call put_disk.  We do not
			 * do this for disk 0 as we need it to be able to
			 * configure the controller.
			 */
			if (clear_all) {
				/* This isn't pretty, but we need to find the
				 * disk in our array and NULL our the pointer.
				 * This is so that we will call alloc_disk if
				 * this index is used again later.
				 */
				for (i=0; i < CISS_MAX_LUN; i++){
					if(h->gendisk[i] == disk){
						h->gendisk[i] = NULL;
						break;
					}
				}
				put_disk(disk);
			}
		}
	} else {
		set_capacity(disk, 0);
	}

	--h->num_luns;
	/* zero out the disk size info */
	drv->nr_blocks = 0;
	drv->block_size = 0;
	drv->heads = 0;
	drv->sectors = 0;
	drv->cylinders = 0;
	drv->raid_level = -1;	/* This can be used as a flag variable to
				 * indicate that this element of the drive
				 * array is free.
				 */

	if (clear_all) {
		/* check to see if it was the last disk */
		if (drv == h->drv + h->highest_lun) {
			/* if so, find the new hightest lun */
			int i, newhighest = -1;
			for (i = 0; i < h->highest_lun; i++) {
				/* if the disk has size > 0, it is available */
				if (h->drv[i].heads)
					newhighest = i;
			}
			h->highest_lun = newhighest;
		}

		drv->LunID = 0;
	}
	return 0;
}
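/* fill_cmd() builds the CISS request for both regular commands
 * (cmd_type == TYPE_CMD: inquiry, report LUNs, read capacity, cache flush)
 * and controller messages (cmd_type == TYPE_MSG: abort, reset, no-op).
 * use_unit_num selects the addressing mode: 0 addresses the controller,
 * 1 uses volume set addressing with h->drv[log_unit].LunID, and 2 uses the
 * caller-supplied scsi3addr.  Any data buffer is mapped for DMA as a single
 * scatter-gather entry.
 */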
static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num,	/* 0: address the controller,
														   1: address logical volume log_unit,
														   2: periph device address is scsi3addr */
		    unsigned int log_unit, __u8 page_code,
		    unsigned char *scsi3addr, int cmd_type)
{
	ctlr_info_t *h = hba[ctlr];
	u64bit buff_dma_handle;
	int status = IO_OK;

	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	if (buff != NULL) {
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	c->Header.Tag.lower = c->busaddr;

	c->Request.Type.Type = cmd_type;
	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case CISS_INQUIRY:
			/* If the logical unit number is 0 then, this is going
			   to controller so It's a physical command
			   mode = 0 target = 0.  So we have nothing to write.
			   otherwise, if use_unit_num == 1,
			   mode = 1(volume set addressing) target = LUNID
			   otherwise, if use_unit_num == 2,
			   mode = 0(periph dev addr) target = scsi3addr */
			if (use_unit_num == 1) {
				c->Header.LUN.LogDev.VolId =
				    h->drv[log_unit].LunID;
				c->Header.LUN.LogDev.Mode = 1;
			} else if (use_unit_num == 2) {
				memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
				       8);
				c->Header.LUN.LogDev.Mode = 0;
			}
			/* are we trying to read a vital product page */
			if (page_code != 0) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = page_code;
			}
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = CISS_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case CISS_REPORT_LOG:
		case CISS_REPORT_PHYS:
			/* Talking to controller so It's a physical command
			   mode = 00 target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF;	//MSB
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;

		case CCISS_READ_CAPACITY:
			c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
			c->Header.LUN.LogDev.Mode = 1;
			c->Request.CDBLen = 10;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			break;
		case CCISS_READ_CAPACITY_16:
			c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
			c->Header.LUN.LogDev.Mode = 1;
			c->Request.CDBLen = 16;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = 0x10;
			c->Request.CDB[10] = (size >> 24) & 0xFF;
			c->Request.CDB[11] = (size >> 16) & 0xFF;
			c->Request.CDB[12] = (size >> 8) & 0xFF;
			c->Request.CDB[13] = size & 0xFF;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			break;
		case CCISS_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			break;
		default:
			printk(KERN_WARNING
			       "cciss%d:  Unknown Command 0x%c\n", ctlr, cmd);
			return IO_ERROR;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {
		case 0:	/* ABORT message */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;	/* abort */
			c->Request.CDB[1] = 0;	/* abort a command */
			/* buff contains the tag of the command to abort */
			memcpy(&c->Request.CDB[4], buff, 8);
			break;
		case 1:	/* RESET message */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;	/* reset */
			c->Request.CDB[1] = 0x04;	/* reset a LUN */
			break;
		case 3:	/* No-Op message */
			c->Request.CDBLen = 1;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			break;
		default:
			printk(KERN_WARNING
			       "cciss%d: unknown message type %d\n", ctlr, cmd);
			return IO_ERROR;
		}
	} else {
		printk(KERN_WARNING
		       "cciss%d: unknown command type %d\n", ctlr, cmd_type);
		return IO_ERROR;
	}
	/* Fill in the scatter gather information */
	if (size > 0) {
		buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
							     buff, size,
							     PCI_DMA_BIDIRECTIONAL);
		c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
		c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
		c->SG[0].Len = size;
		c->SG[0].Ext = 0;	/* we are not chaining */
	}
	return status;
}
static int sendcmd_withirq(__u8 cmd,
			   int ctlr,
			   void *buff,
			   size_t size,
			   unsigned int use_unit_num,
			   unsigned int log_unit, __u8 page_code, int cmd_type)
{
	ctlr_info_t *h = hba[ctlr];
	CommandList_struct *c;
	u64bit buff_dma_handle;
	unsigned long flags;
	int return_status;
	DECLARE_COMPLETION_ONSTACK(wait);

	if ((c = cmd_alloc(h, 0)) == NULL)
		return -ENOMEM;
	return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
				 log_unit, page_code, NULL, cmd_type);
	if (return_status != IO_OK) {
		cmd_free(h, c, 0);
		return return_status;
	}
resend_cmd2:
	c->waiting = &wait;

	/* Put the request on the tail of the queue and send it */
	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h);
	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);

	wait_for_completion(&wait);

	if (c->err_info->CommandStatus != 0) {	/* an error has occurred */
		switch (c->err_info->CommandStatus) {
		case CMD_TARGET_STATUS:
			printk(KERN_WARNING "cciss: cmd %p has "
			       " completed with errors\n", c);
			if (c->err_info->ScsiStatus) {
				printk(KERN_WARNING "cciss: cmd %p "
				       "has SCSI Status = %x\n",
				       c, c->err_info->ScsiStatus);
			}
			break;
		case CMD_DATA_UNDERRUN:
		case CMD_DATA_OVERRUN:
			/* expected for inquire and report lun commands */
			break;
		case CMD_INVALID:
			printk(KERN_WARNING "cciss: Cmd %p is "
			       "reported invalid\n", c);
			return_status = IO_ERROR;
			break;
		case CMD_PROTOCOL_ERR:
			printk(KERN_WARNING "cciss: cmd %p has "
			       "protocol error \n", c);
			return_status = IO_ERROR;
			break;
		case CMD_HARDWARE_ERR:
			printk(KERN_WARNING "cciss: cmd %p had "
			       " hardware error\n", c);
			return_status = IO_ERROR;
			break;
		case CMD_CONNECTION_LOST:
			printk(KERN_WARNING "cciss: cmd %p had "
			       "connection lost\n", c);
			return_status = IO_ERROR;
			break;
		case CMD_ABORTED:
			printk(KERN_WARNING "cciss: cmd %p was "
			       "aborted\n", c);
			return_status = IO_ERROR;
			break;
		case CMD_ABORT_FAILED:
			printk(KERN_WARNING "cciss: cmd %p reports "
			       "abort failed\n", c);
			return_status = IO_ERROR;
			break;
		case CMD_UNSOLICITED_ABORT:
			printk(KERN_WARNING
			       "cciss%d: unsolicited abort %p\n", ctlr, c);
			if (c->retry_count < MAX_CMD_RETRIES) {
				printk(KERN_WARNING
				       "cciss%d: retrying %p\n", ctlr, c);
				c->retry_count++;
				/* erase the old error information */
				memset(c->err_info, 0,
				       sizeof(ErrorInfo_struct));
				return_status = IO_OK;
				INIT_COMPLETION(wait);
				goto resend_cmd2;
			}
			return_status = IO_ERROR;
			break;
		default:
			printk(KERN_WARNING "cciss: cmd %p returned "
			       "unknown status %x\n", c,
			       c->err_info->CommandStatus);
			return_status = IO_ERROR;
		}
	}
	/* unlock the buffers from DMA */
	buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
	buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
	pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
			 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
	cmd_free(h, c, 0);
	return return_status;
}
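/* cciss_geometry_inquiry() issues a vendor-specific inquiry (page 0xC1) to
 * read heads, sectors-per-track and RAID level for a logical volume, then
 * recomputes the cylinder count from the real capacity:
 * cylinders = (total_size + 1) / (heads * sectors), rounded up, so that
 * heads * sectors * cylinders covers the whole volume.
 */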
static void cciss_geometry_inquiry(int ctlr, int logvol,
				   int withirq, sector_t total_size,
				   unsigned int block_size,
				   InquiryData_struct *inq_buff,
				   drive_info_struct *drv)
{
	int return_code;
	unsigned long t;

	memset(inq_buff, 0, sizeof(InquiryData_struct));
	if (withirq)
		return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
					      inq_buff, sizeof(*inq_buff), 1,
					      logvol, 0xC1, TYPE_CMD);
	else
		return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
				      sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
				      TYPE_CMD);
	if (return_code == IO_OK) {
		if (inq_buff->data_byte[8] == 0xFF) {
			printk(KERN_WARNING
			       "cciss: reading geometry failed, volume "
			       "does not support reading geometry\n");
			drv->heads = 255;
			drv->sectors = 32;	// Sectors per track
			drv->raid_level = RAID_UNKNOWN;
		} else {
			drv->heads = inq_buff->data_byte[6];
			drv->sectors = inq_buff->data_byte[7];
			drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
			drv->cylinders += inq_buff->data_byte[5];
			drv->raid_level = inq_buff->data_byte[8];
		}
		drv->block_size = block_size;
		drv->nr_blocks = total_size + 1;
		t = drv->heads * drv->sectors;
		if (t > 1) {
			sector_t real_size = total_size + 1;
			unsigned long rem = sector_div(real_size, t);
			if (rem)
				real_size++;
			drv->cylinders = real_size;
		}
	} else {		/* Get geometry failed */
		printk(KERN_WARNING "cciss: reading geometry failed\n");
	}
	printk(KERN_INFO "      heads=%d, sectors=%d, cylinders=%d\n\n",
	       drv->heads, drv->sectors, drv->cylinders);
}
static void
cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
		    unsigned int *block_size)
{
	ReadCapdata_struct *buf;
	int return_code;
	buf = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
	if (buf == NULL) {
		printk(KERN_WARNING "cciss: out of memory\n");
		return;
	}
	memset(buf, 0, sizeof(ReadCapdata_struct));
	if (withirq)
		return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
				ctlr, buf, sizeof(ReadCapdata_struct),
					1, logvol, 0, TYPE_CMD);
	else
		return_code = sendcmd(CCISS_READ_CAPACITY,
				ctlr, buf, sizeof(ReadCapdata_struct),
					1, logvol, 0, NULL, TYPE_CMD);
	if (return_code == IO_OK) {
		*total_size = be32_to_cpu(*(__u32 *) buf->total_size);
		*block_size = be32_to_cpu(*(__u32 *) buf->block_size);
	} else {		/* read capacity command failed */
		printk(KERN_WARNING "cciss: read capacity failed\n");
		*total_size = 0;
		*block_size = BLOCK_SIZE;
	}
	if (*total_size != 0)
		printk(KERN_INFO "      blocks= %llu block_size= %d\n",
		       (unsigned long long)*total_size+1, *block_size);
	kfree(buf);
	return;
}
static void
cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
{
	ReadCapdata_struct_16 *buf;
	int return_code;
	buf = kmalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
	if (buf == NULL) {
		printk(KERN_WARNING "cciss: out of memory\n");
		return;
	}
	memset(buf, 0, sizeof(ReadCapdata_struct_16));
	if (withirq) {
		return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
			ctlr, buf, sizeof(ReadCapdata_struct_16),
				1, logvol, 0, TYPE_CMD);
	}
	else {
		return_code = sendcmd(CCISS_READ_CAPACITY_16,
			ctlr, buf, sizeof(ReadCapdata_struct_16),
				1, logvol, 0, NULL, TYPE_CMD);
	}
	if (return_code == IO_OK) {
		*total_size = be64_to_cpu(*(__u64 *) buf->total_size);
		*block_size = be32_to_cpu(*(__u32 *) buf->block_size);
	} else {		/* read capacity command failed */
		printk(KERN_WARNING "cciss: read capacity failed\n");
		*total_size = 0;
		*block_size = BLOCK_SIZE;
	}
	printk(KERN_INFO "      blocks= %llu block_size= %d\n",
	       (unsigned long long)*total_size+1, *block_size);
	kfree(buf);
	return;
}
static int cciss_revalidate(struct gendisk *disk)
{
	ctlr_info_t *h = get_host(disk);
	drive_info_struct *drv = get_drv(disk);
	int logvol;
	int FOUND = 0;
	unsigned int block_size;
	sector_t total_size;
	InquiryData_struct *inq_buff = NULL;

	for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
		if (h->drv[logvol].LunID == drv->LunID) {
			FOUND = 1;
			break;
		}
	}

	if (!FOUND)
		return 1;

	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	if (inq_buff == NULL) {
		printk(KERN_WARNING "cciss: out of memory\n");
		return 1;
	}
	if (h->cciss_read == CCISS_READ_10) {
		cciss_read_capacity(h->ctlr, logvol, 1,
				    &total_size, &block_size);
	} else {
		cciss_read_capacity_16(h->ctlr, logvol, 1,
				       &total_size, &block_size);
	}
	cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
			       inq_buff, drv);

	blk_queue_hardsect_size(drv->queue, drv->block_size);
	set_capacity(disk, drv->nr_blocks);

	kfree(inq_buff);
	return 0;
}
/*
 * Wait polling for a command to complete.
 * The memory mapped FIFO is polled for the completion.
 * Used only at init time, interrupts from the HBA are disabled.
 */
static unsigned long pollcomplete(int ctlr)
{
	unsigned long done;
	int i;

	/* Wait (up to 20 seconds) for a command to complete */

	for (i = 20 * HZ; i > 0; i--) {
		done = hba[ctlr]->access.command_completed(hba[ctlr]);
		if (done == FIFO_EMPTY)
			schedule_timeout_uninterruptible(1);
		else
			return done;
	}
	/* Invalid address to tell caller we ran out of time */
	return 1;
}
static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
{
	/* We get in here if sendcmd() is polling for completions
	   and gets some command back that it wasn't expecting --
	   something other than that which it just sent down.
	   Ordinarily, that shouldn't happen, but it can happen when
	   the scsi tape stuff gets into error handling mode, and
	   starts using sendcmd() to try to abort commands and
	   reset tape drives.  In that case, sendcmd may pick up
	   completions of commands that were sent to logical drives
	   through the block i/o system, or cciss ioctls completing, etc.
	   In that case, we need to save those completions for later
	   processing by the interrupt handler.
	 */

#ifdef CONFIG_CISS_SCSI_TAPE
	struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;

	/* If it's not the scsi tape stuff doing error handling, (abort */
	/* or reset) then we don't expect anything weird. */
	if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
#endif
		printk(KERN_WARNING "cciss cciss%d: SendCmd "
		       "Invalid command list address returned! (%lx)\n",
		       ctlr, complete);
		/* not much we can do. */
#ifdef CONFIG_CISS_SCSI_TAPE
		return 1;
	}

	/* We've sent down an abort or reset, but something else
	   has completed */
	if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
		/* Uh oh.  No room to save it for later... */
		printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
		       "reject list overflow, command lost!\n", ctlr);
		return 1;
	}
	/* Save it for later */
	srl->complete[srl->ncompletions] = complete;
	srl->ncompletions++;
#endif
	return 0;
}
/*
 * Send a command to the controller, and wait for it to complete.
 * Only used at init time.
 */
static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num,	/* 0: address the controller,
												   1: address logical volume log_unit,
												   2: periph device address is scsi3addr */
		   unsigned int log_unit,
		   __u8 page_code, unsigned char *scsi3addr, int cmd_type)
{
	CommandList_struct *c;
	int i;
	unsigned long complete;
	ctlr_info_t *info_p = hba[ctlr];
	u64bit buff_dma_handle;
	int status, done = 0;

	if ((c = cmd_alloc(info_p, 1)) == NULL) {
		printk(KERN_WARNING "cciss: unable to get memory");
		return IO_ERROR;
	}
	status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
			  log_unit, page_code, scsi3addr, cmd_type);
	if (status != IO_OK) {
		cmd_free(info_p, c, 1);
		return status;
	}
resend_cmd1:
	/*
	 * Disable interrupt
	 */
#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss: turning intr off\n");
#endif				/* CCISS_DEBUG */
	info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);

	/* Make sure there is room in the command FIFO */
	/* Actually it should be completely empty at this time */
	/* unless we are in here doing error handling for the scsi */
	/* tape side of the driver. */
	for (i = 200000; i > 0; i--) {
		/* if fifo isn't full go */
		if (!(info_p->access.fifo_full(info_p))) {
			break;
		}
		udelay(10);
		printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
		       " waiting!\n", ctlr);
	}
	/*
	 * Send the cmd
	 */
	info_p->access.submit_command(info_p, c);
	done = 0;
	do {
		complete = pollcomplete(ctlr);

#ifdef CCISS_DEBUG
		printk(KERN_DEBUG "cciss: command completed\n");
#endif				/* CCISS_DEBUG */

		if (complete == 1) {
			printk(KERN_WARNING
			       "cciss cciss%d: SendCmd Timeout out, "
			       "No command list address returned!\n", ctlr);
			status = IO_ERROR;
			done = 1;
			break;
		}

		/* This will need to change for direct lookup completions */
		if ((complete & CISS_ERROR_BIT)
		    && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
			/* if data overrun or underun on Report command
			   ignore it
			 */
			if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
			     (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
			     (c->Request.CDB[0] == CISS_INQUIRY)) &&
			    ((c->err_info->CommandStatus ==
			      CMD_DATA_OVERRUN) ||
			     (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
			    )) {
				complete = c->busaddr;
			} else {
				if (c->err_info->CommandStatus ==
				    CMD_UNSOLICITED_ABORT) {
					printk(KERN_WARNING "cciss%d: "
					       "unsolicited abort %p\n",
					       ctlr, c);
					if (c->retry_count < MAX_CMD_RETRIES) {
						printk(KERN_WARNING
						       "cciss%d: retrying %p\n",
						       ctlr, c);
						c->retry_count++;
						/* erase the old error */
						/* information */
						memset(c->err_info, 0,
						       sizeof
						       (ErrorInfo_struct));
						goto resend_cmd1;
					} else {
						printk(KERN_WARNING
						       "cciss%d: retried %p too "
						       "many times\n", ctlr, c);
						status = IO_ERROR;
						goto cleanup1;
					}
				} else if (c->err_info->CommandStatus ==
					   CMD_UNABORTABLE) {
					printk(KERN_WARNING
					       "cciss%d: command could not be aborted.\n",
					       ctlr);
					status = IO_ERROR;
					goto cleanup1;
				}
				printk(KERN_WARNING "ciss ciss%d: sendcmd"
				       " Error %x \n", ctlr,
				       c->err_info->CommandStatus);
				printk(KERN_WARNING "ciss ciss%d: sendcmd"
				       " offensive info\n"
				       "  size %x\n   num %x   value %x\n",
				       ctlr,
				       c->err_info->MoreErrInfo.Invalid_Cmd.
				       offense_size,
				       c->err_info->MoreErrInfo.Invalid_Cmd.
				       offense_num,
				       c->err_info->MoreErrInfo.Invalid_Cmd.
				       offense_value);
				status = IO_ERROR;
				goto cleanup1;
			}
		}
		/* This will need changing for direct lookup completions */
		if (complete != c->busaddr) {
			if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
				BUG();	/* we are pretty much hosed if we get here. */
			}
			continue;
		} else
			done = 1;
	} while (!done);

cleanup1:
	/* unlock the data buffer from DMA */
	buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
	buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
	pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
			 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
#ifdef CONFIG_CISS_SCSI_TAPE
	/* if we saved some commands for later, process them now. */
	if (info_p->scsi_rejects.ncompletions > 0)
		do_cciss_intr(0, info_p);
#endif
	cmd_free(info_p, c, 1);
	return status;
}
/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
    ulong page_base = ((ulong) base) & PAGE_MASK;
    ulong page_offs = ((ulong) base) - page_base;
    void __iomem *page_remapped = ioremap(page_base, page_offs + size);

    return page_remapped ? (page_remapped + page_offs) : NULL;
}
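/*
 * Editorial note (illustrative, hypothetical numbers): with 4 KB pages, a
 * BAR address such as base = 0xfebf0250 gives page_base = 0xfebf0000
 * (base & PAGE_MASK) and page_offs = 0x250, so ioremap() maps from the page
 * boundary and the caller gets back page_remapped + 0x250 -- a virtual
 * pointer to the exact register block even though the mapping itself is
 * page aligned.
 */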
/*
 * Takes jobs off the request queue and sends them to the hardware, then
 * puts them on the completion queue to wait for completion.
 */
static void start_io(ctlr_info_t *h)
{
    CommandList_struct *c;

    while ((c = h->reqQ) != NULL) {
        /* can't do anything if fifo is full */
        if ((h->access.fifo_full(h))) {
            printk(KERN_WARNING "cciss: fifo full\n");
            break;
        }

        /* Get the first entry from the Request Q */
        removeQ(&(h->reqQ), c);

        /* Tell the controller execute command */
        h->access.submit_command(h, c);

        /* Put job onto the completed Q */
        addQ(&(h->cmpQ), c);
    }
}
/* Assumes that CCISS_LOCK(h->ctlr) is held. */
/* Zeros out the error record and then resends the command back */
/* to the controller */
static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
{
    /* erase the old error information */
    memset(c->err_info, 0, sizeof(ErrorInfo_struct));

    /* add it to software queue and then send it to the controller */
    addQ(&(h->reqQ), c);
    h->Qdepth++;
    if (h->Qdepth > h->maxQsinceinit)
        h->maxQsinceinit = h->Qdepth;

    start_io(h);
}
/* checks the status of the job and calls complete buffers to mark all
 * buffers for the completed job. Note that this function does not need
 * to hold the hba/queue lock.
 */
static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
                                    int timeout)
{
    int status = 1;
    int retry_cmd = 0;

    if (timeout)
        status = 0;

    if (cmd->err_info->CommandStatus != 0) {    /* an error has occurred */
        switch (cmd->err_info->CommandStatus) {
            unsigned char sense_key;
        case CMD_TARGET_STATUS:
            status = 0;

            if (cmd->err_info->ScsiStatus == 0x02) {
                printk(KERN_WARNING "cciss: cmd %p "
                       "has CHECK CONDITION "
                       " byte 2 = 0x%x\n", cmd,
                       cmd->err_info->SenseInfo[2]);
                /* check the sense key */
                sense_key = 0xf & cmd->err_info->SenseInfo[2];
                /* no status or recovered error */
                if ((sense_key == 0x0) || (sense_key == 0x1))
                    status = 1;
            } else {
                printk(KERN_WARNING "cciss: cmd %p "
                       "has SCSI Status 0x%x\n",
                       cmd, cmd->err_info->ScsiStatus);
            }
            break;
        case CMD_DATA_UNDERRUN:
            printk(KERN_WARNING "cciss: cmd %p has"
                   " completed with data underrun "
                   "reported\n", cmd);
            break;
        case CMD_DATA_OVERRUN:
            printk(KERN_WARNING "cciss: cmd %p has"
                   " completed with data overrun "
                   "reported\n", cmd);
            break;
        case CMD_INVALID:
            printk(KERN_WARNING "cciss: cmd %p is "
                   "reported invalid\n", cmd);
            status = 0;
            break;
        case CMD_PROTOCOL_ERR:
            printk(KERN_WARNING "cciss: cmd %p has "
                   "protocol error\n", cmd);
            status = 0;
            break;
        case CMD_HARDWARE_ERR:
            printk(KERN_WARNING "cciss: cmd %p had "
                   "hardware error\n", cmd);
            status = 0;
            break;
        case CMD_CONNECTION_LOST:
            printk(KERN_WARNING "cciss: cmd %p had "
                   "connection lost\n", cmd);
            status = 0;
            break;
        case CMD_ABORTED:
            printk(KERN_WARNING "cciss: cmd %p was "
                   "aborted\n", cmd);
            status = 0;
            break;
        case CMD_ABORT_FAILED:
            printk(KERN_WARNING "cciss: cmd %p reports "
                   "abort failed\n", cmd);
            status = 0;
            break;
        case CMD_UNSOLICITED_ABORT:
            printk(KERN_WARNING "cciss%d: unsolicited "
                   "abort %p\n", h->ctlr, cmd);
            if (cmd->retry_count < MAX_CMD_RETRIES) {
                retry_cmd = 1;
                printk(KERN_WARNING
                       "cciss%d: retrying %p\n", h->ctlr, cmd);
                cmd->retry_count++;
            } else
                printk(KERN_WARNING
                       "cciss%d: %p retried too "
                       "many times\n", h->ctlr, cmd);
            status = 0;
            break;
        case CMD_TIMEOUT:
            printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
            status = 0;
            break;
        default:
            printk(KERN_WARNING "cciss: cmd %p returned "
                   "unknown status %x\n", cmd,
                   cmd->err_info->CommandStatus);
            status = 0;
        }
    }
    /* We need to return this command */
    if (retry_cmd) {
        resend_cciss_cmd(h, cmd);
        return;
    }

    cmd->rq->completion_data = cmd;
    cmd->rq->errors = status;
    blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
    blk_complete_request(cmd->rq);
}
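/*
 * Editorial note: blk_complete_request() only schedules the final
 * bookkeeping; the request is actually finished from the block layer's
 * completion softirq via the cciss_softirq_done() handler that
 * cciss_init_one() registers with blk_queue_softirq_done(), using the
 * command pointer stashed in rq->completion_data just above.
 */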
/*
 * Get a request and submit it to the controller.
 */
static void do_cciss_request(request_queue_t *q)
{
    ctlr_info_t *h = q->queuedata;
    CommandList_struct *c;
    sector_t start_blk;
    int seg;
    struct request *creq;
    u64bit temp64;
    struct scatterlist tmp_sg[MAXSGENTRIES];
    drive_info_struct *drv;
    int i, dir;

    /* We call start_io here in case there is a command waiting on the
     * queue that has not been sent.
     */
    if (blk_queue_plugged(q))
        goto startio;

queue:
    creq = elv_next_request(q);
    if (!creq)
        goto startio;

    BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);

    if ((c = cmd_alloc(h, 1)) == NULL)
        goto full;

    blkdev_dequeue_request(creq);

    spin_unlock_irq(q->queue_lock);

    c->cmd_type = CMD_RWREQ;
    c->rq = creq;

    /* fill in the request */
    drv = creq->rq_disk->private_data;
    c->Header.ReplyQueue = 0;    // unused in simple mode
    /* got command from pool, so use the command block index instead */
    /* for direct lookups. */
    /* The first 2 bits are reserved for controller error reporting. */
    c->Header.Tag.lower = (c->cmdindex << 3);
    c->Header.Tag.lower |= 0x04;    /* flag for direct lookup. */
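    /*
     * Editorial note: the resulting tag is (cmdindex << 3) | 0x04 -- the two
     * low bits stay clear for the controller's error reporting, bit 2 marks
     * the tag as a direct-lookup one, and the interrupt handler recovers the
     * cmd_pool index by shifting the completed tag back down.
     */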
    c->Header.LUN.LogDev.VolId = drv->LunID;
    c->Header.LUN.LogDev.Mode = 1;
    c->Request.CDBLen = 10;    // 12 byte commands not in FW yet;
    c->Request.Type.Type = TYPE_CMD;    // It is a command.
    c->Request.Type.Attribute = ATTR_SIMPLE;
    c->Request.Type.Direction =
        (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
    c->Request.Timeout = 0;    // Don't time out
    c->Request.CDB[0] =
        (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
    start_blk = creq->sector;
#ifdef CCISS_DEBUG
    printk(KERN_DEBUG "cciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
           (int)creq->nr_sectors);
#endif              /* CCISS_DEBUG */
    seg = blk_rq_map_sg(q, creq, tmp_sg);

    /* get the DMA records for the setup */
    if (c->Request.Type.Direction == XFER_READ)
        dir = PCI_DMA_FROMDEVICE;
    else
        dir = PCI_DMA_TODEVICE;

    for (i = 0; i < seg; i++) {
        c->SG[i].Len = tmp_sg[i].length;
        temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
                                          tmp_sg[i].offset,
                                          tmp_sg[i].length, dir);
        c->SG[i].Addr.lower = temp64.val32.lower;
        c->SG[i].Addr.upper = temp64.val32.upper;
        c->SG[i].Ext = 0;    // we are not chaining
    }
    /* track how many SG entries we are using */
    if (seg > h->maxSG)
        h->maxSG = seg;

#ifdef CCISS_DEBUG
    printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
           creq->nr_sectors, seg);
#endif              /* CCISS_DEBUG */
    c->Header.SGList = c->Header.SGTotal = seg;
    if (h->cciss_read == CCISS_READ_10) {
        c->Request.CDB[1] = 0;
        c->Request.CDB[2] = (start_blk >> 24) & 0xff;    //MSB
        c->Request.CDB[3] = (start_blk >> 16) & 0xff;
        c->Request.CDB[4] = (start_blk >> 8) & 0xff;
        c->Request.CDB[5] = start_blk & 0xff;
        c->Request.CDB[6] = 0;    // (sect >> 24) & 0xff; MSB
        c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
        c->Request.CDB[8] = creq->nr_sectors & 0xff;
        c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
    } else {
        c->Request.CDBLen = 16;
        c->Request.CDB[1] = 0;
        c->Request.CDB[2] = (start_blk >> 56) & 0xff;    //MSB
        c->Request.CDB[3] = (start_blk >> 48) & 0xff;
        c->Request.CDB[4] = (start_blk >> 40) & 0xff;
        c->Request.CDB[5] = (start_blk >> 32) & 0xff;
        c->Request.CDB[6] = (start_blk >> 24) & 0xff;
        c->Request.CDB[7] = (start_blk >> 16) & 0xff;
        c->Request.CDB[8] = (start_blk >> 8) & 0xff;
        c->Request.CDB[9] = start_blk & 0xff;
        c->Request.CDB[10] = (creq->nr_sectors >> 24) & 0xff;
        c->Request.CDB[11] = (creq->nr_sectors >> 16) & 0xff;
        c->Request.CDB[12] = (creq->nr_sectors >> 8) & 0xff;
        c->Request.CDB[13] = creq->nr_sectors & 0xff;
        c->Request.CDB[14] = c->Request.CDB[15] = 0;
    }
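    /*
     * Editorial note: both branches build a standard SCSI CDB with a
     * big-endian LBA and transfer length.  READ(10)/WRITE(10) carries a
     * 32-bit LBA in bytes 2-5 and a 16-bit sector count in bytes 7-8, which
     * is why volumes past the 32-bit-LBA (~2 TB) limit force the 16-byte
     * variant: READ(16)/WRITE(16) puts a 64-bit LBA in bytes 2-9 and a
     * 32-bit count in bytes 10-13.
     */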
    spin_lock_irq(q->queue_lock);

    addQ(&(h->reqQ), c);
    h->Qdepth++;
    if (h->Qdepth > h->maxQsinceinit)
        h->maxQsinceinit = h->Qdepth;

    goto queue;
full:
    blk_stop_queue(q);
startio:
    /* We will already have the driver lock here so not need
     * to lock it.
     */
    start_io(h);
}
static inline unsigned long get_next_completion(ctlr_info_t *h)
{
#ifdef CONFIG_CISS_SCSI_TAPE
    /* Any rejects from sendcmd() lying around? Process them first */
    if (h->scsi_rejects.ncompletions == 0)
        return h->access.command_completed(h);
    else {
        struct sendcmd_reject_list *srl;
        int n;

        srl = &h->scsi_rejects;
        n = --srl->ncompletions;
        /* printk("cciss%d: processing saved reject\n", h->ctlr); */
        return srl->complete[n];
    }
#else
    return h->access.command_completed(h);
#endif
}
static inline int interrupt_pending(ctlr_info_t *h)
{
#ifdef CONFIG_CISS_SCSI_TAPE
    return (h->access.intr_pending(h)
            || (h->scsi_rejects.ncompletions > 0));
#else
    return h->access.intr_pending(h);
#endif
}
static inline long interrupt_not_for_us(ctlr_info_t *h)
{
#ifdef CONFIG_CISS_SCSI_TAPE
    return (((h->access.intr_pending(h) == 0) ||
             (h->interrupts_enabled == 0))
            && (h->scsi_rejects.ncompletions == 0));
#else
    return (((h->access.intr_pending(h) == 0) ||
             (h->interrupts_enabled == 0)));
#endif
}
static irqreturn_t do_cciss_intr(int irq, void *dev_id)
{
    ctlr_info_t *h = dev_id;
    CommandList_struct *c;
    unsigned long flags;
    __u32 a, a1, a2;

    if (interrupt_not_for_us(h))
        return IRQ_NONE;
    /*
     * If there are completed commands in the completion queue,
     * we had better do something about it.
     */
    spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
    while (interrupt_pending(h)) {
        while ((a = get_next_completion(h)) != FIFO_EMPTY) {
            a1 = a;
            if ((a & 0x04)) {
                a2 = (a >> 3);
                if (a2 >= h->nr_cmds) {
                    printk(KERN_WARNING
                           "cciss: controller cciss%d failed, stopping.\n",
                           h->ctlr);
                    fail_all_cmds(h->ctlr);
                    return IRQ_HANDLED;
                }

                c = h->cmd_pool + a2;
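                /*
                 * Editorial note: the bounds check above is the driver's
                 * sanity test on the hardware -- a tag whose decoded index
                 * lies outside cmd_pool means the controller is returning
                 * garbage, so the board is declared dead and every
                 * outstanding command is failed through fail_all_cmds()
                 * rather than indexed blindly.
                 */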
                a = c->busaddr;

            } else {
                a &= ~3;
                if ((c = h->cmpQ) == NULL) {
                    printk(KERN_WARNING
                           "cciss: Completion of %08x ignored\n", a1);
                    continue;
                }
                while (c->busaddr != a) {
                    c = c->next;
                    if (c == h->cmpQ)
                        break;
                }
            }
            /*
             * If we've found the command, take it off the
             * completion Q and free it
             */
            if (c->busaddr == a) {
                removeQ(&h->cmpQ, c);
                if (c->cmd_type == CMD_RWREQ) {
                    complete_command(h, c, 0);
                } else if (c->cmd_type == CMD_IOCTL_PEND) {
                    complete(c->waiting);
                }
#ifdef CONFIG_CISS_SCSI_TAPE
                else if (c->cmd_type == CMD_SCSI)
                    complete_scsi_command(c, 0, a1);
#endif
                continue;
            }
        }
    }

    spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
    return IRQ_HANDLED;
}
/*
 * We cannot read the structure directly, for portability we must use
 * the io access functions.
 * This is for debug only.
 */
#ifdef CCISS_DEBUG
static void print_cfg_table(CfgTable_struct *tb)
{
    int i;
    char temp_name[17];

    printk("Controller Configuration information\n");
    printk("------------------------------------\n");
    for (i = 0; i < 4; i++)
        temp_name[i] = readb(&(tb->Signature[i]));
    temp_name[4] = '\0';
    printk(" Signature = %s\n", temp_name);
    printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
    printk(" Transport methods supported = 0x%x\n",
           readl(&(tb->TransportSupport)));
    printk(" Transport methods active = 0x%x\n",
           readl(&(tb->TransportActive)));
    printk(" Requested transport Method = 0x%x\n",
           readl(&(tb->HostWrite.TransportRequest)));
    printk(" Coalesce Interrupt Delay = 0x%x\n",
           readl(&(tb->HostWrite.CoalIntDelay)));
    printk(" Coalesce Interrupt Count = 0x%x\n",
           readl(&(tb->HostWrite.CoalIntCount)));
    printk(" Max outstanding commands = %d\n",
           readl(&(tb->CmdsOutMax)));
    printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
    for (i = 0; i < 16; i++)
        temp_name[i] = readb(&(tb->ServerName[i]));
    temp_name[16] = '\0';
    printk(" Server Name = %s\n", temp_name);
    printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
}
#endif              /* CCISS_DEBUG */
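/*
 * Editorial note on find_PCI_BAR_index() below: the config-table address
 * read from the controller is expressed as a config-space offset from
 * PCI_BASE_ADDRESS_0, so the helper walks the device's BARs keeping a
 * running offset -- 4 bytes for an I/O or 32-bit/1M memory BAR, 8 bytes for
 * a 64-bit memory BAR -- until that offset matches
 * pci_bar_addr - PCI_BASE_ADDRESS_0, which identifies the resource index
 * to hand to pci_resource_start().
 */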
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
    int i, offset, mem_type, bar_type;

    if (pci_bar_addr == PCI_BASE_ADDRESS_0)    /* looking for BAR zero? */
        return 0;
    offset = 0;
    for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
        bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
        if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
            offset += 4;
        else {
            mem_type = pci_resource_flags(pdev, i) &
                PCI_BASE_ADDRESS_MEM_TYPE_MASK;
            switch (mem_type) {
            case PCI_BASE_ADDRESS_MEM_TYPE_32:
            case PCI_BASE_ADDRESS_MEM_TYPE_1M:
                offset += 4;    /* 32 bit */
                break;
            case PCI_BASE_ADDRESS_MEM_TYPE_64:
                offset += 8;
                break;
            default:    /* reserved in PCI 2.2 */
                printk(KERN_WARNING
                       "Base address is invalid\n");
                return -1;
            }
        }
        if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
            return i + 1;
    }
    return -1;
}
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use IO-APIC mode.
 */
static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
                                           struct pci_dev *pdev, __u32 board_id)
{
#ifdef CONFIG_PCI_MSI
    int err;
    struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
                                                {0, 2}, {0, 3} };

    /* Some boards advertise MSI but don't really support it */
    if ((board_id == 0x40700E11) ||
        (board_id == 0x40800E11) ||
        (board_id == 0x40820E11) || (board_id == 0x40830E11))
        goto default_int_mode;

    if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
        err = pci_enable_msix(pdev, cciss_msix_entries, 4);
        if (!err) {
            c->intr[0] = cciss_msix_entries[0].vector;
            c->intr[1] = cciss_msix_entries[1].vector;
            c->intr[2] = cciss_msix_entries[2].vector;
            c->intr[3] = cciss_msix_entries[3].vector;
            c->msix_vector = 1;
            return;
        }
        if (err > 0) {
            printk(KERN_WARNING "cciss: only %d MSI-X vectors "
                   "available\n", err);
            goto default_int_mode;
        } else {
            printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
                   err);
            goto default_int_mode;
        }
    }
    if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
        if (!pci_enable_msi(pdev)) {
            c->msi_vector = 1;
            return;
        } else {
            printk(KERN_WARNING "cciss: MSI init failed\n");
        }
    }
default_int_mode:
#endif              /* CONFIG_PCI_MSI */
    /* if we get here we're going to use the default interrupt mode */
    c->intr[SIMPLE_MODE_INT] = pdev->irq;
    return;
}
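/*
 * Editorial note: the probe path therefore tries MSI-X first (asking for
 * four vectors), falls back to plain MSI if MSI-X is unavailable or fails,
 * and finally settles for the legacy IO-APIC/INTx line from pdev->irq; only
 * c->intr[SIMPLE_MODE_INT] is actually wired up while the board runs in
 * simple mode.
 */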
static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
{
    ushort subsystem_vendor_id, subsystem_device_id, command;
    __u32 board_id, scratchpad = 0;
    __u64 cfg_offset;
    __u32 cfg_base_addr;
    __u64 cfg_base_addr_index;
    int i, err;

    /* check to see if controller has been disabled */
    /* BEFORE trying to enable it */
    (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
    if (!(command & 0x02)) {
        printk(KERN_WARNING
               "cciss: controller appears to be disabled\n");
        return -ENODEV;
    }

    err = pci_enable_device(pdev);
    if (err) {
        printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
        return err;
    }

    err = pci_request_regions(pdev, "cciss");
    if (err) {
        printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
               "aborting\n");
        return err;
    }

    subsystem_vendor_id = pdev->subsystem_vendor;
    subsystem_device_id = pdev->subsystem_device;
    board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
                subsystem_vendor_id);

#ifdef CCISS_DEBUG
    printk("command = %x\n", command);
    printk("irq = %x\n", pdev->irq);
    printk("board_id = %x\n", board_id);
#endif              /* CCISS_DEBUG */

/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
 * else we use the IO-APIC interrupt assigned to us by system ROM.
 */
    cciss_interrupt_mode(c, pdev, board_id);

    /*
     * Memory base addr is first addr; the second points to the config
     * table.
     */
    c->paddr = pci_resource_start(pdev, 0);    /* addressing mode bits already removed */
#ifdef CCISS_DEBUG
    printk("address 0 = %x\n", c->paddr);
#endif              /* CCISS_DEBUG */
    c->vaddr = remap_pci_mem(c->paddr, 0x250);

    /* Wait for the board to become ready.  (PCI hotplug needs this.)
     * We poll for up to 120 secs, once per 100ms. */
    for (i = 0; i < 1200; i++) {
        scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
        if (scratchpad == CCISS_FIRMWARE_READY)
            break;
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(HZ / 10);    /* wait 100ms */
    }
    if (scratchpad != CCISS_FIRMWARE_READY) {
        printk(KERN_WARNING "cciss: Board not ready.  Timed out.\n");
        err = -ENODEV;
        goto err_out_free_res;
    }

    /* get the address index number */
    cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
    cfg_base_addr &= (__u32) 0x0000ffff;
#ifdef CCISS_DEBUG
    printk("cfg base address = %x\n", cfg_base_addr);
#endif              /* CCISS_DEBUG */
    cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
#ifdef CCISS_DEBUG
    printk("cfg base address index = %x\n", cfg_base_addr_index);
#endif              /* CCISS_DEBUG */
    if (cfg_base_addr_index == -1) {
        printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
        err = -ENODEV;
        goto err_out_free_res;
    }

    cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
#ifdef CCISS_DEBUG
    printk("cfg offset = %x\n", cfg_offset);
#endif              /* CCISS_DEBUG */
    c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
                                                   cfg_base_addr_index) +
                                cfg_offset, sizeof(CfgTable_struct));
    c->board_id = board_id;

#ifdef CCISS_DEBUG
    print_cfg_table(c->cfgtable);
#endif              /* CCISS_DEBUG */

    for (i = 0; i < ARRAY_SIZE(products); i++) {
        if (board_id == products[i].board_id) {
            c->product_name = products[i].product_name;
            c->access = *(products[i].access);
            c->nr_cmds = products[i].nr_cmds;
            break;
        }
    }
    if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
        (readb(&c->cfgtable->Signature[1]) != 'I') ||
        (readb(&c->cfgtable->Signature[2]) != 'S') ||
        (readb(&c->cfgtable->Signature[3]) != 'S')) {
        printk("Does not appear to be a valid CISS config table\n");
        err = -ENODEV;
        goto err_out_free_res;
    }
    /* We didn't find the controller in our list. We know the
     * signature is valid. If it's an HP device let's try to
     * bind to the device and fire it up. Otherwise we bail.
     */
    if (i == ARRAY_SIZE(products)) {
        if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
            c->product_name = products[i - 1].product_name;
            c->access = *(products[i - 1].access);
            c->nr_cmds = products[i - 1].nr_cmds;
            printk(KERN_WARNING "cciss: This is an unknown "
                   "Smart Array controller.\n"
                   "cciss: Please update to the latest driver "
                   "available from www.hp.com.\n");
        } else {
            printk(KERN_WARNING "cciss: Sorry, I don't know how"
                   " to access the Smart Array controller %08lx\n",
                   (unsigned long)board_id);
            err = -ENODEV;
            goto err_out_free_res;
        }
    }
#ifdef CONFIG_X86
    {
        /* Need to enable prefetch in the SCSI core for 6400 in x86 */
        __u32 prefetch;
        prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
        prefetch |= 0x100;
        writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
    }
#endif

    /* Disabling DMA prefetch for the P600.
     * An ASIC bug may result in a prefetch beyond
     * physical memory.
     */
    if (board_id == 0x3225103C) {
        __u32 dma_prefetch;
        dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
        dma_prefetch |= 0x8000;
        writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
    }

#ifdef CCISS_DEBUG
    printk("Trying to put board into Simple mode\n");
#endif              /* CCISS_DEBUG */
    c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
    /* Update the field, and then ring the doorbell */
    writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
    writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
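    /*
     * Editorial note: this is the simple-mode handshake -- the driver writes
     * the requested transport method into the host-write section of the
     * config table, rings the doorbell with CFGTBL_ChangeReq, and then
     * (below) polls until the controller clears that doorbell bit, finally
     * checking that CFGTBL_Trans_Simple shows up in TransportActive before
     * trusting the board.
     */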
    /* under certain very rare conditions, this can take awhile.
     * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
     * as we enter this code.) */
    for (i = 0; i < MAX_CONFIG_WAIT; i++) {
        if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
            break;
        /* delay and try again */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(10);
    }

#ifdef CCISS_DEBUG
    printk(KERN_DEBUG "I counter got to %d %x\n", i,
           readl(c->vaddr + SA5_DOORBELL));
#endif              /* CCISS_DEBUG */
#ifdef CCISS_DEBUG
    print_cfg_table(c->cfgtable);
#endif              /* CCISS_DEBUG */

    if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
        printk(KERN_WARNING "cciss: unable to get board into"
               " simple mode\n");
        err = -ENODEV;
        goto err_out_free_res;
    }
    return 0;

err_out_free_res:
    /*
     * Deliberately omit pci_disable_device(): it does something nasty to
     * Smart Array controllers that pci_enable_device does not undo
     */
    pci_release_regions(pdev);
    return err;
}
/*
 * Gets information about the local volumes attached to the controller.
 */
static void cciss_getgeometry(int cntl_num)
{
    ReportLunData_struct *ld_buff;
    InquiryData_struct *inq_buff;
    int return_code;
    int i;
    int listlength = 0;
    __u32 lunid = 0;
    unsigned int block_size;
    sector_t total_size;

    ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
    if (ld_buff == NULL) {
        printk(KERN_ERR "cciss: out of memory\n");
        return;
    }
    inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
    if (inq_buff == NULL) {
        printk(KERN_ERR "cciss: out of memory\n");
        kfree(ld_buff);
        return;
    }
    /* Get the firmware version */
    return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
                          sizeof(InquiryData_struct), 0, 0, 0, NULL,
                          TYPE_CMD);
    if (return_code == IO_OK) {
        hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
        hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
        hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
        hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
    } else {    /* send command failed */
        printk(KERN_WARNING "cciss: unable to determine firmware"
               " version of controller\n");
    }
    /* Get the number of logical volumes */
    return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
                          sizeof(ReportLunData_struct), 0, 0, 0, NULL,
                          TYPE_CMD);

    if (return_code == IO_OK) {
#ifdef CCISS_DEBUG
        printk("LUN Data\n--------------------------\n");
#endif              /* CCISS_DEBUG */
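        /*
         * Editorial note: the LUN list length comes back from the
         * controller as four big-endian bytes, which are shifted back
         * together below; each entry in the list is 8 bytes, which is where
         * the later "listlength / 8" LUN count comes from.
         */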
        listlength |=
            (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
        listlength |=
            (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
        listlength |=
            (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
        listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
    } else {    /* reading number of logical volumes failed */
        printk(KERN_WARNING "cciss: report logical volume"
               " command failed\n");
    }
    hba[cntl_num]->num_luns = listlength / 8;    // 8 bytes per entry
    if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
        printk(KERN_WARNING
               "cciss: only %d logical volumes supported\n",
               CISS_MAX_LUN);
        hba[cntl_num]->num_luns = CISS_MAX_LUN;
    }
#ifdef CCISS_DEBUG
    printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
           ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
           ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
           hba[cntl_num]->num_luns);
#endif              /* CCISS_DEBUG */

    hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
    for (i = 0; i < CISS_MAX_LUN; i++) {
        if (i < hba[cntl_num]->num_luns) {
            lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
                << 24;
            lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
                << 16;
            lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
                << 8;
            lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);

            hba[cntl_num]->drv[i].LunID = lunid;

#ifdef CCISS_DEBUG
            printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
                   ld_buff->LUN[i][0], ld_buff->LUN[i][1],
                   ld_buff->LUN[i][2], ld_buff->LUN[i][3],
                   hba[cntl_num]->drv[i].LunID);
#endif              /* CCISS_DEBUG */

            /* testing to see if 16-byte CDBs are already being used */
            if (hba[cntl_num]->cciss_read == CCISS_READ_16) {
                cciss_read_capacity_16(cntl_num, i, 0,
                                       &total_size, &block_size);
            } else {
                cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);

                /* If read_capacity returns all F's the logical is >2TB */
                /* so we switch to 16-byte CDBs for all read/write ops */
                if (total_size == 0xFFFFFFFFULL) {
                    cciss_read_capacity_16(cntl_num, i, 0,
                                           &total_size, &block_size);
                    hba[cntl_num]->cciss_read = CCISS_READ_16;
                    hba[cntl_num]->cciss_write = CCISS_WRITE_16;
                } else {
                    hba[cntl_num]->cciss_read = CCISS_READ_10;
                    hba[cntl_num]->cciss_write = CCISS_WRITE_10;
                }
            }
            cciss_geometry_inquiry(cntl_num, i, 0, total_size,
                                   block_size, inq_buff,
                                   &hba[cntl_num]->drv[i]);
        } else {
            /* initialize raid_level to indicate a free space */
            hba[cntl_num]->drv[i].raid_level = -1;
        }
    }
    kfree(ld_buff);
    kfree(inq_buff);
}
/* Function to find the first free pointer into our hba[] array */
/* Returns -1 if no free entries are left. */
static int alloc_cciss_hba(void)
{
    int i;
    ctlr_info_t *p;

    for (i = 0; i < MAX_CTLR; i++) {
        if (!hba[i]) {
            p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
            if (!p)
                goto Enomem;
            p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
            if (!p->gendisk[0]) {
                kfree(p);
                goto Enomem;
            }
            hba[i] = p;
            return i;
        }
    }
    printk(KERN_WARNING "cciss: This driver supports a maximum"
           " of %d controllers.\n", MAX_CTLR);
    return -1;
Enomem:
    printk(KERN_ERR "cciss: out of memory.\n");
    return -1;
}
static void free_hba(int i)
{
    ctlr_info_t *p = hba[i];
    int n;

    hba[i] = NULL;
    for (n = 0; n < CISS_MAX_LUN; n++)
        put_disk(p->gendisk[n]);
    kfree(p);
}
/*
 * This is it.  Find all the controllers and register them.  I really hate
 * stealing all these major device numbers.
 * returns the number of block devices registered.
 */
static int __devinit cciss_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
    int i;
    int j = 0;
    int rc;
    int dac;

    i = alloc_cciss_hba();
    if (i < 0)
        return -1;

    hba[i]->busy_initializing = 1;

    if (cciss_pci_init(hba[i], pdev) != 0)
        goto clean1;

    sprintf(hba[i]->devname, "cciss%d", i);
    hba[i]->ctlr = i;
    hba[i]->pdev = pdev;

    /* configure PCI DMA stuff */
    if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
        dac = 1;
    else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
        dac = 0;
    else {
        printk(KERN_ERR "cciss: no suitable DMA available\n");
        goto clean1;
    }

    /*
     * register with the major number, or get a dynamic major number
     * by passing 0 as argument.  This is done for greater than
     * 8 controller support.
     */
    if (i < MAX_CTLR_ORIG)
        hba[i]->major = COMPAQ_CISS_MAJOR + i;
    rc = register_blkdev(hba[i]->major, hba[i]->devname);
    if (rc == -EBUSY || rc == -EINVAL) {
        printk(KERN_ERR
               "cciss: Unable to get major number %d for %s "
               "on hba %d\n", hba[i]->major, hba[i]->devname, i);
        goto clean1;
    } else {
        if (i >= MAX_CTLR_ORIG)
            hba[i]->major = rc;
    }

    /* make sure the board interrupts are off */
    hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
    if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
                    IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
        printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
               hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
        goto clean2;
    }

    printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
           hba[i]->devname, pdev->device, pci_name(pdev),
           hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");

    hba[i]->cmd_pool_bits =
        kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
                  1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
    hba[i]->cmd_pool = (CommandList_struct *)
        pci_alloc_consistent(hba[i]->pdev,
                             hba[i]->nr_cmds * sizeof(CommandList_struct),
                             &(hba[i]->cmd_pool_dhandle));
    hba[i]->errinfo_pool = (ErrorInfo_struct *)
        pci_alloc_consistent(hba[i]->pdev,
                             hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
                             &(hba[i]->errinfo_pool_dhandle));
    if ((hba[i]->cmd_pool_bits == NULL)
        || (hba[i]->cmd_pool == NULL)
        || (hba[i]->errinfo_pool == NULL)) {
        printk(KERN_ERR "cciss: out of memory");
        goto clean4;
    }
#ifdef CONFIG_CISS_SCSI_TAPE
    hba[i]->scsi_rejects.complete =
        kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
                (hba[i]->nr_cmds + 5), GFP_KERNEL);
    if (hba[i]->scsi_rejects.complete == NULL) {
        printk(KERN_ERR "cciss: out of memory");
        goto clean4;
    }
#endif
    spin_lock_init(&hba[i]->lock);

    /* Initialize the pdev driver private data.
       have it point to hba[i]. */
    pci_set_drvdata(pdev, hba[i]);
    /* command and error info recs zeroed out before
       they are used */
    memset(hba[i]->cmd_pool_bits, 0,
           ((hba[i]->nr_cmds + BITS_PER_LONG -
             1) / BITS_PER_LONG) * sizeof(unsigned long));

#ifdef CCISS_DEBUG
    printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
#endif              /* CCISS_DEBUG */

    cciss_getgeometry(i);

    cciss_scsi_setup(i);

    /* Turn the interrupts on so we can service requests */
    hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
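    /*
     * Editorial note: 2048 sectors of 512 bytes caps each request the block
     * layer will build for us at 1 MB.
     */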
    hba[i]->cciss_max_sectors = 2048;

    hba[i]->busy_initializing = 0;

    do {
        drive_info_struct *drv = &(hba[i]->drv[j]);
        struct gendisk *disk = hba[i]->gendisk[j];
        request_queue_t *q;

        /* Check if the disk was allocated already */
        if (!disk) {
            hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
            disk = hba[i]->gendisk[j];
        }

        /* Check that the disk was able to be allocated */
        if (!disk) {
            printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
            goto clean4;
        }

        q = blk_init_queue(do_cciss_request, &hba[i]->lock);
        if (!q) {
            printk(KERN_ERR
                   "cciss: unable to allocate queue for disk %d\n",
                   j);
            goto clean4;
        }
        drv->queue = q;

        q->backing_dev_info.ra_pages = READ_AHEAD;
        blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);

        /* This is a hardware imposed limit. */
        blk_queue_max_hw_segments(q, MAXSGENTRIES);

        /* This is a limit in the driver and could be eliminated. */
        blk_queue_max_phys_segments(q, MAXSGENTRIES);

        blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);

        blk_queue_softirq_done(q, cciss_softirq_done);

        q->queuedata = hba[i];
        sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
        disk->major = hba[i]->major;
        disk->first_minor = j << NWD_SHIFT;
        disk->fops = &cciss_fops;
        disk->queue = q;
        disk->private_data = drv;
        disk->driverfs_dev = &pdev->dev;
        /* we must register the controller even if no disks exist */
        /* this is for the online array utilities */
        if (!drv->heads && j)
            continue;
        blk_queue_hardsect_size(q, drv->block_size);
        set_capacity(disk, drv->nr_blocks);
        add_disk(disk);
    } while (j <= hba[i]->highest_lun);

    return 1;

clean4:
#ifdef CONFIG_CISS_SCSI_TAPE
    kfree(hba[i]->scsi_rejects.complete);
#endif
    kfree(hba[i]->cmd_pool_bits);
    if (hba[i]->cmd_pool)
        pci_free_consistent(hba[i]->pdev,
                            hba[i]->nr_cmds * sizeof(CommandList_struct),
                            hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
    if (hba[i]->errinfo_pool)
        pci_free_consistent(hba[i]->pdev,
                            hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
                            hba[i]->errinfo_pool,
                            hba[i]->errinfo_pool_dhandle);
    free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
clean2:
    unregister_blkdev(hba[i]->major, hba[i]->devname);
clean1:
    hba[i]->busy_initializing = 0;
    /* cleanup any queues that may have been initialized */
    for (j = 0; j <= hba[i]->highest_lun; j++) {
        drive_info_struct *drv = &(hba[i]->drv[j]);
        if (drv->queue)
            blk_cleanup_queue(drv->queue);
    }
    /*
     * Deliberately omit pci_disable_device(): it does something nasty to
     * Smart Array controllers that pci_enable_device does not undo
     */
    pci_release_regions(pdev);
    pci_set_drvdata(pdev, NULL);
    free_hba(i);
    return -1;
}
static void cciss_remove_one(struct pci_dev *pdev)
{
    ctlr_info_t *tmp_ptr;
    int i, j;
    char flush_buf[4];
    int return_code;

    if (pci_get_drvdata(pdev) == NULL) {
        printk(KERN_ERR "cciss: Unable to remove device\n");
        return;
    }
    tmp_ptr = pci_get_drvdata(pdev);
    i = tmp_ptr->ctlr;
    if (hba[i] == NULL) {
        printk(KERN_ERR "cciss: device appears to "
               "already be removed\n");
        return;
    }
    /* Turn board interrupts off and send the flush cache command */
    /* sendcmd will turn off interrupt, and send the flush...
     * To write all data in the battery backed cache to disks */
    memset(flush_buf, 0, 4);
    return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
                          TYPE_CMD);
    if (return_code == IO_OK) {
        printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
    } else {
        printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
    }
    free_irq(hba[i]->intr[2], hba[i]);

#ifdef CONFIG_PCI_MSI
    if (hba[i]->msix_vector)
        pci_disable_msix(hba[i]->pdev);
    else if (hba[i]->msi_vector)
        pci_disable_msi(hba[i]->pdev);
#endif              /* CONFIG_PCI_MSI */

    iounmap(hba[i]->vaddr);
    cciss_unregister_scsi(i);    /* unhook from SCSI subsystem */
    unregister_blkdev(hba[i]->major, hba[i]->devname);
    remove_proc_entry(hba[i]->devname, proc_cciss);

    /* remove it from the disk list */
    for (j = 0; j < CISS_MAX_LUN; j++) {
        struct gendisk *disk = hba[i]->gendisk[j];
        if (disk) {
            request_queue_t *q = disk->queue;

            if (disk->flags & GENHD_FL_UP)
                del_gendisk(disk);
            if (q)
                blk_cleanup_queue(q);
        }
    }

    pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
                        hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
    pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
                        hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
    kfree(hba[i]->cmd_pool_bits);
#ifdef CONFIG_CISS_SCSI_TAPE
    kfree(hba[i]->scsi_rejects.complete);
#endif
    /*
     * Deliberately omit pci_disable_device(): it does something nasty to
     * Smart Array controllers that pci_enable_device does not undo
     */
    pci_release_regions(pdev);
    pci_set_drvdata(pdev, NULL);
    free_hba(i);
}
static struct pci_driver cciss_pci_driver = {
    .probe = cciss_init_one,
    .remove = __devexit_p(cciss_remove_one),
    .id_table = cciss_pci_device_id,    /* id_table */
    .shutdown = cciss_remove_one,
};
/*
 * This is it.  Register the PCI driver information for the cards we control;
 * the OS will call our registered routines when it finds one of our cards.
 */
static int __init cciss_init(void)
{
    printk(KERN_INFO DRIVER_NAME "\n");

    /* Register for our PCI devices */
    return pci_register_driver(&cciss_pci_driver);
}
static void __exit cciss_cleanup(void)
{
    int i;

    pci_unregister_driver(&cciss_pci_driver);
    /* double check that all controller entries have been removed */
    for (i = 0; i < MAX_CTLR; i++) {
        if (hba[i] != NULL) {
            printk(KERN_WARNING "cciss: had to remove"
                   " controller %d\n", i);
            cciss_remove_one(hba[i]->pdev);
        }
    }
    remove_proc_entry("cciss", proc_root_driver);
}
static void fail_all_cmds(unsigned long ctlr)
{
    /* If we get here, the board is apparently dead. */
    ctlr_info_t *h = hba[ctlr];
    CommandList_struct *c;
    unsigned long flags;

    printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
    h->alive = 0;    /* the controller apparently died... */

    spin_lock_irqsave(CCISS_LOCK(ctlr), flags);

    pci_disable_device(h->pdev);    /* Make sure it is really dead. */

    /* move everything off the request queue onto the completed queue */
    while ((c = h->reqQ) != NULL) {
        removeQ(&(h->reqQ), c);
        addQ(&(h->cmpQ), c);
    }

    /* Now, fail everything on the completed queue with a HW error */
    while ((c = h->cmpQ) != NULL) {
        removeQ(&h->cmpQ, c);
        c->err_info->CommandStatus = CMD_HARDWARE_ERR;
        if (c->cmd_type == CMD_RWREQ) {
            complete_command(h, c, 0);
        } else if (c->cmd_type == CMD_IOCTL_PEND)
            complete(c->waiting);
#ifdef CONFIG_CISS_SCSI_TAPE
        else if (c->cmd_type == CMD_SCSI)
            complete_scsi_command(c, 0, 0);
#endif
    }
    spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
    return;
}
module_init(cciss_init);
module_exit(cciss_cleanup);