/*
 *    Disk Array driver for HP Smart Array controllers.
 *    (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *    General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 *    02111-1307, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkpg.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <asm/uaccess.h>
#include <asm/io.h>

#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/completion.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <scsi/scsi_ioctl.h>
#include <linux/cdrom.h>
#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
#define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
#define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
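/*
 * Example of the packing done by CCISS_DRIVER_VERSION above:
 * CCISS_DRIVER_VERSION(3,6,14) = (3 << 16) | (6 << 8) | 14 = 0x03060E,
 * i.e. one byte each for the major, minor and sub-minor version.
 */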
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
			" SA6i P600 P800 P400 P400i E200 E200i E500");
MODULE_VERSION("3.6.14");
MODULE_LICENSE("GPL");
#include "cciss_cmd.h"
#include "cciss.h"
#include <linux/cciss_ioctl.h>
/* define the PCI info for the cards we can control */
static const struct pci_device_id cciss_pci_device_id[] = {
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 *  nr_cmds = Number of commands supported by controller
 */
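/*
 * Example of the board_id encoding (matching the PCI table above):
 * {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070}
 * corresponds to board_id 0x40700E11, i.e. subsystem device ID 0x4070
 * in the high 16 bits and subsystem vendor ID 0x0E11 in the low 16 bits.
 */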
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5_access, 512},
	{0x40800E11, "Smart Array 5i", &SA5B_access, 512},
	{0x40820E11, "Smart Array 532", &SA5B_access, 512},
	{0x40830E11, "Smart Array 5312", &SA5B_access, 512},
	{0x409A0E11, "Smart Array 641", &SA5_access, 512},
	{0x409B0E11, "Smart Array 642", &SA5_access, 512},
	{0x409C0E11, "Smart Array 6400", &SA5_access, 512},
	{0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
	{0x40910E11, "Smart Array 6i", &SA5_access, 512},
	{0x3225103C, "Smart Array P600", &SA5_access, 512},
	{0x3223103C, "Smart Array P800", &SA5_access, 512},
	{0x3234103C, "Smart Array P400", &SA5_access, 512},
	{0x3235103C, "Smart Array P400i", &SA5_access, 512},
	{0x3211103C, "Smart Array E200i", &SA5_access, 120},
	{0x3212103C, "Smart Array E200", &SA5_access, 120},
	{0x3213103C, "Smart Array E200i", &SA5_access, 120},
	{0x3214103C, "Smart Array E200i", &SA5_access, 120},
	{0x3215103C, "Smart Array E200i", &SA5_access, 120},
	{0x3237103C, "Smart Array E500", &SA5_access, 512},
	{0x323D103C, "Smart Array P700m", &SA5_access, 512},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
};
/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

#define MAX_CTLR	32

/* Originally the cciss driver only supported 8 major numbers */
#define MAX_CTLR_ORIG	8

static ctlr_info_t *hba[MAX_CTLR];
static void do_cciss_request(struct request_queue *q);
static irqreturn_t do_cciss_intr(int irq, void *dev_id);
static int cciss_open(struct inode *inode, struct file *filep);
static int cciss_release(struct inode *inode, struct file *filep);
static int cciss_ioctl(struct inode *inode, struct file *filep,
		       unsigned int cmd, unsigned long arg);
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);

static int cciss_revalidate(struct gendisk *disk);
static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
			   int clear_all);

static void cciss_read_capacity(int ctlr, int logvol, int withirq,
				sector_t *total_size, unsigned int *block_size);
static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
				   sector_t *total_size, unsigned int *block_size);
static void cciss_geometry_inquiry(int ctlr, int logvol,
				   int withirq, sector_t total_size,
				   unsigned int block_size, InquiryData_struct *inq_buff,
				   drive_info_struct *drv);
static void cciss_getgeometry(int cntl_num);
static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
					   __u32);
static void start_io(ctlr_info_t *h);
static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
		   unsigned int use_unit_num, unsigned int log_unit,
		   __u8 page_code, unsigned char *scsi3addr, int cmd_type);
static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
			   unsigned int use_unit_num, unsigned int log_unit,
			   __u8 page_code, int cmd_type);

static void fail_all_cmds(unsigned long ctlr);

#ifdef CONFIG_PROC_FS
static void cciss_procinit(int i);
#else
static void cciss_procinit(int i)
{
}
#endif				/* CONFIG_PROC_FS */

#ifdef CONFIG_COMPAT
static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
#endif
static struct block_device_operations cciss_fops = {
	.owner = THIS_MODULE,
	.open = cciss_open,
	.release = cciss_release,
	.ioctl = cciss_ioctl,
	.getgeo = cciss_getgeo,
#ifdef CONFIG_COMPAT
	.compat_ioctl = cciss_compat_ioctl,
#endif
	.revalidate_disk = cciss_revalidate,
};
/*
 * Enqueuing and dequeuing functions for cmdlists.
 */
static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
{
	if (*Qptr == NULL) {
		*Qptr = c;
		c->next = c->prev = c;
	} else {
		c->prev = (*Qptr)->prev;
		c->next = (*Qptr);
		(*Qptr)->prev->next = c;
		(*Qptr)->prev = c;
	}
}

static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
					  CommandList_struct *c)
{
	if (c && c->next != c) {
		if (*Qptr == c)
			*Qptr = c->next;
		c->prev->next = c->next;
		c->next->prev = c->prev;
	} else {
		*Qptr = NULL;
	}
	return c;
}
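/*
 * Note on the queue representation used by addQ()/removeQ() above: each
 * command list is kept as a circular doubly-linked list, with *Qptr
 * pointing at the head (or NULL when the list is empty).  addQ() therefore
 * inserts c just before the head, i.e. at the tail of the queue, and
 * removeQ() clears *Qptr when it removes the last remaining element.
 */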
#include "cciss_scsi.c"		/* For SCSI tape support */

#define RAID_UNKNOWN 6

#ifdef CONFIG_PROC_FS

/*
 * Report information about this controller.
 */
#define ENG_GIG 1000000000
#define ENG_GIG_FACTOR (ENG_GIG/512)
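/*
 * ENG_GIG_FACTOR is the number of 512-byte sectors per "engineering"
 * gigabyte (10^9 bytes), so dividing a drive's nr_blocks by it (as
 * cciss_seq_show() does below) yields the size in GB for /proc output.
 */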
#define ENGAGE_SCSI "engage scsi"
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"UNKNOWN"
};

static struct proc_dir_entry *proc_cciss;

static void cciss_seq_show_header(struct seq_file *seq)
{
	ctlr_info_t *h = seq->private;

	seq_printf(seq, "%s: HP %s Controller\n"
		"Board ID: 0x%08lx\n"
		"Firmware Version: %c%c%c%c\n"
		"IRQ: %d\n"
		"Logical drives: %d\n"
		"Current Q depth: %d\n"
		"Current # commands on controller: %d\n"
		"Max Q depth since init: %d\n"
		"Max # commands on controller since init: %d\n"
		"Max SG entries since init: %d\n",
		h->devname,
		h->product_name,
		(unsigned long)h->board_id,
		h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
		h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
		h->num_luns,
		h->Qdepth, h->commands_outstanding,
		h->maxQsinceinit, h->max_outstanding, h->maxSG);

#ifdef CONFIG_CISS_SCSI_TAPE
	cciss_seq_tape_report(seq, h->ctlr);
#endif /* CONFIG_CISS_SCSI_TAPE */
}
static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
{
	ctlr_info_t *h = seq->private;
	unsigned ctlr = h->ctlr;
	unsigned long flags;

	/* prevent displaying bogus info during configuration
	 * or deconfiguration of a logical volume
	 */
	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
		return ERR_PTR(-EBUSY);
	}
	h->busy_configuring = 1;
	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);

	if (*pos == 0)
		cciss_seq_show_header(seq);

	return pos;
}
static int cciss_seq_show(struct seq_file *seq, void *v)
{
	sector_t vol_sz, vol_sz_frac;
	ctlr_info_t *h = seq->private;
	unsigned ctlr = h->ctlr;
	loff_t *pos = v;
	drive_info_struct *drv = &h->drv[*pos];

	if (*pos > h->highest_lun)
		return 0;

	if (drv->heads == 0)
		return 0;

	vol_sz = drv->nr_blocks;
	vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
	vol_sz_frac *= 100;
	sector_div(vol_sz_frac, ENG_GIG_FACTOR);

	if (drv->raid_level > 5)
		drv->raid_level = RAID_UNKNOWN;
	seq_printf(seq, "cciss/c%dd%d:"
			"\t%4u.%02uGB\tRAID %s\n",
			ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
			raid_label[drv->raid_level]);
	return 0;
}
static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	ctlr_info_t *h = seq->private;

	if (*pos > h->highest_lun)
		return NULL;
	*pos += 1;

	return pos;
}

static void cciss_seq_stop(struct seq_file *seq, void *v)
{
	ctlr_info_t *h = seq->private;

	/* Only reset h->busy_configuring if we succeeded in setting
	 * it during cciss_seq_start. */
	if (v == ERR_PTR(-EBUSY))
		return;

	h->busy_configuring = 0;
}
static struct seq_operations cciss_seq_ops = {
	.start = cciss_seq_start,
	.show = cciss_seq_show,
	.next = cciss_seq_next,
	.stop = cciss_seq_stop,
};
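/*
 * The seq_file iterator above walks one logical drive per position: *pos
 * runs from 0 to h->highest_lun, the controller header is printed once when
 * *pos == 0, and busy_configuring stays set from cciss_seq_start() until
 * cciss_seq_stop() so drive data cannot change underneath the reader.
 */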
static int cciss_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &cciss_seq_ops);
	struct seq_file *seq = file->private_data;

	if (!ret)
		seq->private = PDE(inode)->data;

	return ret;
}
static ssize_t
cciss_proc_write(struct file *file, const char __user *buf,
		 size_t length, loff_t *ppos)
{
	int err;
	char *buffer;

#ifndef CONFIG_CISS_SCSI_TAPE
	return -EINVAL;
#endif

	if (!buf || length > PAGE_SIZE - 1)
		return -EINVAL;

	buffer = (char *)__get_free_page(GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(buffer, buf, length))
		goto out;
	buffer[length] = '\0';

#ifdef CONFIG_CISS_SCSI_TAPE
	if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
		struct seq_file *seq = file->private_data;
		ctlr_info_t *h = seq->private;
		int rc;

		rc = cciss_engage_scsi(h->ctlr);
		if (rc != 0)
			err = -rc;
		else
			err = length;
	} else
#endif /* CONFIG_CISS_SCSI_TAPE */
		err = -EINVAL;
	/* might be nice to have "disengage" too, but it's not
	   safely possible. (only 1 module use count, lock issues.) */

out:
	free_page((unsigned long)buffer);
	return err;
}
static struct file_operations cciss_proc_fops = {
	.owner = THIS_MODULE,
	.open = cciss_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
	.write = cciss_proc_write,
};
static void __devinit cciss_procinit(int i)
{
	struct proc_dir_entry *pde;

	if (proc_cciss == NULL)
		proc_cciss = proc_mkdir("cciss", proc_root_driver);
	if (!proc_cciss)
		return;
	pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
					   S_IROTH, proc_cciss,
					   &cciss_proc_fops);
	if (!pde)
		return;

	pde->data = hba[i];
}
#endif				/* CONFIG_PROC_FS */
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  For operations that can wait for kmalloc
 * to possibly sleep, this routine can be called with get_from_pool set to 0.
 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc was.
 */
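/*
 * A minimal usage sketch of the pairing described above: a command taken
 * from the controller's pre-allocated pool with cmd_alloc(h, 1) must be
 * released with cmd_free(h, c, 1), while one obtained directly from PCI
 * consistent memory with cmd_alloc(h, 0) must be released with
 * cmd_free(h, c, 0), as the ioctl paths later in this file do.
 */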
static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
{
	CommandList_struct *c;
	int i;
	u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	if (!get_from_pool) {
		c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
			sizeof(CommandList_struct), &cmd_dma_handle);
		if (c == NULL)
			return NULL;
		memset(c, 0, sizeof(CommandList_struct));

		c->cmdindex = -1;

		c->err_info = (ErrorInfo_struct *)
		    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
					 &err_dma_handle);

		if (c->err_info == NULL) {
			pci_free_consistent(h->pdev,
				sizeof(CommandList_struct), c, cmd_dma_handle);
			return NULL;
		}
		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
	} else {		/* get it out of the controller's pool */
		do {
			i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
			if (i == h->nr_cmds)
				return NULL;
		} while (test_and_set_bit
			 (i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
#ifdef CCISS_DEBUG
		printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
#endif
		c = h->cmd_pool + i;
		memset(c, 0, sizeof(CommandList_struct));
		cmd_dma_handle = h->cmd_pool_dhandle
		    + i * sizeof(CommandList_struct);
		c->err_info = h->errinfo_pool + i;
		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
		err_dma_handle = h->errinfo_pool_dhandle
		    + i * sizeof(ErrorInfo_struct);
		h->nr_allocs++;

		c->cmdindex = i;
	}

	c->busaddr = (__u32) cmd_dma_handle;
	temp64.val = (__u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(ErrorInfo_struct);

	c->ctlr = h->ctlr;
	return c;
}
/*
 * Frees a command block that was previously allocated with cmd_alloc().
 */
static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
{
	int i;
	u64bit temp64;

	if (!got_from_pool) {
		temp64.val32.lower = c->ErrDesc.Addr.lower;
		temp64.val32.upper = c->ErrDesc.Addr.upper;
		pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
				    c->err_info, (dma_addr_t) temp64.val);
		pci_free_consistent(h->pdev, sizeof(CommandList_struct),
				    c, (dma_addr_t) c->busaddr);
	} else {
		i = c - h->cmd_pool;
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
		h->nr_frees++;
	}
}
static inline ctlr_info_t *get_host(struct gendisk *disk)
{
	return disk->queue->queuedata;
}

static inline drive_info_struct *get_drv(struct gendisk *disk)
{
	return disk->private_data;
}
/*
 * Open.  Make sure the device is really there.
 */
static int cciss_open(struct inode *inode, struct file *filep)
{
	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
	drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
#endif				/* CCISS_DEBUG */

	if (host->busy_initializing || drv->busy_configuring)
		return -EBUSY;
	/*
	 * Root is allowed to open raw volume zero even if it's not configured
	 * so array config can still work.  Root is also allowed to open any
	 * volume that has a LUN ID, so it can issue an IOCTL to reread the
	 * disk information.  I don't think I really like this,
	 * but I'm already using way too many device nodes to claim another
	 * one for "raw controller".
	 */
	if (drv->heads == 0) {
		if (iminor(inode) != 0) {	/* not node 0? */
			/* if not node 0 make sure it is a partition = 0 */
			if (iminor(inode) & 0x0f) {
				return -ENXIO;
				/* if it is, make sure we have a LUN ID */
			} else if (drv->LunID == 0) {
				return -ENXIO;
			}
		}
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}
	drv->usage_count++;
	host->usage_count++;
	return 0;
}
/*
 * Close.  Sync first.
 */
static int cciss_release(struct inode *inode, struct file *filep)
{
	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
	drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss_release %s\n",
	       inode->i_bdev->bd_disk->disk_name);
#endif				/* CCISS_DEBUG */

	drv->usage_count--;
	host->usage_count--;
	return 0;
}
#ifdef CONFIG_COMPAT

static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
{
	int ret;
	lock_kernel();
	ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
	unlock_kernel();
	return ret;
}
static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
				  unsigned long arg);
static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
				      unsigned long arg);

static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return do_ioctl(f, cmd, arg);

	case CCISS_PASSTHRU32:
		return cciss_ioctl32_passthru(f, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return cciss_ioctl32_big_passthru(f, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
				  unsigned long arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			      sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			      sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			      sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			    sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
				      unsigned long arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			      sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			      sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			      sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			    sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
#endif
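/*
 * Note on the compat path above: the 32-bit PASSTHRU structures are
 * unpacked field by field, rewritten in their native 64-bit layout in a
 * scratch user-space buffer obtained from compat_alloc_user_space(), and
 * then handed to the regular cciss_ioctl() entry point via do_ioctl();
 * only error_info is copied back into the 32-bit caller's structure.
 */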
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	drive_info_struct *drv = get_drv(bdev->bd_disk);

	if (!drv->cylinders)
		return -ENXIO;

	geo->heads = drv->heads;
	geo->sectors = drv->sectors;
	geo->cylinders = drv->cylinders;
	return 0;
}
/*
 * ioctl
 */
static int cciss_ioctl(struct inode *inode, struct file *filep,
		       unsigned int cmd, unsigned long arg)
{
	struct block_device *bdev = inode->i_bdev;
	struct gendisk *disk = bdev->bd_disk;
	ctlr_info_t *host = get_host(disk);
	drive_info_struct *drv = get_drv(disk);
	int ctlr = host->ctlr;
	void __user *argp = (void __user *)arg;

#ifdef CCISS_DEBUG
	printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
#endif				/* CCISS_DEBUG */

	switch (cmd) {
	case CCISS_GETPCIINFO:
		{
			cciss_pci_info_struct pciinfo;

			if (!arg)
				return -EINVAL;
			pciinfo.domain = pci_domain_nr(host->pdev->bus);
			pciinfo.bus = host->pdev->bus->number;
			pciinfo.dev_fn = host->pdev->devfn;
			pciinfo.board_id = host->board_id;
			if (copy_to_user
			    (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
				return -EFAULT;
			return 0;
		}
	case CCISS_GETINTINFO:
		{
			cciss_coalint_struct intinfo;
			if (!arg)
				return -EINVAL;
			intinfo.delay =
			    readl(&host->cfgtable->HostWrite.CoalIntDelay);
			intinfo.count =
			    readl(&host->cfgtable->HostWrite.CoalIntCount);
			if (copy_to_user
			    (argp, &intinfo, sizeof(cciss_coalint_struct)))
				return -EFAULT;
			return 0;
		}
	case CCISS_SETINTINFO:
		{
			cciss_coalint_struct intinfo;
			unsigned long flags;
			int i;

			if (!arg)
				return -EINVAL;
			if (!capable(CAP_SYS_ADMIN))
				return -EPERM;
			if (copy_from_user
			    (&intinfo, argp, sizeof(cciss_coalint_struct)))
				return -EFAULT;
			if ((intinfo.delay == 0) && (intinfo.count == 0)) {
				/* delay and count cannot both be 0 */
				return -EINVAL;
			}
			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
			/* Update the field, and then ring the doorbell */
			writel(intinfo.delay,
			       &(host->cfgtable->HostWrite.CoalIntDelay));
			writel(intinfo.count,
			       &(host->cfgtable->HostWrite.CoalIntCount));
			writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);

			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
				if (!(readl(host->vaddr + SA5_DOORBELL)
				      & CFGTBL_ChangeReq))
					break;
				/* delay and try again */
				udelay(1000);
			}
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
			if (i >= MAX_IOCTL_CONFIG_WAIT)
				return -EAGAIN;
			return 0;
		}
	case CCISS_GETNODENAME:
		{
			NodeName_type NodeName;
			int i;

			if (!arg)
				return -EINVAL;
			for (i = 0; i < 16; i++)
				NodeName[i] =
				    readb(&host->cfgtable->ServerName[i]);
			if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
				return -EFAULT;
			return 0;
		}
	case CCISS_SETNODENAME:
		{
			NodeName_type NodeName;
			unsigned long flags;
			int i;

			if (!arg)
				return -EINVAL;
			if (!capable(CAP_SYS_ADMIN))
				return -EPERM;

			if (copy_from_user
			    (NodeName, argp, sizeof(NodeName_type)))
				return -EFAULT;

			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);

			/* Update the field, and then ring the doorbell */
			for (i = 0; i < 16; i++)
				writeb(NodeName[i],
				       &host->cfgtable->ServerName[i]);

			writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);

			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
				if (!(readl(host->vaddr + SA5_DOORBELL)
				      & CFGTBL_ChangeReq))
					break;
				/* delay and try again */
				udelay(1000);
			}
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
			if (i >= MAX_IOCTL_CONFIG_WAIT)
				return -EAGAIN;
			return 0;
		}
	case CCISS_GETHEARTBEAT:
		{
			Heartbeat_type heartbeat;

			if (!arg)
				return -EINVAL;
			heartbeat = readl(&host->cfgtable->HeartBeat);
			if (copy_to_user
			    (argp, &heartbeat, sizeof(Heartbeat_type)))
				return -EFAULT;
			return 0;
		}
	case CCISS_GETBUSTYPES:
		{
			BusTypes_type BusTypes;

			if (!arg)
				return -EINVAL;
			BusTypes = readl(&host->cfgtable->BusTypes);
			if (copy_to_user
			    (argp, &BusTypes, sizeof(BusTypes_type)))
				return -EFAULT;
			return 0;
		}
	case CCISS_GETFIRMVER:
		{
			FirmwareVer_type firmware;

			if (!arg)
				return -EINVAL;
			memcpy(firmware, host->firm_ver, 4);

			if (copy_to_user
			    (argp, firmware, sizeof(FirmwareVer_type)))
				return -EFAULT;
			return 0;
		}
	case CCISS_GETDRIVVER:
		{
			DriverVer_type DriverVer = DRIVER_VERSION;

			if (!arg)
				return -EINVAL;

			if (copy_to_user
			    (argp, &DriverVer, sizeof(DriverVer_type)))
				return -EFAULT;
			return 0;
		}
	case CCISS_REVALIDVOLS:
		return rebuild_lun_table(host, NULL);

	case CCISS_GETLUNINFO:{
			LogvolInfo_struct luninfo;

			luninfo.LunID = drv->LunID;
			luninfo.num_opens = drv->usage_count;
			luninfo.num_parts = 0;
			if (copy_to_user(argp, &luninfo,
					 sizeof(LogvolInfo_struct)))
				return -EFAULT;
			return 0;
		}
	case CCISS_DEREGDISK:
		return rebuild_lun_table(host, disk);

	case CCISS_REGNEWD:
		return rebuild_lun_table(host, NULL);
	case CCISS_PASSTHRU:
		{
			IOCTL_Command_struct iocommand;
			CommandList_struct *c;
			char *buff = NULL;
			u64bit temp64;
			unsigned long flags;
			DECLARE_COMPLETION_ONSTACK(wait);

			if (!arg)
				return -EINVAL;

			if (!capable(CAP_SYS_RAWIO))
				return -EPERM;

			if (copy_from_user
			    (&iocommand, argp, sizeof(IOCTL_Command_struct)))
				return -EFAULT;
			if ((iocommand.buf_size < 1) &&
			    (iocommand.Request.Type.Direction != XFER_NONE)) {
				return -EINVAL;
			}
#if 0				/* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
			/* Check kmalloc limits */
			if (iocommand.buf_size > 128000)
				return -EINVAL;
#endif
			if (iocommand.buf_size > 0) {
				buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
				if (buff == NULL)
					return -EFAULT;
			}
			if (iocommand.Request.Type.Direction == XFER_WRITE) {
				/* Copy the data into the buffer we created */
				if (copy_from_user
				    (buff, iocommand.buf, iocommand.buf_size)) {
					kfree(buff);
					return -EFAULT;
				}
			} else {
				memset(buff, 0, iocommand.buf_size);
			}
			if ((c = cmd_alloc(host, 0)) == NULL) {
				kfree(buff);
				return -ENOMEM;
			}
			/* Fill in the command type */
			c->cmd_type = CMD_IOCTL_PEND;
			/* Fill in Command Header */
			c->Header.ReplyQueue = 0;	/* unused in simple mode */
			if (iocommand.buf_size > 0) {	/* buffer to fill */
				c->Header.SGList = 1;
				c->Header.SGTotal = 1;
			} else {	/* no buffers to fill */
				c->Header.SGList = 0;
				c->Header.SGTotal = 0;
			}
			c->Header.LUN = iocommand.LUN_info;
			/* use the bus address of the cmd block as the tag */
			c->Header.Tag.lower = c->busaddr;

			/* Fill in Request block */
			c->Request = iocommand.Request;

			/* Fill in the scatter gather information */
			if (iocommand.buf_size > 0) {
				temp64.val = pci_map_single(host->pdev, buff,
					iocommand.buf_size,
					PCI_DMA_BIDIRECTIONAL);
				c->SG[0].Addr.lower = temp64.val32.lower;
				c->SG[0].Addr.upper = temp64.val32.upper;
				c->SG[0].Len = iocommand.buf_size;
				c->SG[0].Ext = 0;	/* we are not chaining */
			}
			c->waiting = &wait;

			/* Put the request on the tail of the request queue */
			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
			addQ(&host->reqQ, c);
			host->Qdepth++;
			start_io(host);
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);

			wait_for_completion(&wait);

			/* unlock the buffers from DMA */
			temp64.val32.lower = c->SG[0].Addr.lower;
			temp64.val32.upper = c->SG[0].Addr.upper;
			pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
					 iocommand.buf_size,
					 PCI_DMA_BIDIRECTIONAL);

			/* Copy the error information out */
			iocommand.error_info = *(c->err_info);
			if (copy_to_user
			    (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
				kfree(buff);
				cmd_free(host, c, 0);
				return -EFAULT;
			}

			if (iocommand.Request.Type.Direction == XFER_READ) {
				/* Copy the data out of the buffer we created */
				if (copy_to_user
				    (iocommand.buf, buff, iocommand.buf_size)) {
					kfree(buff);
					cmd_free(host, c, 0);
					return -EFAULT;
				}
			}
			kfree(buff);
			cmd_free(host, c, 0);
			return 0;
		}
	case CCISS_BIG_PASSTHRU:{
			BIG_IOCTL_Command_struct *ioc;
			CommandList_struct *c;
			unsigned char **buff = NULL;
			int *buff_size = NULL;
			u64bit temp64;
			unsigned long flags;
			BYTE sg_used = 0;
			int status = 0;
			int i;
			DECLARE_COMPLETION_ONSTACK(wait);
			__u32 left;
			__u32 sz;
			BYTE __user *data_ptr;

			if (!arg)
				return -EINVAL;
			if (!capable(CAP_SYS_RAWIO))
				return -EPERM;
			ioc = (BIG_IOCTL_Command_struct *)
			    kmalloc(sizeof(*ioc), GFP_KERNEL);
			if (!ioc) {
				status = -ENOMEM;
				goto cleanup1;
			}
			if (copy_from_user(ioc, argp, sizeof(*ioc))) {
				status = -EFAULT;
				goto cleanup1;
			}
			if ((ioc->buf_size < 1) &&
			    (ioc->Request.Type.Direction != XFER_NONE)) {
				status = -EINVAL;
				goto cleanup1;
			}
			/* Check kmalloc limits using all SGs */
			if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
				status = -EINVAL;
				goto cleanup1;
			}
			if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
				status = -EINVAL;
				goto cleanup1;
			}
			buff =
			    kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
			if (!buff) {
				status = -ENOMEM;
				goto cleanup1;
			}
			buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
					    GFP_KERNEL);
			if (!buff_size) {
				status = -ENOMEM;
				goto cleanup1;
			}
			left = ioc->buf_size;
			data_ptr = ioc->buf;
			while (left) {
				sz = (left > ioc->malloc_size) ?
				    ioc->malloc_size : left;
				buff_size[sg_used] = sz;
				buff[sg_used] = kmalloc(sz, GFP_KERNEL);
				if (buff[sg_used] == NULL) {
					status = -ENOMEM;
					goto cleanup1;
				}
				if (ioc->Request.Type.Direction == XFER_WRITE) {
					if (copy_from_user
					    (buff[sg_used], data_ptr, sz)) {
						status = -ENOMEM;
						goto cleanup1;
					}
				} else {
					memset(buff[sg_used], 0, sz);
				}
				left -= sz;
				data_ptr += sz;
				sg_used++;
			}
			if ((c = cmd_alloc(host, 0)) == NULL) {
				status = -ENOMEM;
				goto cleanup1;
			}
			c->cmd_type = CMD_IOCTL_PEND;
			c->Header.ReplyQueue = 0;

			if (ioc->buf_size > 0) {
				c->Header.SGList = sg_used;
				c->Header.SGTotal = sg_used;
			} else {
				c->Header.SGList = 0;
				c->Header.SGTotal = 0;
			}
			c->Header.LUN = ioc->LUN_info;
			c->Header.Tag.lower = c->busaddr;

			c->Request = ioc->Request;
			if (ioc->buf_size > 0) {
				int i;
				for (i = 0; i < sg_used; i++) {
					temp64.val =
					    pci_map_single(host->pdev, buff[i],
						buff_size[i],
						PCI_DMA_BIDIRECTIONAL);
					c->SG[i].Addr.lower =
					    temp64.val32.lower;
					c->SG[i].Addr.upper =
					    temp64.val32.upper;
					c->SG[i].Len = buff_size[i];
					c->SG[i].Ext = 0;	/* we are not chaining */
				}
			}
			c->waiting = &wait;
			/* Put the request on the tail of the request queue */
			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
			addQ(&host->reqQ, c);
			host->Qdepth++;
			start_io(host);
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
			wait_for_completion(&wait);
			/* unlock the buffers from DMA */
			for (i = 0; i < sg_used; i++) {
				temp64.val32.lower = c->SG[i].Addr.lower;
				temp64.val32.upper = c->SG[i].Addr.upper;
				pci_unmap_single(host->pdev,
					(dma_addr_t) temp64.val, buff_size[i],
					PCI_DMA_BIDIRECTIONAL);
			}
			/* Copy the error information out */
			ioc->error_info = *(c->err_info);
			if (copy_to_user(argp, ioc, sizeof(*ioc))) {
				cmd_free(host, c, 0);
				status = -EFAULT;
				goto cleanup1;
			}
			if (ioc->Request.Type.Direction == XFER_READ) {
				/* Copy the data out of the buffer we created */
				BYTE __user *ptr = ioc->buf;
				for (i = 0; i < sg_used; i++) {
					if (copy_to_user
					    (ptr, buff[i], buff_size[i])) {
						cmd_free(host, c, 0);
						status = -EFAULT;
						goto cleanup1;
					}
					ptr += buff_size[i];
				}
			}
			cmd_free(host, c, 0);
			status = 0;
cleanup1:
			if (buff) {
				for (i = 0; i < sg_used; i++)
					kfree(buff[i]);
				kfree(buff);
			}
			kfree(buff_size);
			kfree(ioc);
			return status;
		}
	/* scsi_cmd_ioctl handles these, below, though some are not */
	/* very meaningful for cciss.  SG_IO is the main one people want. */

	case SG_GET_VERSION_NUM:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SG_IO:
	case SCSI_IOCTL_SEND_COMMAND:
		return scsi_cmd_ioctl(filep, disk->queue, disk, cmd, argp);

	/* scsi_cmd_ioctl would normally handle these, below, but */
	/* they aren't a good fit for cciss, as CD-ROMs are */
	/* not supported, and we don't have any bus/target/lun */
	/* which we present to the kernel. */

	case CDROM_SEND_PACKET:
	case CDROMCLOSETRAY:
	case CDROMEJECT:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	default:
		return -ENOTTY;
	}
}
static void cciss_check_queues(ctlr_info_t *h)
{
	int start_queue = h->next_to_run;
	int i;

	/* check to see if we have maxed out the number of commands that can
	 * be placed on the queue.  If so then exit.  We do this check here
	 * in case the interrupt we serviced was from an ioctl and did not
	 * free any new commands.
	 */
	if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
		return;

	/* We have room on the queue for more commands.  Now we need to queue
	 * them up.  We will also keep track of the next queue to run so
	 * that every queue gets a chance to be started first.
	 */
	for (i = 0; i < h->highest_lun + 1; i++) {
		int curr_queue = (start_queue + i) % (h->highest_lun + 1);
		/* make sure the disk has been added and the drive is real
		 * because this can be called from the middle of init_one.
		 */
		if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
			continue;
		blk_start_queue(h->gendisk[curr_queue]->queue);

		/* check to see if we have maxed out the number of commands
		 * that can be placed on the queue.
		 */
		if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
			if (curr_queue == start_queue) {
				h->next_to_run =
				    (start_queue + 1) % (h->highest_lun + 1);
				break;
			} else {
				h->next_to_run = curr_queue;
				break;
			}
		} else {
			curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
		}
	}
}
static void cciss_softirq_done(struct request *rq)
{
	CommandList_struct *cmd = rq->completion_data;
	ctlr_info_t *h = hba[cmd->ctlr];
	unsigned long flags;
	u64bit temp64;
	int i, ddir;

	if (cmd->Request.Type.Direction == XFER_READ)
		ddir = PCI_DMA_FROMDEVICE;
	else
		ddir = PCI_DMA_TODEVICE;

	/* command did not need to be retried */
	/* unmap the DMA mapping for all the scatter gather elements */
	for (i = 0; i < cmd->Header.SGList; i++) {
		temp64.val32.lower = cmd->SG[i].Addr.lower;
		temp64.val32.upper = cmd->SG[i].Addr.upper;
		pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
	}

#ifdef CCISS_DEBUG
	printk("Done with %p\n", rq);
#endif				/* CCISS_DEBUG */

	if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, blk_rq_bytes(rq)))
		BUG();

	spin_lock_irqsave(&h->lock, flags);
	cmd_free(h, cmd, 1);
	cciss_check_queues(h);
	spin_unlock_irqrestore(&h->lock, flags);
}
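/*
 * Sketch of the completion flow around cciss_softirq_done(): the queue is
 * registered with blk_queue_softirq_done() below, so (presumably scheduled
 * from the interrupt handler) this callback runs once per finished request.
 * It unmaps the scatter-gather DMA, ends the block-layer request, returns
 * the command to the per-controller pool, and restarts any drive queues
 * that were held back while the pool was exhausted (cciss_check_queues()).
 */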
/* This function will check the usage_count of the drive to be updated/added.
 * If the usage_count is zero then the drive information will be updated and
 * the disk will be re-registered with the kernel.  If not then it will be
 * left alone for the next reboot.  The exception to this is disk 0 which
 * will always be left registered with the kernel since it is also the
 * controller node.  Any changes to disk 0 will show up on the next
 * reboot.
 */
static void cciss_update_drive_info(int ctlr, int drv_index)
{
	ctlr_info_t *h = hba[ctlr];
	struct gendisk *disk;
	InquiryData_struct *inq_buff = NULL;
	unsigned int block_size;
	sector_t total_size;
	unsigned long flags = 0;
	int ret = 0;

	/* if the disk already exists then deregister it before proceeding */
	if (h->drv[drv_index].raid_level != -1) {
		spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
		h->drv[drv_index].busy_configuring = 1;
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		ret = deregister_disk(h->gendisk[drv_index],
				      &h->drv[drv_index], 0);
		h->drv[drv_index].busy_configuring = 0;
	}

	/* If the disk is in use return */
	if (ret)
		return;

	/* Get information about the disk and modify the driver structure */
	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	if (inq_buff == NULL)
		goto mem_msg;

	/* testing to see if 16-byte CDBs are already being used */
	if (h->cciss_read == CCISS_READ_16) {
		cciss_read_capacity_16(h->ctlr, drv_index, 1,
				       &total_size, &block_size);
		goto geo_inq;
	}

	cciss_read_capacity(ctlr, drv_index, 1,
			    &total_size, &block_size);

	/* if read_capacity returns all F's this volume is >2TB in size */
	/* so we switch to 16-byte CDBs for all read/write ops */
	if (total_size == 0xFFFFFFFFULL) {
		cciss_read_capacity_16(ctlr, drv_index, 1,
				       &total_size, &block_size);
		h->cciss_read = CCISS_READ_16;
		h->cciss_write = CCISS_WRITE_16;
	} else {
		h->cciss_read = CCISS_READ_10;
		h->cciss_write = CCISS_WRITE_10;
	}
geo_inq:
	cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
			       inq_buff, &h->drv[drv_index]);

	++h->num_luns;
	disk = h->gendisk[drv_index];
	set_capacity(disk, h->drv[drv_index].nr_blocks);

	/* if it's the controller it's already added */
	if (drv_index) {
		disk->queue = blk_init_queue(do_cciss_request, &h->lock);
		sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
		disk->major = h->major;
		disk->first_minor = drv_index << NWD_SHIFT;
		disk->fops = &cciss_fops;
		disk->private_data = &h->drv[drv_index];

		/* Set up queue information */
		blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);

		/* This is a hardware imposed limit. */
		blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);

		/* This is a limit in the driver and could be eliminated. */
		blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);

		blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);

		blk_queue_softirq_done(disk->queue, cciss_softirq_done);

		disk->queue->queuedata = hba[ctlr];

		blk_queue_hardsect_size(disk->queue,
					hba[ctlr]->drv[drv_index].block_size);

		h->drv[drv_index].queue = disk->queue;
		add_disk(disk);
	}

freeret:
	kfree(inq_buff);
	return;
mem_msg:
	printk(KERN_ERR "cciss: out of memory\n");
	goto freeret;
}
/* This function will find the first index of the controller's drive array
 * that has a -1 for the raid_level and will return that index.  This is
 * where new drives will be added.  If the index to be returned is greater
 * than the highest_lun index for the controller then highest_lun is set
 * to this new index.  If there are no available indexes then -1 is returned.
 */
static int cciss_find_free_drive_index(int ctlr)
{
	int i;

	for (i = 0; i < CISS_MAX_LUN; i++) {
		if (hba[ctlr]->drv[i].raid_level == -1) {
			if (i > hba[ctlr]->highest_lun)
				hba[ctlr]->highest_lun = i;
			return i;
		}
	}
	return -1;
}
/* This function will add and remove logical drives from the Logical
 * drive array of the controller and maintain persistency of ordering
 * so that mount points are preserved until the next reboot.  This allows
 * for the removal of logical drives in the middle of the drive array
 * without a re-ordering of those drives.
 * INPUT
 * h		= The controller to perform the operations on
 * del_disk	= The disk to remove if specified.  If the value given
 *		  is NULL then no disk is removed.
 */
static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
{
	int ctlr = h->ctlr;
	int num_luns;
	ReportLunData_struct *ld_buff = NULL;
	drive_info_struct *drv = NULL;
	int return_code;
	int listlength = 0;
	int i;
	int drv_found;
	int drv_index = 0;
	__u32 lunid = 0;
	unsigned long flags;

	/* Set busy_configuring flag for this operation */
	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		return -EBUSY;
	}
	h->busy_configuring = 1;

	/* if del_disk is NULL then we are being called to add a new disk
	 * and update the logical drive table.  If it is not NULL then
	 * we will check if the disk is in use or not.
	 */
	if (del_disk != NULL) {
		drv = get_drv(del_disk);
		drv->busy_configuring = 1;
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		return_code = deregister_disk(del_disk, drv, 1);
		drv->busy_configuring = 0;
		h->busy_configuring = 0;
		return return_code;
	} else {
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;

		ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
		if (ld_buff == NULL)
			goto mem_msg;

		return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
					      sizeof(ReportLunData_struct), 0,
					      0, 0, TYPE_CMD);

		if (return_code == IO_OK) {
			listlength =
			    be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
		} else {	/* reading number of logical volumes failed */
			printk(KERN_WARNING "cciss: report logical volume"
			       " command failed\n");
			listlength = 0;
			goto freeret;
		}

		num_luns = listlength / 8;	/* 8 bytes per entry */
		if (num_luns > CISS_MAX_LUN) {
			num_luns = CISS_MAX_LUN;
			printk(KERN_WARNING "cciss: more luns configured"
			       " on controller than can be handled by"
			       " this driver.\n");
		}

		/* Compare the controller's drive array to the driver's drive
		 * array.  Check for updates in the drive information and any
		 * new drives on the controller.
		 */
		for (i = 0; i < num_luns; i++) {
			int j;

			drv_found = 0;

			lunid = (0xff &
				 (unsigned int)(ld_buff->LUN[i][3])) << 24;
			lunid |= (0xff &
				  (unsigned int)(ld_buff->LUN[i][2])) << 16;
			lunid |= (0xff &
				  (unsigned int)(ld_buff->LUN[i][1])) << 8;
			lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
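			/*
			 * Each entry in ld_buff->LUN[] is an 8-byte LUN; the
			 * four statements above pack its first four bytes,
			 * least-significant byte first, into the 32-bit lunid
			 * that the driver compares and stores below.
			 */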
			/* Find if the LUN is already in the drive array
			 * of the controller.  If so then update its info
			 * if not in use.  If it does not exist then find
			 * the first free index and add it.
			 */
			for (j = 0; j <= h->highest_lun; j++) {
				if (h->drv[j].LunID == lunid) {
					drv_index = j;
					drv_found = 1;
				}
			}

			/* check if the drive was found already in the array */
			if (!drv_found) {
				drv_index = cciss_find_free_drive_index(ctlr);
				if (drv_index == -1)
					goto freeret;

				/* Check if the gendisk needs to be allocated */
				if (!h->gendisk[drv_index]) {
					h->gendisk[drv_index] =
					    alloc_disk(1 << NWD_SHIFT);
					if (!h->gendisk[drv_index]) {
						printk(KERN_ERR
						       "cciss: could not allocate new disk %d\n",
						       drv_index);
						goto mem_msg;
					}
				}
			}
			h->drv[drv_index].LunID = lunid;
			cciss_update_drive_info(ctlr, drv_index);
		}		/* end for */
	}			/* end else */

freeret:
	kfree(ld_buff);
	h->busy_configuring = 0;
	/* We return -1 here to tell the ACU that we have registered/updated
	 * all of the drives that we can and to keep it from calling us
	 * additional times.
	 */
	return -1;
mem_msg:
	printk(KERN_ERR "cciss: out of memory\n");
	goto freeret;
}
/* This function will deregister the disk and its queue from the
 * kernel.  It must be called with the controller lock held and the
 * drv structure's busy_configuring flag set.  Its parameters are:
 *
 * disk = This is the disk to be deregistered
 * drv  = This is the drive_info_struct associated with the disk to be
 *        deregistered.  It contains information about the disk used
 *        by the driver.
 * clear_all = This flag determines whether or not the disk information
 *             is going to be completely cleared out and the highest_lun
 *             reset.  Sometimes we want to clear out information about
 *             the disk in preparation for re-adding it.  In this case
 *             the highest_lun should be left unchanged and the LunID
 *             should not be cleared.
 */
static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
			   int clear_all)
{
	int i;
	ctlr_info_t *h = get_host(disk);

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/* make sure logical volume is NOT in use */
	if (clear_all || (h->gendisk[0] == disk)) {
		if (drv->usage_count > 1)
			return -EBUSY;
	} else if (drv->usage_count > 0)
		return -EBUSY;

	/* invalidate the devices and deregister the disk.  If it is disk
	 * zero do not deregister it but just zero out its values.  This
	 * allows us to delete disk zero but keep the controller registered.
	 */
	if (h->gendisk[0] != disk) {
		struct request_queue *q = disk->queue;
		if (disk->flags & GENHD_FL_UP)
			del_gendisk(disk);
		if (q) {
			blk_cleanup_queue(q);
			/* Set drv->queue to NULL so that we do not try
			 * to call blk_start_queue on this queue in the
			 * interrupt handler
			 */
			drv->queue = NULL;
		}
		/* If clear_all is set then we are deleting the logical
		 * drive, not just refreshing its info.  For drives
		 * other than disk 0 we will call put_disk.  We do not
		 * do this for disk 0 as we need it to be able to
		 * configure the controller.
		 */
		if (clear_all) {
			/* This isn't pretty, but we need to find the
			 * disk in our array and NULL out the pointer.
			 * This is so that we will call alloc_disk if
			 * this index is used again later.
			 */
			for (i = 0; i < CISS_MAX_LUN; i++) {
				if (h->gendisk[i] == disk) {
					h->gendisk[i] = NULL;
					break;
				}
			}
			put_disk(disk);
		}
	} else {
		set_capacity(disk, 0);
	}

	--h->num_luns;
	/* zero out the disk size info */
	drv->nr_blocks = 0;
	drv->block_size = 0;
	drv->heads = 0;
	drv->sectors = 0;
	drv->cylinders = 0;
	drv->raid_level = -1;	/* This can be used as a flag variable to
				 * indicate that this element of the drive
				 * array is free.
				 */
	if (clear_all) {
		/* check to see if it was the last disk */
		if (drv == h->drv + h->highest_lun) {
			/* if so, find the new highest lun */
			int i, newhighest = -1;
			for (i = 0; i < h->highest_lun; i++) {
				/* if the disk has size > 0, it is available */
				if (h->drv[i].heads)
					newhighest = i;
			}
			h->highest_lun = newhighest;
		}

		drv->LunID = 0;
	}
	return 0;
}
static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
		    size_t size,
		    unsigned int use_unit_num,	/* 0: address the controller,
						   1: address logical volume log_unit,
						   2: periph device address is scsi3addr */
		    unsigned int log_unit, __u8 page_code,
		    unsigned char *scsi3addr, int cmd_type)
{
	ctlr_info_t *h = hba[ctlr];
	u64bit buff_dma_handle;
	int status = IO_OK;

	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	if (buff != NULL) {
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	c->Header.Tag.lower = c->busaddr;

	c->Request.Type.Type = cmd_type;
	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case CISS_INQUIRY:
			/* If the logical unit number is 0 then this is going
			   to the controller, so it's a physical command
			   (mode = 0, target = 0) and there is nothing to
			   write.  Otherwise, if use_unit_num == 1,
			   mode = 1 (volume set addressing), target = LUNID;
			   if use_unit_num == 2,
			   mode = 0 (periph dev addr), target = scsi3addr. */
			if (use_unit_num == 1) {
				c->Header.LUN.LogDev.VolId =
				    h->drv[log_unit].LunID;
				c->Header.LUN.LogDev.Mode = 1;
			} else if (use_unit_num == 2) {
				memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
				       8);
				c->Header.LUN.LogDev.Mode = 0;
			}
			/* are we trying to read a vital product page */
			if (page_code != 0) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = page_code;
			}
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = CISS_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case CISS_REPORT_LOG:
		case CISS_REPORT_PHYS:
			/* Talking to the controller, so it's a physical
			   command (mode = 00, target = 0).  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF;	/* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;

		case CCISS_READ_CAPACITY:
			c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
			c->Header.LUN.LogDev.Mode = 1;
			c->Request.CDBLen = 10;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			break;
		case CCISS_READ_CAPACITY_16:
			c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
			c->Header.LUN.LogDev.Mode = 1;
			c->Request.CDBLen = 16;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = 0x10;
			c->Request.CDB[10] = (size >> 24) & 0xFF;
			c->Request.CDB[11] = (size >> 16) & 0xFF;
			c->Request.CDB[12] = (size >> 8) & 0xFF;
			c->Request.CDB[13] = size & 0xFF;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			break;
		case CCISS_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			break;
		default:
			printk(KERN_WARNING
			       "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
			return IO_ERROR;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {
		case 0:	/* ABORT message */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;	/* abort */
			c->Request.CDB[1] = 0;	/* abort a command */
			/* buff contains the tag of the command to abort */
			memcpy(&c->Request.CDB[4], buff, 8);
			break;
		case 1:	/* RESET message */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;	/* reset */
			c->Request.CDB[1] = 0x04;	/* reset a LUN */
			break;
		case 3:	/* No-Op message */
			c->Request.CDBLen = 1;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			break;
		default:
			printk(KERN_WARNING
			       "cciss%d: unknown message type %d\n", ctlr, cmd);
			return IO_ERROR;
		}
	} else {
		printk(KERN_WARNING
		       "cciss%d: unknown command type %d\n", ctlr, cmd_type);
		return IO_ERROR;
	}
	/* Fill in the scatter gather information */
	if (size > 0) {
		buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
							     buff, size,
							     PCI_DMA_BIDIRECTIONAL);
		c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
		c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
		c->SG[0].Len = size;
		c->SG[0].Ext = 0;	/* we are not chaining */
	}
	return status;
}
1849 static int sendcmd_withirq(__u8 cmd,
1850 int ctlr,
1851 void *buff,
1852 size_t size,
1853 unsigned int use_unit_num,
1854 unsigned int log_unit, __u8 page_code, int cmd_type)
1856 ctlr_info_t *h = hba[ctlr];
1857 CommandList_struct *c;
1858 u64bit buff_dma_handle;
1859 unsigned long flags;
1860 int return_status;
1861 DECLARE_COMPLETION_ONSTACK(wait);
1863 if ((c = cmd_alloc(h, 0)) == NULL)
1864 return -ENOMEM;
1865 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1866 log_unit, page_code, NULL, cmd_type);
1867 if (return_status != IO_OK) {
1868 cmd_free(h, c, 0);
1869 return return_status;
1871 resend_cmd2:
1872 c->waiting = &wait;
1874 /* Put the request on the tail of the queue and send it */
1875 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1876 addQ(&h->reqQ, c);
1877 h->Qdepth++;
1878 start_io(h);
1879 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1881 wait_for_completion(&wait);
1883 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1884 switch (c->err_info->CommandStatus) {
1885 case CMD_TARGET_STATUS:
1886 printk(KERN_WARNING "cciss: cmd %p has "
1887 " completed with errors\n", c);
1888 if (c->err_info->ScsiStatus) {
1889 printk(KERN_WARNING "cciss: cmd %p "
1890 "has SCSI Status = %x\n",
1891 c, c->err_info->ScsiStatus);
1894 break;
1895 case CMD_DATA_UNDERRUN:
1896 case CMD_DATA_OVERRUN:
1897 /* expected for inquire and report lun commands */
1898 break;
1899 case CMD_INVALID:
1900 printk(KERN_WARNING "cciss: Cmd %p is "
1901 "reported invalid\n", c);
1902 return_status = IO_ERROR;
1903 break;
1904 case CMD_PROTOCOL_ERR:
1905 printk(KERN_WARNING "cciss: cmd %p has "
1906 "protocol error \n", c);
1907 return_status = IO_ERROR;
1908 break;
1909 case CMD_HARDWARE_ERR:
1910 printk(KERN_WARNING "cciss: cmd %p had "
1911 " hardware error\n", c);
1912 return_status = IO_ERROR;
1913 break;
1914 case CMD_CONNECTION_LOST:
1915 printk(KERN_WARNING "cciss: cmd %p had "
1916 "connection lost\n", c);
1917 return_status = IO_ERROR;
1918 break;
1919 case CMD_ABORTED:
1920 printk(KERN_WARNING "cciss: cmd %p was "
1921 "aborted\n", c);
1922 return_status = IO_ERROR;
1923 break;
1924 case CMD_ABORT_FAILED:
1925 printk(KERN_WARNING "cciss: cmd %p reports "
1926 "abort failed\n", c);
1927 return_status = IO_ERROR;
1928 break;
1929 case CMD_UNSOLICITED_ABORT:
1930 printk(KERN_WARNING
1931 "cciss%d: unsolicited abort %p\n", ctlr, c);
1932 if (c->retry_count < MAX_CMD_RETRIES) {
1933 printk(KERN_WARNING
1934 "cciss%d: retrying %p\n", ctlr, c);
1935 c->retry_count++;
1936 /* erase the old error information */
1937 memset(c->err_info, 0,
1938 sizeof(ErrorInfo_struct));
1939 return_status = IO_OK;
1940 INIT_COMPLETION(wait);
1941 goto resend_cmd2;
1943 return_status = IO_ERROR;
1944 break;
1945 default:
1946 printk(KERN_WARNING "cciss: cmd %p returned "
1947 "unknown status %x\n", c,
1948 c->err_info->CommandStatus);
1949 return_status = IO_ERROR;
1952 /* unlock the buffers from DMA */
1953 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1954 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1955 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1956 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1957 cmd_free(h, c, 0);
1958 return return_status;
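/*
 * cciss_geometry_inquiry() below fetches the vendor-specific inquiry page
 * 0xC1, which appears to carry the volume's heads/sectors/cylinders and
 * RAID level.  If byte 8 (seemingly the RAID byte) reads back as 0xFF, a
 * fake geometry of 255 heads and 32 sectors per track is assumed.  Either
 * way the cylinder count is then recomputed as nr_blocks / (heads *
 * sectors), rounded up; for example, with the fake 255 * 32 = 8160 sectors
 * per cylinder, a 16,777,216-block volume would come out as 2057 cylinders
 * (illustrative numbers only, not taken from the driver).
 */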
1961 static void cciss_geometry_inquiry(int ctlr, int logvol,
1962 int withirq, sector_t total_size,
1963 unsigned int block_size,
1964 InquiryData_struct *inq_buff,
1965 drive_info_struct *drv)
1967 int return_code;
1968 unsigned long t;
1970 memset(inq_buff, 0, sizeof(InquiryData_struct));
1971 if (withirq)
1972 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1973 inq_buff, sizeof(*inq_buff), 1,
1974 logvol, 0xC1, TYPE_CMD);
1975 else
1976 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1977 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1978 TYPE_CMD);
1979 if (return_code == IO_OK) {
1980 if (inq_buff->data_byte[8] == 0xFF) {
1981 printk(KERN_WARNING
1982 "cciss: reading geometry failed, volume "
1983 "does not support reading geometry\n");
1984 drv->heads = 255;
1985 drv->sectors = 32; // Sectors per track
1986 drv->cylinders = total_size + 1;
1987 drv->raid_level = RAID_UNKNOWN;
1988 } else {
1989 drv->heads = inq_buff->data_byte[6];
1990 drv->sectors = inq_buff->data_byte[7];
1991 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1992 drv->cylinders += inq_buff->data_byte[5];
1993 drv->raid_level = inq_buff->data_byte[8];
1995 drv->block_size = block_size;
1996 drv->nr_blocks = total_size + 1;
1997 t = drv->heads * drv->sectors;
1998 if (t > 1) {
1999 sector_t real_size = total_size + 1;
2000 unsigned long rem = sector_div(real_size, t);
2001 if (rem)
2002 real_size++;
2003 drv->cylinders = real_size;
2005 } else { /* Get geometry failed */
2006 printk(KERN_WARNING "cciss: reading geometry failed\n");
2008 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
2009 drv->heads, drv->sectors, drv->cylinders);
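/*
 * cciss_read_capacity() below issues the 10-byte READ CAPACITY-style
 * command and converts the returned big-endian total size and block size
 * to host order.  The value left in *total_size appears to be the highest
 * addressable block, so callers print and use *total_size + 1 as the block
 * count.  On failure the capacity is zeroed and the block size defaults to
 * BLOCK_SIZE.
 */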
2012 static void
2013 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2014 unsigned int *block_size)
2016 ReadCapdata_struct *buf;
2017 int return_code;
2019 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
2020 if (!buf) {
2021 printk(KERN_WARNING "cciss: out of memory\n");
2022 return;
2025 if (withirq)
2026 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
2027 ctlr, buf, sizeof(ReadCapdata_struct),
2028 1, logvol, 0, TYPE_CMD);
2029 else
2030 return_code = sendcmd(CCISS_READ_CAPACITY,
2031 ctlr, buf, sizeof(ReadCapdata_struct),
2032 1, logvol, 0, NULL, TYPE_CMD);
2033 if (return_code == IO_OK) {
2034 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
2035 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2036 } else { /* read capacity command failed */
2037 printk(KERN_WARNING "cciss: read capacity failed\n");
2038 *total_size = 0;
2039 *block_size = BLOCK_SIZE;
2041 if (*total_size != 0)
2042 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2043 (unsigned long long)*total_size+1, *block_size);
2044 kfree(buf);
2047 static void
2048 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2050 ReadCapdata_struct_16 *buf;
2051 int return_code;
2053 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2054 if (!buf) {
2055 printk(KERN_WARNING "cciss: out of memory\n");
2056 return;
2059 if (withirq) {
2060 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2061 ctlr, buf, sizeof(ReadCapdata_struct_16),
2062 1, logvol, 0, TYPE_CMD);
2064 else {
2065 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2066 ctlr, buf, sizeof(ReadCapdata_struct_16),
2067 1, logvol, 0, NULL, TYPE_CMD);
2069 if (return_code == IO_OK) {
2070 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
2071 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2072 } else { /* read capacity command failed */
2073 printk(KERN_WARNING "cciss: read capacity failed\n");
2074 *total_size = 0;
2075 *block_size = BLOCK_SIZE;
2077 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2078 (unsigned long long)*total_size+1, *block_size);
2079 kfree(buf);
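/*
 * cciss_revalidate() below is the block-layer revalidate hook: it looks the
 * logical volume up by LunID, re-reads the capacity with either the 10- or
 * 16-byte variant depending on which CDB size the controller is using, and
 * then refreshes the geometry, the queue's hardware sector size, and the
 * gendisk capacity.
 */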
2082 static int cciss_revalidate(struct gendisk *disk)
2084 ctlr_info_t *h = get_host(disk);
2085 drive_info_struct *drv = get_drv(disk);
2086 int logvol;
2087 int FOUND = 0;
2088 unsigned int block_size;
2089 sector_t total_size;
2090 InquiryData_struct *inq_buff = NULL;
2092 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2093 if (h->drv[logvol].LunID == drv->LunID) {
2094 FOUND = 1;
2095 break;
2099 if (!FOUND)
2100 return 1;
2102 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2103 if (inq_buff == NULL) {
2104 printk(KERN_WARNING "cciss: out of memory\n");
2105 return 1;
2107 if (h->cciss_read == CCISS_READ_10) {
2108 cciss_read_capacity(h->ctlr, logvol, 1,
2109 &total_size, &block_size);
2110 } else {
2111 cciss_read_capacity_16(h->ctlr, logvol, 1,
2112 &total_size, &block_size);
2114 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2115 inq_buff, drv);
2117 blk_queue_hardsect_size(drv->queue, drv->block_size);
2118 set_capacity(disk, drv->nr_blocks);
2120 kfree(inq_buff);
2121 return 0;
2125 * Poll for a command to complete.
2126 * The memory mapped FIFO is polled for the completion.
2127 * Used only at init time; interrupts from the HBA are disabled.
2129 static unsigned long pollcomplete(int ctlr)
2131 unsigned long done;
2132 int i;
2134 /* Wait (up to 20 seconds) for a command to complete */
2136 for (i = 20 * HZ; i > 0; i--) {
2137 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2138 if (done == FIFO_EMPTY)
2139 schedule_timeout_uninterruptible(1);
2140 else
2141 return done;
2143 /* Invalid address to tell caller we ran out of time */
2144 return 1;
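/*
 * Note: the value 1 is used as the "timed out" sentinel above, presumably
 * because a real completion tag is the bus address of a command block and
 * can never equal 1; sendcmd() checks for that value explicitly.
 */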
2147 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2149 /* We get in here if sendcmd() is polling for completions
2150 and gets some command back that it wasn't expecting --
2151 something other than that which it just sent down.
2152 Ordinarily, that shouldn't happen, but it can happen when
2153 the scsi tape stuff gets into error handling mode, and
2154 starts using sendcmd() to try to abort commands and
2155 reset tape drives. In that case, sendcmd may pick up
2156 completions of commands that were sent to logical drives
2157 through the block i/o system, or cciss ioctls completing, etc.
2158 In that case, we need to save those completions for later
2159 processing by the interrupt handler.
2162 #ifdef CONFIG_CISS_SCSI_TAPE
2163 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2165 /* If it's not the scsi tape stuff doing error handling, (abort */
2166 /* or reset) then we don't expect anything weird. */
2167 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2168 #endif
2169 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2170 "Invalid command list address returned! (%lx)\n",
2171 ctlr, complete);
2172 /* not much we can do. */
2173 #ifdef CONFIG_CISS_SCSI_TAPE
2174 return 1;
2177 /* We've sent down an abort or reset, but something else
2178 has completed */
2179 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2180 /* Uh oh. No room to save it for later... */
2181 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2182 "reject list overflow, command lost!\n", ctlr);
2183 return 1;
2185 /* Save it for later */
2186 srl->complete[srl->ncompletions] = complete;
2187 srl->ncompletions++;
2188 #endif
2189 return 0;
2193 * Send a command to the controller, and wait for it to complete.
2194 * Only used at init time.
2196 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2197 1: address logical volume log_unit,
2198 2: periph device address is scsi3addr */
2199 unsigned int log_unit,
2200 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2202 CommandList_struct *c;
2203 int i;
2204 unsigned long complete;
2205 ctlr_info_t *info_p = hba[ctlr];
2206 u64bit buff_dma_handle;
2207 int status, done = 0;
2209 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2210 printk(KERN_WARNING "cciss: unable to get memory");
2211 return IO_ERROR;
2213 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2214 log_unit, page_code, scsi3addr, cmd_type);
2215 if (status != IO_OK) {
2216 cmd_free(info_p, c, 1);
2217 return status;
2219 resend_cmd1:
2221 * Disable interrupt
2223 #ifdef CCISS_DEBUG
2224 printk(KERN_DEBUG "cciss: turning intr off\n");
2225 #endif /* CCISS_DEBUG */
2226 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2228 /* Make sure there is room in the command FIFO */
2229 /* Actually it should be completely empty at this time */
2230 /* unless we are in here doing error handling for the scsi */
2231 /* tape side of the driver. */
2232 for (i = 200000; i > 0; i--) {
2233 /* if fifo isn't full go */
2234 if (!(info_p->access.fifo_full(info_p))) {
2236 break;
2238 udelay(10);
2239 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2240 " waiting!\n", ctlr);
2243 * Send the cmd
2245 info_p->access.submit_command(info_p, c);
2246 done = 0;
2247 do {
2248 complete = pollcomplete(ctlr);
2250 #ifdef CCISS_DEBUG
2251 printk(KERN_DEBUG "cciss: command completed\n");
2252 #endif /* CCISS_DEBUG */
2254 if (complete == 1) {
2255 printk(KERN_WARNING
2256 "cciss cciss%d: SendCmd Timeout out, "
2257 "No command list address returned!\n", ctlr);
2258 status = IO_ERROR;
2259 done = 1;
2260 break;
2263 /* This will need to change for direct lookup completions */
2264 if ((complete & CISS_ERROR_BIT)
2265 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2266 /* if data overrun or underrun on Report command
2267 ignore it
2269 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2270 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2271 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2272 ((c->err_info->CommandStatus ==
2273 CMD_DATA_OVERRUN) ||
2274 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2275 )) {
2276 complete = c->busaddr;
2277 } else {
2278 if (c->err_info->CommandStatus ==
2279 CMD_UNSOLICITED_ABORT) {
2280 printk(KERN_WARNING "cciss%d: "
2281 "unsolicited abort %p\n",
2282 ctlr, c);
2283 if (c->retry_count < MAX_CMD_RETRIES) {
2284 printk(KERN_WARNING
2285 "cciss%d: retrying %p\n",
2286 ctlr, c);
2287 c->retry_count++;
2288 /* erase the old error */
2289 /* information */
2290 memset(c->err_info, 0,
2291 sizeof
2292 (ErrorInfo_struct));
2293 goto resend_cmd1;
2294 } else {
2295 printk(KERN_WARNING
2296 "cciss%d: retried %p too "
2297 "many times\n", ctlr, c);
2298 status = IO_ERROR;
2299 goto cleanup1;
2301 } else if (c->err_info->CommandStatus ==
2302 CMD_UNABORTABLE) {
2303 printk(KERN_WARNING
2304 "cciss%d: command could not be aborted.\n",
2305 ctlr);
2306 status = IO_ERROR;
2307 goto cleanup1;
2309 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2310 " Error %x \n", ctlr,
2311 c->err_info->CommandStatus);
2312 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2313 " offensive info\n"
2314 " size %x\n num %x value %x\n",
2315 ctlr,
2316 c->err_info->MoreErrInfo.Invalid_Cmd.
2317 offense_size,
2318 c->err_info->MoreErrInfo.Invalid_Cmd.
2319 offense_num,
2320 c->err_info->MoreErrInfo.Invalid_Cmd.
2321 offense_value);
2322 status = IO_ERROR;
2323 goto cleanup1;
2326 /* This will need changing for direct lookup completions */
2327 if (complete != c->busaddr) {
2328 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2329 BUG(); /* we are pretty much hosed if we get here. */
2331 continue;
2332 } else
2333 done = 1;
2334 } while (!done);
2336 cleanup1:
2337 /* unlock the data buffer from DMA */
2338 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2339 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2340 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2341 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2342 #ifdef CONFIG_CISS_SCSI_TAPE
2343 /* if we saved some commands for later, process them now. */
2344 if (info_p->scsi_rejects.ncompletions > 0)
2345 do_cciss_intr(0, info_p);
2346 #endif
2347 cmd_free(info_p, c, 1);
2348 return status;
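/*
 * remap_pci_mem() below rounds the physical address down to a page
 * boundary, ioremap()s page_offs + size bytes, and hands back the mapping
 * shifted by the in-page offset.  Illustrative example (made-up values):
 * with 4 KB pages, base 0xfe001250 maps the page at 0xfe001000 and the
 * caller receives the returned pointer + 0x250.
 */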
2352 * Map (physical) PCI mem into (virtual) kernel space
2354 static void __iomem *remap_pci_mem(ulong base, ulong size)
2356 ulong page_base = ((ulong) base) & PAGE_MASK;
2357 ulong page_offs = ((ulong) base) - page_base;
2358 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2360 return page_remapped ? (page_remapped + page_offs) : NULL;
2364 * Takes jobs off the Q and sends them to the hardware, then puts them on
2365 * the completion Q to wait for completion.
2367 static void start_io(ctlr_info_t *h)
2369 CommandList_struct *c;
2371 while ((c = h->reqQ) != NULL) {
2372 /* can't do anything if fifo is full */
2373 if ((h->access.fifo_full(h))) {
2374 printk(KERN_WARNING "cciss: fifo full\n");
2375 break;
2378 /* Get the first entry from the Request Q */
2379 removeQ(&(h->reqQ), c);
2380 h->Qdepth--;
2382 /* Tell the controller to execute the command */
2383 h->access.submit_command(h, c);
2385 /* Put job onto the completed Q */
2386 addQ(&(h->cmpQ), c);
2390 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2391 /* Zeros out the error record and then resends the command back */
2392 /* to the controller */
2393 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2395 /* erase the old error information */
2396 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2398 /* add it to software queue and then send it to the controller */
2399 addQ(&(h->reqQ), c);
2400 h->Qdepth++;
2401 if (h->Qdepth > h->maxQsinceinit)
2402 h->maxQsinceinit = h->Qdepth;
2404 start_io(h);
2407 static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
2408 unsigned int msg_byte, unsigned int host_byte,
2409 unsigned int driver_byte)
2411 /* inverse of macros in scsi.h */
2412 return (scsi_status_byte & 0xff) |
2413 ((msg_byte & 0xff) << 8) |
2414 ((host_byte & 0xff) << 16) |
2415 ((driver_byte & 0xff) << 24);
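/*
 * The layout produced above is: bits 0-7 SCSI status byte, 8-15 message
 * byte, 16-23 host byte, 24-31 driver byte.  For example, using the
 * standard values SAM_STAT_CHECK_CONDITION == 0x02, DID_OK == 0 and
 * DRIVER_OK == 0, make_status_bytes(SAM_STAT_CHECK_CONDITION, 0, DID_OK,
 * DRIVER_OK) evaluates to 0x00000002.
 */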
2418 static inline int evaluate_target_status(CommandList_struct *cmd)
2420 unsigned char sense_key;
2421 unsigned char status_byte, msg_byte, host_byte, driver_byte;
2422 int error_value;
2424 /* If we get in here, it means we got "target status", that is, scsi status */
2425 status_byte = cmd->err_info->ScsiStatus;
2426 driver_byte = DRIVER_OK;
2427 msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
2429 if (blk_pc_request(cmd->rq))
2430 host_byte = DID_PASSTHROUGH;
2431 else
2432 host_byte = DID_OK;
2434 error_value = make_status_bytes(status_byte, msg_byte,
2435 host_byte, driver_byte);
2437 if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
2438 if (!blk_pc_request(cmd->rq))
2439 printk(KERN_WARNING "cciss: cmd %p "
2440 "has SCSI Status 0x%x\n",
2441 cmd, cmd->err_info->ScsiStatus);
2442 return error_value;
2445 /* check the sense key */
2446 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2447 /* no status or recovered error */
2448 if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
2449 error_value = 0;
2451 if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
2452 if (error_value != 0)
2453 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
2454 " sense key = 0x%x\n", cmd, sense_key);
2455 return error_value;
2458 /* SG_IO or similar, copy sense data back */
2459 if (cmd->rq->sense) {
2460 if (cmd->rq->sense_len > cmd->err_info->SenseLen)
2461 cmd->rq->sense_len = cmd->err_info->SenseLen;
2462 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
2463 cmd->rq->sense_len);
2464 } else
2465 cmd->rq->sense_len = 0;
2467 return error_value;
2470 /* checks the status of the job and marks all
2471 * buffers for the completed job. Note that this function does not need
2472 * to hold the hba/queue lock.
2474 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2475 int timeout)
2477 int retry_cmd = 0;
2478 struct request *rq = cmd->rq;
2480 rq->errors = 0;
2482 if (timeout)
2483 rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);
2485 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
2486 goto after_error_processing;
2488 switch (cmd->err_info->CommandStatus) {
2489 case CMD_TARGET_STATUS:
2490 rq->errors = evaluate_target_status(cmd);
2491 break;
2492 case CMD_DATA_UNDERRUN:
2493 if (blk_fs_request(cmd->rq)) {
2494 printk(KERN_WARNING "cciss: cmd %p has"
2495 " completed with data underrun "
2496 "reported\n", cmd);
2497 cmd->rq->data_len = cmd->err_info->ResidualCnt;
2499 break;
2500 case CMD_DATA_OVERRUN:
2501 if (blk_fs_request(cmd->rq))
2502 printk(KERN_WARNING "cciss: cmd %p has"
2503 " completed with data overrun "
2504 "reported\n", cmd);
2505 break;
2506 case CMD_INVALID:
2507 printk(KERN_WARNING "cciss: cmd %p is "
2508 "reported invalid\n", cmd);
2509 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2510 cmd->err_info->CommandStatus, DRIVER_OK,
2511 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2512 break;
2513 case CMD_PROTOCOL_ERR:
2514 printk(KERN_WARNING "cciss: cmd %p has "
2515 "protocol error \n", cmd);
2516 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2517 cmd->err_info->CommandStatus, DRIVER_OK,
2518 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2519 break;
2520 case CMD_HARDWARE_ERR:
2521 printk(KERN_WARNING "cciss: cmd %p had "
2522 " hardware error\n", cmd);
2523 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2524 cmd->err_info->CommandStatus, DRIVER_OK,
2525 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2526 break;
2527 case CMD_CONNECTION_LOST:
2528 printk(KERN_WARNING "cciss: cmd %p had "
2529 "connection lost\n", cmd);
2530 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2531 cmd->err_info->CommandStatus, DRIVER_OK,
2532 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2533 break;
2534 case CMD_ABORTED:
2535 printk(KERN_WARNING "cciss: cmd %p was "
2536 "aborted\n", cmd);
2537 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2538 cmd->err_info->CommandStatus, DRIVER_OK,
2539 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2540 break;
2541 case CMD_ABORT_FAILED:
2542 printk(KERN_WARNING "cciss: cmd %p reports "
2543 "abort failed\n", cmd);
2544 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2545 cmd->err_info->CommandStatus, DRIVER_OK,
2546 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2547 break;
2548 case CMD_UNSOLICITED_ABORT:
2549 printk(KERN_WARNING "cciss%d: unsolicited "
2550 "abort %p\n", h->ctlr, cmd);
2551 if (cmd->retry_count < MAX_CMD_RETRIES) {
2552 retry_cmd = 1;
2553 printk(KERN_WARNING
2554 "cciss%d: retrying %p\n", h->ctlr, cmd);
2555 cmd->retry_count++;
2556 } else
2557 printk(KERN_WARNING
2558 "cciss%d: %p retried too "
2559 "many times\n", h->ctlr, cmd);
2560 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2561 cmd->err_info->CommandStatus, DRIVER_OK,
2562 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2563 break;
2564 case CMD_TIMEOUT:
2565 printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
2566 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2567 cmd->err_info->CommandStatus, DRIVER_OK,
2568 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2569 break;
2570 default:
2571 printk(KERN_WARNING "cciss: cmd %p returned "
2572 "unknown status %x\n", cmd,
2573 cmd->err_info->CommandStatus);
2574 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2575 cmd->err_info->CommandStatus, DRIVER_OK,
2576 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2579 after_error_processing:
2581 /* We need to return this command */
2582 if (retry_cmd) {
2583 resend_cciss_cmd(h, cmd);
2584 return;
2586 cmd->rq->completion_data = cmd;
2587 blk_complete_request(cmd->rq);
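/*
 * do_cciss_request() below is the block-layer request function: it pulls
 * requests off the elevator, builds a 10- or 16-byte READ/WRITE CDB
 * depending on h->cciss_read/h->cciss_write, DMA-maps the scatterlist,
 * tags the command for direct lookup ((cmdindex << 3) | 0x04) and queues
 * it for start_io().  The request is handed back to the block layer with
 * blk_complete_request() and finished in softirq context.
 */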
2591 * Get a request and submit it to the controller.
2593 static void do_cciss_request(struct request_queue *q)
2595 ctlr_info_t *h = q->queuedata;
2596 CommandList_struct *c;
2597 sector_t start_blk;
2598 int seg;
2599 struct request *creq;
2600 u64bit temp64;
2601 struct scatterlist tmp_sg[MAXSGENTRIES];
2602 drive_info_struct *drv;
2603 int i, dir;
2605 /* We call start_io here in case there is a command waiting on the
2606 * queue that has not been sent.
2608 if (blk_queue_plugged(q))
2609 goto startio;
2611 queue:
2612 creq = elv_next_request(q);
2613 if (!creq)
2614 goto startio;
2616 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2618 if ((c = cmd_alloc(h, 1)) == NULL)
2619 goto full;
2621 blkdev_dequeue_request(creq);
2623 spin_unlock_irq(q->queue_lock);
2625 c->cmd_type = CMD_RWREQ;
2626 c->rq = creq;
2628 /* fill in the request */
2629 drv = creq->rq_disk->private_data;
2630 c->Header.ReplyQueue = 0; // unused in simple mode
2631 /* got command from pool, so use the command block index instead */
2632 /* for direct lookups. */
2633 /* The first 2 bits are reserved for controller error reporting. */
2634 c->Header.Tag.lower = (c->cmdindex << 3);
2635 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2636 c->Header.LUN.LogDev.VolId = drv->LunID;
2637 c->Header.LUN.LogDev.Mode = 1;
2638 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2639 c->Request.Type.Type = TYPE_CMD; // It is a command.
2640 c->Request.Type.Attribute = ATTR_SIMPLE;
2641 c->Request.Type.Direction =
2642 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2643 c->Request.Timeout = 0; // Don't time out
2644 c->Request.CDB[0] =
2645 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2646 start_blk = creq->sector;
2647 #ifdef CCISS_DEBUG
2648 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
2649 (int)creq->nr_sectors);
2650 #endif /* CCISS_DEBUG */
2652 sg_init_table(tmp_sg, MAXSGENTRIES);
2653 seg = blk_rq_map_sg(q, creq, tmp_sg);
2655 /* get the DMA records for the setup */
2656 if (c->Request.Type.Direction == XFER_READ)
2657 dir = PCI_DMA_FROMDEVICE;
2658 else
2659 dir = PCI_DMA_TODEVICE;
2661 for (i = 0; i < seg; i++) {
2662 c->SG[i].Len = tmp_sg[i].length;
2663 temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
2664 tmp_sg[i].offset,
2665 tmp_sg[i].length, dir);
2666 c->SG[i].Addr.lower = temp64.val32.lower;
2667 c->SG[i].Addr.upper = temp64.val32.upper;
2668 c->SG[i].Ext = 0; // we are not chaining
2670 /* track how many SG entries we are using */
2671 if (seg > h->maxSG)
2672 h->maxSG = seg;
2674 #ifdef CCISS_DEBUG
2675 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2676 creq->nr_sectors, seg);
2677 #endif /* CCISS_DEBUG */
2679 c->Header.SGList = c->Header.SGTotal = seg;
2680 if (likely(blk_fs_request(creq))) {
2681 if(h->cciss_read == CCISS_READ_10) {
2682 c->Request.CDB[1] = 0;
2683 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2684 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2685 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2686 c->Request.CDB[5] = start_blk & 0xff;
2687 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2688 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2689 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2690 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2691 } else {
2692 u32 upper32 = upper_32_bits(start_blk);
2694 c->Request.CDBLen = 16;
2695 c->Request.CDB[1]= 0;
2696 c->Request.CDB[2]= (upper32 >> 24) & 0xff; //MSB
2697 c->Request.CDB[3]= (upper32 >> 16) & 0xff;
2698 c->Request.CDB[4]= (upper32 >> 8) & 0xff;
2699 c->Request.CDB[5]= upper32 & 0xff;
2700 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2701 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2702 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2703 c->Request.CDB[9]= start_blk & 0xff;
2704 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2705 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2706 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2707 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2708 c->Request.CDB[14] = c->Request.CDB[15] = 0;
2710 } else if (blk_pc_request(creq)) {
2711 c->Request.CDBLen = creq->cmd_len;
2712 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
2713 } else {
2714 printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
2715 BUG();
2718 spin_lock_irq(q->queue_lock);
2720 addQ(&(h->reqQ), c);
2721 h->Qdepth++;
2722 if (h->Qdepth > h->maxQsinceinit)
2723 h->maxQsinceinit = h->Qdepth;
2725 goto queue;
2726 full:
2727 blk_stop_queue(q);
2728 startio:
2729 /* We will already have the driver lock here so there is no need
2730 * to lock it.
2732 start_io(h);
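/*
 * The helpers below hide the CONFIG_CISS_SCSI_TAPE special case:
 * completions that sendcmd() polled off the FIFO but did not own are
 * parked in h->scsi_rejects, and get_next_completion() hands those back to
 * the interrupt path before reading the hardware FIFO again, while
 * interrupt_pending()/interrupt_not_for_us() treat saved rejects as
 * pending work.
 */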
2735 static inline unsigned long get_next_completion(ctlr_info_t *h)
2737 #ifdef CONFIG_CISS_SCSI_TAPE
2738 /* Any rejects from sendcmd() lying around? Process them first */
2739 if (h->scsi_rejects.ncompletions == 0)
2740 return h->access.command_completed(h);
2741 else {
2742 struct sendcmd_reject_list *srl;
2743 int n;
2744 srl = &h->scsi_rejects;
2745 n = --srl->ncompletions;
2746 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2747 printk("p");
2748 return srl->complete[n];
2750 #else
2751 return h->access.command_completed(h);
2752 #endif
2755 static inline int interrupt_pending(ctlr_info_t *h)
2757 #ifdef CONFIG_CISS_SCSI_TAPE
2758 return (h->access.intr_pending(h)
2759 || (h->scsi_rejects.ncompletions > 0));
2760 #else
2761 return h->access.intr_pending(h);
2762 #endif
2765 static inline long interrupt_not_for_us(ctlr_info_t *h)
2767 #ifdef CONFIG_CISS_SCSI_TAPE
2768 return (((h->access.intr_pending(h) == 0) ||
2769 (h->interrupts_enabled == 0))
2770 && (h->scsi_rejects.ncompletions == 0));
2771 #else
2772 return (((h->access.intr_pending(h) == 0) ||
2773 (h->interrupts_enabled == 0)));
2774 #endif
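/*
 * In do_cciss_intr() below, a completion tag with bit 0x04 set is a
 * "direct lookup" tag: bits 31:3 hold the index into h->cmd_pool, so the
 * command is found without walking a list.  Otherwise the low bits are
 * masked off and the tag is matched against c->busaddr on the completion
 * queue.
 */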
2777 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2779 ctlr_info_t *h = dev_id;
2780 CommandList_struct *c;
2781 unsigned long flags;
2782 __u32 a, a1, a2;
2784 if (interrupt_not_for_us(h))
2785 return IRQ_NONE;
2787 * If there are completed commands in the completion queue,
2788 * we had better do something about it.
2790 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2791 while (interrupt_pending(h)) {
2792 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2793 a1 = a;
2794 if ((a & 0x04)) {
2795 a2 = (a >> 3);
2796 if (a2 >= h->nr_cmds) {
2797 printk(KERN_WARNING
2798 "cciss: controller cciss%d failed, stopping.\n",
2799 h->ctlr);
2800 fail_all_cmds(h->ctlr);
2801 return IRQ_HANDLED;
2804 c = h->cmd_pool + a2;
2805 a = c->busaddr;
2807 } else {
2808 a &= ~3;
2809 if ((c = h->cmpQ) == NULL) {
2810 printk(KERN_WARNING
2811 "cciss: Completion of %08x ignored\n",
2812 a1);
2813 continue;
2815 while (c->busaddr != a) {
2816 c = c->next;
2817 if (c == h->cmpQ)
2818 break;
2822 * If we've found the command, take it off the
2823 * completion Q and free it
2825 if (c->busaddr == a) {
2826 removeQ(&h->cmpQ, c);
2827 if (c->cmd_type == CMD_RWREQ) {
2828 complete_command(h, c, 0);
2829 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2830 complete(c->waiting);
2832 # ifdef CONFIG_CISS_SCSI_TAPE
2833 else if (c->cmd_type == CMD_SCSI)
2834 complete_scsi_command(c, 0, a1);
2835 # endif
2836 continue;
2841 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2842 return IRQ_HANDLED;
2846 * We cannot read the structure directly; for portability we must use
2847 * the io functions.
2848 * This is for debug only.
2850 #ifdef CCISS_DEBUG
2851 static void print_cfg_table(CfgTable_struct *tb)
2853 int i;
2854 char temp_name[17];
2856 printk("Controller Configuration information\n");
2857 printk("------------------------------------\n");
2858 for (i = 0; i < 4; i++)
2859 temp_name[i] = readb(&(tb->Signature[i]));
2860 temp_name[4] = '\0';
2861 printk(" Signature = %s\n", temp_name);
2862 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2863 printk(" Transport methods supported = 0x%x\n",
2864 readl(&(tb->TransportSupport)));
2865 printk(" Transport methods active = 0x%x\n",
2866 readl(&(tb->TransportActive)));
2867 printk(" Requested transport Method = 0x%x\n",
2868 readl(&(tb->HostWrite.TransportRequest)));
2869 printk(" Coalesce Interrupt Delay = 0x%x\n",
2870 readl(&(tb->HostWrite.CoalIntDelay)));
2871 printk(" Coalesce Interrupt Count = 0x%x\n",
2872 readl(&(tb->HostWrite.CoalIntCount)));
2873 printk(" Max outstanding commands = 0x%d\n",
2874 readl(&(tb->CmdsOutMax)));
2875 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2876 for (i = 0; i < 16; i++)
2877 temp_name[i] = readb(&(tb->ServerName[i]));
2878 temp_name[16] = '\0';
2879 printk(" Server Name = %s\n", temp_name);
2880 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2882 #endif /* CCISS_DEBUG */
2884 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2886 int i, offset, mem_type, bar_type;
2887 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2888 return 0;
2889 offset = 0;
2890 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2891 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2892 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2893 offset += 4;
2894 else {
2895 mem_type = pci_resource_flags(pdev, i) &
2896 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2897 switch (mem_type) {
2898 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2899 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2900 offset += 4; /* 32 bit */
2901 break;
2902 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2903 offset += 8;
2904 break;
2905 default: /* reserved in PCI 2.2 */
2906 printk(KERN_WARNING
2907 "Base address is invalid\n");
2908 return -1;
2909 break;
2912 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2913 return i + 1;
2915 return -1;
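/*
 * find_PCI_BAR_index() above walks the standard BARs, adding 4 bytes of
 * config space for each I/O or 32-bit memory BAR and 8 bytes for each
 * 64-bit memory BAR, until the running offset matches the requested base
 * address register.  The controller appears to report the config-table BAR
 * as a PCI config-space register address, and this helper turns that into
 * a pci resource index for pci_resource_start().
 */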
2918 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2919 * controllers that are capable. If not, we use IO-APIC mode.
2922 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2923 struct pci_dev *pdev, __u32 board_id)
2925 #ifdef CONFIG_PCI_MSI
2926 int err;
2927 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2928 {0, 2}, {0, 3}
2931 /* Some boards advertise MSI but don't really support it */
2932 if ((board_id == 0x40700E11) ||
2933 (board_id == 0x40800E11) ||
2934 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2935 goto default_int_mode;
2937 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2938 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2939 if (!err) {
2940 c->intr[0] = cciss_msix_entries[0].vector;
2941 c->intr[1] = cciss_msix_entries[1].vector;
2942 c->intr[2] = cciss_msix_entries[2].vector;
2943 c->intr[3] = cciss_msix_entries[3].vector;
2944 c->msix_vector = 1;
2945 return;
2947 if (err > 0) {
2948 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2949 "available\n", err);
2950 goto default_int_mode;
2951 } else {
2952 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2953 err);
2954 goto default_int_mode;
2957 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2958 if (!pci_enable_msi(pdev)) {
2959 c->msi_vector = 1;
2960 } else {
2961 printk(KERN_WARNING "cciss: MSI init failed\n");
2964 default_int_mode:
2965 #endif /* CONFIG_PCI_MSI */
2966 /* if we get here we're going to use the default interrupt mode */
2967 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2968 return;
2971 static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2973 ushort subsystem_vendor_id, subsystem_device_id, command;
2974 __u32 board_id, scratchpad = 0;
2975 __u64 cfg_offset;
2976 __u32 cfg_base_addr;
2977 __u64 cfg_base_addr_index;
2978 int i, err;
2980 /* check to see if controller has been disabled */
2981 /* BEFORE trying to enable it */
2982 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2983 if (!(command & 0x02)) {
2984 printk(KERN_WARNING
2985 "cciss: controller appears to be disabled\n");
2986 return -ENODEV;
2989 err = pci_enable_device(pdev);
2990 if (err) {
2991 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2992 return err;
2995 err = pci_request_regions(pdev, "cciss");
2996 if (err) {
2997 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2998 "aborting\n");
2999 return err;
3002 subsystem_vendor_id = pdev->subsystem_vendor;
3003 subsystem_device_id = pdev->subsystem_device;
3004 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
3005 subsystem_vendor_id);
3007 #ifdef CCISS_DEBUG
3008 printk("command = %x\n", command);
3009 printk("irq = %x\n", pdev->irq);
3010 printk("board_id = %x\n", board_id);
3011 #endif /* CCISS_DEBUG */
3013 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
3014 * else we use the IO-APIC interrupt assigned to us by system ROM.
3016 cciss_interrupt_mode(c, pdev, board_id);
3019 * Memory base addr is the first addr; the second points to the config
3020 * table
3023 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
3024 #ifdef CCISS_DEBUG
3025 printk("address 0 = %x\n", c->paddr);
3026 #endif /* CCISS_DEBUG */
3027 c->vaddr = remap_pci_mem(c->paddr, 0x250);
3029 /* Wait for the board to become ready. (PCI hotplug needs this.)
3030 * We poll for up to 120 secs, once per 100ms. */
3031 for (i = 0; i < 1200; i++) {
3032 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
3033 if (scratchpad == CCISS_FIRMWARE_READY)
3034 break;
3035 set_current_state(TASK_INTERRUPTIBLE);
3036 schedule_timeout(HZ / 10); /* wait 100ms */
3038 if (scratchpad != CCISS_FIRMWARE_READY) {
3039 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
3040 err = -ENODEV;
3041 goto err_out_free_res;
3044 /* get the address index number */
3045 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
3046 cfg_base_addr &= (__u32) 0x0000ffff;
3047 #ifdef CCISS_DEBUG
3048 printk("cfg base address = %x\n", cfg_base_addr);
3049 #endif /* CCISS_DEBUG */
3050 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3051 #ifdef CCISS_DEBUG
3052 printk("cfg base address index = %x\n", cfg_base_addr_index);
3053 #endif /* CCISS_DEBUG */
3054 if (cfg_base_addr_index == -1) {
3055 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
3056 err = -ENODEV;
3057 goto err_out_free_res;
3060 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
3061 #ifdef CCISS_DEBUG
3062 printk("cfg offset = %x\n", cfg_offset);
3063 #endif /* CCISS_DEBUG */
3064 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3065 cfg_base_addr_index) +
3066 cfg_offset, sizeof(CfgTable_struct));
3067 c->board_id = board_id;
3069 #ifdef CCISS_DEBUG
3070 print_cfg_table(c->cfgtable);
3071 #endif /* CCISS_DEBUG */
3073 for (i = 0; i < ARRAY_SIZE(products); i++) {
3074 if (board_id == products[i].board_id) {
3075 c->product_name = products[i].product_name;
3076 c->access = *(products[i].access);
3077 c->nr_cmds = products[i].nr_cmds;
3078 break;
3081 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
3082 (readb(&c->cfgtable->Signature[1]) != 'I') ||
3083 (readb(&c->cfgtable->Signature[2]) != 'S') ||
3084 (readb(&c->cfgtable->Signature[3]) != 'S')) {
3085 printk("Does not appear to be a valid CISS config table\n");
3086 err = -ENODEV;
3087 goto err_out_free_res;
3089 /* We didn't find the controller in our list. We know the
3090 * signature is valid. If it's an HP device let's try to
3091 * bind to the device and fire it up. Otherwise we bail.
3093 if (i == ARRAY_SIZE(products)) {
3094 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
3095 c->product_name = products[i-1].product_name;
3096 c->access = *(products[i-1].access);
3097 c->nr_cmds = products[i-1].nr_cmds;
3098 printk(KERN_WARNING "cciss: This is an unknown "
3099 "Smart Array controller.\n"
3100 "cciss: Please update to the latest driver "
3101 "available from www.hp.com.\n");
3102 } else {
3103 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3104 " to access the Smart Array controller %08lx\n"
3105 , (unsigned long)board_id);
3106 err = -ENODEV;
3107 goto err_out_free_res;
3110 #ifdef CONFIG_X86
3112 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3113 __u32 prefetch;
3114 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3115 prefetch |= 0x100;
3116 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3118 #endif
3120 /* Disabling DMA prefetch and refetch for the P600.
3121 * An ASIC bug may result in accesses to invalid memory addresses.
3122 * We've disabled prefetch for some time now. Testing with XEN
3123 * kernels revealed a bug in the refetch if dom0 resides on a P600.
3125 if(board_id == 0x3225103C) {
3126 __u32 dma_prefetch;
3127 __u32 dma_refetch;
3128 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3129 dma_prefetch |= 0x8000;
3130 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3131 pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
3132 dma_refetch |= 0x1;
3133 pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
3136 #ifdef CCISS_DEBUG
3137 printk("Trying to put board into Simple mode\n");
3138 #endif /* CCISS_DEBUG */
3139 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3140 /* Update the field, and then ring the doorbell */
3141 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3142 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
3144 /* under certain very rare conditions, this can take a while.
3145 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3146 * as we enter this code.) */
3147 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3148 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3149 break;
3150 /* delay and try again */
3151 set_current_state(TASK_INTERRUPTIBLE);
3152 schedule_timeout(10);
3155 #ifdef CCISS_DEBUG
3156 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3157 readl(c->vaddr + SA5_DOORBELL));
3158 #endif /* CCISS_DEBUG */
3159 #ifdef CCISS_DEBUG
3160 print_cfg_table(c->cfgtable);
3161 #endif /* CCISS_DEBUG */
3163 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3164 printk(KERN_WARNING "cciss: unable to get board into"
3165 " simple mode\n");
3166 err = -ENODEV;
3167 goto err_out_free_res;
3169 return 0;
3171 err_out_free_res:
3173 * Deliberately omit pci_disable_device(): it does something nasty to
3174 * Smart Array controllers that pci_enable_device does not undo
3176 pci_release_regions(pdev);
3177 return err;
3181 * Gets information about the local volumes attached to the controller.
3183 static void cciss_getgeometry(int cntl_num)
3185 ReportLunData_struct *ld_buff;
3186 InquiryData_struct *inq_buff;
3187 int return_code;
3188 int i;
3189 int listlength = 0;
3190 __u32 lunid = 0;
3191 unsigned block_size;
3192 sector_t total_size;
3194 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3195 if (ld_buff == NULL) {
3196 printk(KERN_ERR "cciss: out of memory\n");
3197 return;
3199 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3200 if (inq_buff == NULL) {
3201 printk(KERN_ERR "cciss: out of memory\n");
3202 kfree(ld_buff);
3203 return;
3205 /* Get the firmware version */
3206 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3207 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3208 TYPE_CMD);
3209 if (return_code == IO_OK) {
3210 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3211 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3212 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3213 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3214 } else { /* send command failed */
3216 printk(KERN_WARNING "cciss: unable to determine firmware"
3217 " version of controller\n");
3219 /* Get the number of logical volumes */
3220 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3221 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3222 TYPE_CMD);
3224 if (return_code == IO_OK) {
3225 #ifdef CCISS_DEBUG
3226 printk("LUN Data\n--------------------------\n");
3227 #endif /* CCISS_DEBUG */
3229 listlength |=
3230 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3231 listlength |=
3232 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3233 listlength |=
3234 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3235 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3236 } else { /* reading number of logical volumes failed */
3238 printk(KERN_WARNING "cciss: report logical volume"
3239 " command failed\n");
3240 listlength = 0;
3242 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3243 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3244 printk(KERN_ERR
3245 "ciss: only %d number of logical volumes supported\n",
3246 CISS_MAX_LUN);
3247 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3249 #ifdef CCISS_DEBUG
3250 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3251 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3252 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3253 hba[cntl_num]->num_luns);
3254 #endif /* CCISS_DEBUG */
3256 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3257 for (i = 0; i < CISS_MAX_LUN; i++) {
3258 if (i < hba[cntl_num]->num_luns) {
3259 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3260 << 24;
3261 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3262 << 16;
3263 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3264 << 8;
3265 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3267 hba[cntl_num]->drv[i].LunID = lunid;
3269 #ifdef CCISS_DEBUG
3270 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3271 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3272 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3273 hba[cntl_num]->drv[i].LunID);
3274 #endif /* CCISS_DEBUG */
3276 /* testing to see if 16-byte CDBs are already being used */
3277 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3278 cciss_read_capacity_16(cntl_num, i, 0,
3279 &total_size, &block_size);
3280 goto geo_inq;
3282 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3284 /* If read_capacity returns all F's the logical drive is >2TB */
3285 /* so we switch to 16-byte CDBs for all read/write ops */
3286 if(total_size == 0xFFFFFFFFULL) {
3287 cciss_read_capacity_16(cntl_num, i, 0,
3288 &total_size, &block_size);
3289 hba[cntl_num]->cciss_read = CCISS_READ_16;
3290 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3291 } else {
3292 hba[cntl_num]->cciss_read = CCISS_READ_10;
3293 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3295 geo_inq:
3296 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3297 block_size, inq_buff,
3298 &hba[cntl_num]->drv[i]);
3299 } else {
3300 /* initialize raid_level to indicate a free space */
3301 hba[cntl_num]->drv[i].raid_level = -1;
3304 kfree(ld_buff);
3305 kfree(inq_buff);
3308 /* Function to find the first free pointer into our hba[] array */
3309 /* Returns -1 if no free entries are left. */
3310 static int alloc_cciss_hba(void)
3312 int i;
3314 for (i = 0; i < MAX_CTLR; i++) {
3315 if (!hba[i]) {
3316 ctlr_info_t *p;
3318 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3319 if (!p)
3320 goto Enomem;
3321 p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
3322 if (!p->gendisk[0]) {
3323 kfree(p);
3324 goto Enomem;
3326 hba[i] = p;
3327 return i;
3330 printk(KERN_WARNING "cciss: This driver supports a maximum"
3331 " of %d controllers.\n", MAX_CTLR);
3332 return -1;
3333 Enomem:
3334 printk(KERN_ERR "cciss: out of memory.\n");
3335 return -1;
3338 static void free_hba(int i)
3340 ctlr_info_t *p = hba[i];
3341 int n;
3343 hba[i] = NULL;
3344 for (n = 0; n < CISS_MAX_LUN; n++)
3345 put_disk(p->gendisk[n]);
3346 kfree(p);
3350 * This is it. Find all the controllers and register them. I really hate
3351 * stealing all these major device numbers.
3352 * returns the number of block devices registered.
3354 static int __devinit cciss_init_one(struct pci_dev *pdev,
3355 const struct pci_device_id *ent)
3357 int i;
3358 int j = 0;
3359 int rc;
3360 int dac;
3362 i = alloc_cciss_hba();
3363 if (i < 0)
3364 return -1;
3366 hba[i]->busy_initializing = 1;
3368 if (cciss_pci_init(hba[i], pdev) != 0)
3369 goto clean1;
3371 sprintf(hba[i]->devname, "cciss%d", i);
3372 hba[i]->ctlr = i;
3373 hba[i]->pdev = pdev;
3375 /* configure PCI DMA stuff */
3376 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3377 dac = 1;
3378 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3379 dac = 0;
3380 else {
3381 printk(KERN_ERR "cciss: no suitable DMA available\n");
3382 goto clean1;
3386 * register with the major number, or get a dynamic major number
3387 * by passing 0 as argument. This is done for greater than
3388 * 8 controller support.
3390 if (i < MAX_CTLR_ORIG)
3391 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3392 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3393 if (rc == -EBUSY || rc == -EINVAL) {
3394 printk(KERN_ERR
3395 "cciss: Unable to get major number %d for %s "
3396 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3397 goto clean1;
3398 } else {
3399 if (i >= MAX_CTLR_ORIG)
3400 hba[i]->major = rc;
3403 /* make sure the board interrupts are off */
3404 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3405 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3406 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3407 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3408 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3409 goto clean2;
3412 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3413 hba[i]->devname, pdev->device, pci_name(pdev),
3414 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3416 hba[i]->cmd_pool_bits =
3417 kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
3418 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3419 hba[i]->cmd_pool = (CommandList_struct *)
3420 pci_alloc_consistent(hba[i]->pdev,
3421 hba[i]->nr_cmds * sizeof(CommandList_struct),
3422 &(hba[i]->cmd_pool_dhandle));
3423 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3424 pci_alloc_consistent(hba[i]->pdev,
3425 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3426 &(hba[i]->errinfo_pool_dhandle));
3427 if ((hba[i]->cmd_pool_bits == NULL)
3428 || (hba[i]->cmd_pool == NULL)
3429 || (hba[i]->errinfo_pool == NULL)) {
3430 printk(KERN_ERR "cciss: out of memory");
3431 goto clean4;
3433 #ifdef CONFIG_CISS_SCSI_TAPE
3434 hba[i]->scsi_rejects.complete =
3435 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3436 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3437 if (hba[i]->scsi_rejects.complete == NULL) {
3438 printk(KERN_ERR "cciss: out of memory");
3439 goto clean4;
3441 #endif
3442 spin_lock_init(&hba[i]->lock);
3444 /* Initialize the pdev driver private data;
3445 have it point to hba[i]. */
3446 pci_set_drvdata(pdev, hba[i]);
3447 /* command and error info recs zeroed out before
3448 they are used */
3449 memset(hba[i]->cmd_pool_bits, 0,
3450 ((hba[i]->nr_cmds + BITS_PER_LONG -
3451 1) / BITS_PER_LONG) * sizeof(unsigned long));
3453 #ifdef CCISS_DEBUG
3454 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3455 #endif /* CCISS_DEBUG */
3457 cciss_getgeometry(i);
3459 cciss_scsi_setup(i);
3461 /* Turn the interrupts on so we can service requests */
3462 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3464 cciss_procinit(i);
3466 hba[i]->cciss_max_sectors = 2048;
3468 hba[i]->busy_initializing = 0;
3470 do {
3471 drive_info_struct *drv = &(hba[i]->drv[j]);
3472 struct gendisk *disk = hba[i]->gendisk[j];
3473 struct request_queue *q;
3475 /* Check if the disk was allocated already */
3476 if (!disk){
3477 hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
3478 disk = hba[i]->gendisk[j];
3481 /* Check that the disk was able to be allocated */
3482 if (!disk) {
3483 printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
3484 goto clean4;
3487 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3488 if (!q) {
3489 printk(KERN_ERR
3490 "cciss: unable to allocate queue for disk %d\n",
3492 goto clean4;
3494 drv->queue = q;
3496 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3498 /* This is a hardware imposed limit. */
3499 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3501 /* This is a limit in the driver and could be eliminated. */
3502 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3504 blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);
3506 blk_queue_softirq_done(q, cciss_softirq_done);
3508 q->queuedata = hba[i];
3509 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3510 disk->major = hba[i]->major;
3511 disk->first_minor = j << NWD_SHIFT;
3512 disk->fops = &cciss_fops;
3513 disk->queue = q;
3514 disk->private_data = drv;
3515 disk->driverfs_dev = &pdev->dev;
3516 /* we must register the controller even if no disks exist */
3517 /* this is for the online array utilities */
3518 if (!drv->heads && j)
3519 continue;
3520 blk_queue_hardsect_size(q, drv->block_size);
3521 set_capacity(disk, drv->nr_blocks);
3522 add_disk(disk);
3523 j++;
3524 } while (j <= hba[i]->highest_lun);
3526 return 1;
3528 clean4:
3529 #ifdef CONFIG_CISS_SCSI_TAPE
3530 kfree(hba[i]->scsi_rejects.complete);
3531 #endif
3532 kfree(hba[i]->cmd_pool_bits);
3533 if (hba[i]->cmd_pool)
3534 pci_free_consistent(hba[i]->pdev,
3535 hba[i]->nr_cmds * sizeof(CommandList_struct),
3536 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3537 if (hba[i]->errinfo_pool)
3538 pci_free_consistent(hba[i]->pdev,
3539 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3540 hba[i]->errinfo_pool,
3541 hba[i]->errinfo_pool_dhandle);
3542 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3543 clean2:
3544 unregister_blkdev(hba[i]->major, hba[i]->devname);
3545 clean1:
3546 hba[i]->busy_initializing = 0;
3547 /* cleanup any queues that may have been initialized */
3548 for (j=0; j <= hba[i]->highest_lun; j++){
3549 drive_info_struct *drv = &(hba[i]->drv[j]);
3550 if (drv->queue)
3551 blk_cleanup_queue(drv->queue);
3554 * Deliberately omit pci_disable_device(): it does something nasty to
3555 * Smart Array controllers that pci_enable_device does not undo
3557 pci_release_regions(pdev);
3558 pci_set_drvdata(pdev, NULL);
3559 free_hba(i);
3560 return -1;
3563 static void cciss_shutdown(struct pci_dev *pdev)
3565 ctlr_info_t *tmp_ptr;
3566 int i;
3567 char flush_buf[4];
3568 int return_code;
3570 tmp_ptr = pci_get_drvdata(pdev);
3571 if (tmp_ptr == NULL)
3572 return;
3573 i = tmp_ptr->ctlr;
3574 if (hba[i] == NULL)
3575 return;
3577 /* Turn board interrupts off and send the flush cache command */
3578 /* sendcmd will turn off interrupts and send the flush
3579 * to write all data in the battery backed cache to disks */
3580 memset(flush_buf, 0, 4);
3581 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3582 TYPE_CMD);
3583 if (return_code == IO_OK) {
3584 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3585 } else {
3586 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
3588 free_irq(hba[i]->intr[2], hba[i]);
3591 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3593 ctlr_info_t *tmp_ptr;
3594 int i, j;
3596 if (pci_get_drvdata(pdev) == NULL) {
3597 printk(KERN_ERR "cciss: Unable to remove device \n");
3598 return;
3600 tmp_ptr = pci_get_drvdata(pdev);
3601 i = tmp_ptr->ctlr;
3602 if (hba[i] == NULL) {
3603 printk(KERN_ERR "cciss: device appears to "
3604 "already be removed \n");
3605 return;
3608 remove_proc_entry(hba[i]->devname, proc_cciss);
3609 unregister_blkdev(hba[i]->major, hba[i]->devname);
3611 /* remove it from the disk list */
3612 for (j = 0; j < CISS_MAX_LUN; j++) {
3613 struct gendisk *disk = hba[i]->gendisk[j];
3614 if (disk) {
3615 struct request_queue *q = disk->queue;
3617 if (disk->flags & GENHD_FL_UP)
3618 del_gendisk(disk);
3619 if (q)
3620 blk_cleanup_queue(q);
3624 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3626 cciss_shutdown(pdev);
3628 #ifdef CONFIG_PCI_MSI
3629 if (hba[i]->msix_vector)
3630 pci_disable_msix(hba[i]->pdev);
3631 else if (hba[i]->msi_vector)
3632 pci_disable_msi(hba[i]->pdev);
3633 #endif /* CONFIG_PCI_MSI */
3635 iounmap(hba[i]->vaddr);
3637 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3638 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3639 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3640 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3641 kfree(hba[i]->cmd_pool_bits);
3642 #ifdef CONFIG_CISS_SCSI_TAPE
3643 kfree(hba[i]->scsi_rejects.complete);
3644 #endif
3646 * Deliberately omit pci_disable_device(): it does something nasty to
3647 * Smart Array controllers that pci_enable_device does not undo
3649 pci_release_regions(pdev);
3650 pci_set_drvdata(pdev, NULL);
3651 free_hba(i);
3654 static struct pci_driver cciss_pci_driver = {
3655 .name = "cciss",
3656 .probe = cciss_init_one,
3657 .remove = __devexit_p(cciss_remove_one),
3658 .id_table = cciss_pci_device_id, /* id_table */
3659 .shutdown = cciss_shutdown,
3663 * This is it. Register the PCI driver information for the cards we control;
3664 * the OS will call our registered routines when it finds one of our cards.
3666 static int __init cciss_init(void)
3668 printk(KERN_INFO DRIVER_NAME "\n");
3670 /* Register for our PCI devices */
3671 return pci_register_driver(&cciss_pci_driver);
3674 static void __exit cciss_cleanup(void)
3676 int i;
3678 pci_unregister_driver(&cciss_pci_driver);
3679 /* double check that all controller entries have been removed */
3680 for (i = 0; i < MAX_CTLR; i++) {
3681 if (hba[i] != NULL) {
3682 printk(KERN_WARNING "cciss: had to remove"
3683 " controller %d\n", i);
3684 cciss_remove_one(hba[i]->pdev);
3687 remove_proc_entry("cciss", proc_root_driver);
3690 static void fail_all_cmds(unsigned long ctlr)
3692 /* If we get here, the board is apparently dead. */
3693 ctlr_info_t *h = hba[ctlr];
3694 CommandList_struct *c;
3695 unsigned long flags;
3697 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3698 h->alive = 0; /* the controller apparently died... */
3700 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3702 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3704 /* move everything off the request queue onto the completed queue */
3705 while ((c = h->reqQ) != NULL) {
3706 removeQ(&(h->reqQ), c);
3707 h->Qdepth--;
3708 addQ(&(h->cmpQ), c);
3711 /* Now, fail everything on the completed queue with a HW error */
3712 while ((c = h->cmpQ) != NULL) {
3713 removeQ(&h->cmpQ, c);
3714 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3715 if (c->cmd_type == CMD_RWREQ) {
3716 complete_command(h, c, 0);
3717 } else if (c->cmd_type == CMD_IOCTL_PEND)
3718 complete(c->waiting);
3719 #ifdef CONFIG_CISS_SCSI_TAPE
3720 else if (c->cmd_type == CMD_SCSI)
3721 complete_scsi_command(c, 0, 0);
3722 #endif
3724 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3725 return;
3728 module_init(cciss_init);
3729 module_exit(cciss_cleanup);