Linux 2.6.17.7
drivers/block/cciss.c
1 /*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
23 #include <linux/config.h> /* CONFIG_PROC_FS */
24 #include <linux/module.h>
25 #include <linux/interrupt.h>
26 #include <linux/types.h>
27 #include <linux/pci.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/major.h>
32 #include <linux/fs.h>
33 #include <linux/bio.h>
34 #include <linux/blkpg.h>
35 #include <linux/timer.h>
36 #include <linux/proc_fs.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/compat.h>
41 #include <linux/blktrace_api.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/blkdev.h>
47 #include <linux/genhd.h>
48 #include <linux/completion.h>
50 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
51 #define DRIVER_NAME "HP CISS Driver (v 2.6.10)"
52 #define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,10)
54 /* Embedded module documentation macros - see modules.h */
55 MODULE_AUTHOR("Hewlett-Packard Company");
56 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.10");
57 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
58 " SA6i P600 P800 P400 P400i E200 E200i");
59 MODULE_LICENSE("GPL");
61 #include "cciss_cmd.h"
62 #include "cciss.h"
63 #include <linux/cciss_ioctl.h>
65 /* define the PCI info for the cards we can control */
66 static const struct pci_device_id cciss_pci_device_id[] = {
67 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
68 0x0E11, 0x4070, 0, 0, 0},
69 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
70 0x0E11, 0x4080, 0, 0, 0},
71 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
72 0x0E11, 0x4082, 0, 0, 0},
73 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
74 0x0E11, 0x4083, 0, 0, 0},
75 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
76 0x0E11, 0x409A, 0, 0, 0},
77 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
78 0x0E11, 0x409B, 0, 0, 0},
79 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
80 0x0E11, 0x409C, 0, 0, 0},
81 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
82 0x0E11, 0x409D, 0, 0, 0},
83 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
84 0x0E11, 0x4091, 0, 0, 0},
85 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
86 0x103C, 0x3225, 0, 0, 0},
87 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
88 0x103c, 0x3223, 0, 0, 0},
89 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
90 0x103c, 0x3234, 0, 0, 0},
91 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
92 0x103c, 0x3235, 0, 0, 0},
93 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
94 0x103c, 0x3211, 0, 0, 0},
95 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
96 0x103c, 0x3212, 0, 0, 0},
97 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
98 0x103c, 0x3213, 0, 0, 0},
99 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
100 0x103c, 0x3214, 0, 0, 0},
101 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
102 0x103c, 0x3215, 0, 0, 0},
103 {0,}
105 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
107 #define NR_PRODUCTS ARRAY_SIZE(products)
109 /* board_id = Subsystem Device ID and Vendor ID packed into one 32-bit value
110 * product = Marketing Name for the board
111 * access = Address of the struct of function pointers
113 static struct board_type products[] = {
114 { 0x40700E11, "Smart Array 5300", &SA5_access },
115 { 0x40800E11, "Smart Array 5i", &SA5B_access},
116 { 0x40820E11, "Smart Array 532", &SA5B_access},
117 { 0x40830E11, "Smart Array 5312", &SA5B_access},
118 { 0x409A0E11, "Smart Array 641", &SA5_access},
119 { 0x409B0E11, "Smart Array 642", &SA5_access},
120 { 0x409C0E11, "Smart Array 6400", &SA5_access},
121 { 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
122 { 0x40910E11, "Smart Array 6i", &SA5_access},
123 { 0x3225103C, "Smart Array P600", &SA5_access},
124 { 0x3223103C, "Smart Array P800", &SA5_access},
125 { 0x3234103C, "Smart Array P400", &SA5_access},
126 { 0x3235103C, "Smart Array P400i", &SA5_access},
127 { 0x3211103C, "Smart Array E200i", &SA5_access},
128 { 0x3212103C, "Smart Array E200", &SA5_access},
129 { 0x3213103C, "Smart Array E200i", &SA5_access},
130 { 0x3214103C, "Smart Array E200i", &SA5_access},
131 { 0x3215103C, "Smart Array E200i", &SA5_access},
134 /* How long to wait (in milliseconds) for board to go into simple mode */
135 #define MAX_CONFIG_WAIT 30000
136 #define MAX_IOCTL_CONFIG_WAIT 1000
138 /* define how many times we will try a command because of bus resets */
139 #define MAX_CMD_RETRIES 3
141 #define READ_AHEAD 1024
142 #define NR_CMDS 384 /* #commands that can be outstanding */
143 #define MAX_CTLR 32
145 /* Originally the cciss driver only supported 8 major numbers */
146 #define MAX_CTLR_ORIG 8
149 static ctlr_info_t *hba[MAX_CTLR];
151 static void do_cciss_request(request_queue_t *q);
152 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs);
153 static int cciss_open(struct inode *inode, struct file *filep);
154 static int cciss_release(struct inode *inode, struct file *filep);
155 static int cciss_ioctl(struct inode *inode, struct file *filep,
156 unsigned int cmd, unsigned long arg);
157 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
159 static int revalidate_allvol(ctlr_info_t *host);
160 static int cciss_revalidate(struct gendisk *disk);
161 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
162 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, int clear_all);
164 static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
165 int withirq, unsigned int *total_size, unsigned int *block_size);
166 static void cciss_geometry_inquiry(int ctlr, int logvol,
167 int withirq, unsigned int total_size,
168 unsigned int block_size, InquiryData_struct *inq_buff,
169 drive_info_struct *drv);
170 static void cciss_getgeometry(int cntl_num);
171 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *, __u32);
172 static void start_io( ctlr_info_t *h);
173 static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size,
174 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
175 unsigned char *scsi3addr, int cmd_type);
176 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
177 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
178 int cmd_type);
180 static void fail_all_cmds(unsigned long ctlr);
182 #ifdef CONFIG_PROC_FS
183 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
184 int length, int *eof, void *data);
185 static void cciss_procinit(int i);
186 #else
187 static void cciss_procinit(int i) {}
188 #endif /* CONFIG_PROC_FS */
190 #ifdef CONFIG_COMPAT
191 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
192 #endif
194 static struct block_device_operations cciss_fops = {
195 .owner = THIS_MODULE,
196 .open = cciss_open,
197 .release = cciss_release,
198 .ioctl = cciss_ioctl,
199 .getgeo = cciss_getgeo,
200 #ifdef CONFIG_COMPAT
201 .compat_ioctl = cciss_compat_ioctl,
202 #endif
203 .revalidate_disk= cciss_revalidate,
207 * Enqueuing and dequeuing functions for cmdlists.
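 * The pending-command queue (e.g. h->reqQ) is kept as a circular, doubly
 * linked list; *Qptr always points at the current head element. Callers
 * are expected to hold the controller spinlock (CCISS_LOCK) around
 * addQ()/removeQ().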
209 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
210 {
211 if (*Qptr == NULL) {
212 *Qptr = c;
213 c->next = c->prev = c;
214 } else {
215 c->prev = (*Qptr)->prev;
216 c->next = (*Qptr);
217 (*Qptr)->prev->next = c;
218 (*Qptr)->prev = c;
219 }
220 }
222 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
223 CommandList_struct *c)
224 {
225 if (c && c->next != c) {
226 if (*Qptr == c) *Qptr = c->next;
227 c->prev->next = c->next;
228 c->next->prev = c->prev;
229 } else {
230 *Qptr = NULL;
231 }
232 return c;
233 }
235 #include "cciss_scsi.c" /* For SCSI tape support */
237 #ifdef CONFIG_PROC_FS
240 * Report information about this controller.
242 #define ENG_GIG 1000000000
243 #define ENG_GIG_FACTOR (ENG_GIG/512)
244 #define RAID_UNKNOWN 6
245 static const char *raid_label[] = {"0","4","1(1+0)","5","5+1","ADG",
246 "UNKNOWN"};
248 static struct proc_dir_entry *proc_cciss;
250 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
251 int length, int *eof, void *data)
253 off_t pos = 0;
254 off_t len = 0;
255 int size, i, ctlr;
256 ctlr_info_t *h = (ctlr_info_t*)data;
257 drive_info_struct *drv;
258 unsigned long flags;
259 sector_t vol_sz, vol_sz_frac;
261 ctlr = h->ctlr;
263 /* prevent displaying bogus info during configuration
264 * or deconfiguration of a logical volume
266 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
267 if (h->busy_configuring) {
268 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
269 return -EBUSY;
271 h->busy_configuring = 1;
272 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
274 size = sprintf(buffer, "%s: HP %s Controller\n"
275 "Board ID: 0x%08lx\n"
276 "Firmware Version: %c%c%c%c\n"
277 "IRQ: %d\n"
278 "Logical drives: %d\n"
279 "Current Q depth: %d\n"
280 "Current # commands on controller: %d\n"
281 "Max Q depth since init: %d\n"
282 "Max # commands on controller since init: %d\n"
283 "Max SG entries since init: %d\n\n",
284 h->devname,
285 h->product_name,
286 (unsigned long)h->board_id,
287 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
288 (unsigned int)h->intr[SIMPLE_MODE_INT],
289 h->num_luns,
290 h->Qdepth, h->commands_outstanding,
291 h->maxQsinceinit, h->max_outstanding, h->maxSG);
293 pos += size; len += size;
294 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
295 for(i=0; i<=h->highest_lun; i++) {
297 drv = &h->drv[i];
298 if (drv->heads == 0)
299 continue;
301 vol_sz = drv->nr_blocks;
302 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
303 vol_sz_frac *= 100;
304 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
306 if (drv->raid_level > 5)
307 drv->raid_level = RAID_UNKNOWN;
308 size = sprintf(buffer+len, "cciss/c%dd%d:"
309 "\t%4u.%02uGB\tRAID %s\n",
310 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
311 raid_label[drv->raid_level]);
312 pos += size; len += size;
315 *eof = 1;
316 *start = buffer+offset;
317 len -= offset;
318 if (len>length)
319 len = length;
320 h->busy_configuring = 0;
321 return len;
324 static int
325 cciss_proc_write(struct file *file, const char __user *buffer,
326 unsigned long count, void *data)
328 unsigned char cmd[80];
329 int len;
330 #ifdef CONFIG_CISS_SCSI_TAPE
331 ctlr_info_t *h = (ctlr_info_t *) data;
332 int rc;
333 #endif
335 if (count > sizeof(cmd)-1) return -EINVAL;
336 if (copy_from_user(cmd, buffer, count)) return -EFAULT;
337 cmd[count] = '\0';
338 len = strlen(cmd); // above 3 lines ensure safety
339 if (len && cmd[len-1] == '\n')
340 cmd[--len] = '\0';
341 # ifdef CONFIG_CISS_SCSI_TAPE
342 if (strcmp("engage scsi", cmd)==0) {
343 rc = cciss_engage_scsi(h->ctlr);
344 if (rc != 0) return -rc;
345 return count;
347 /* might be nice to have "disengage" too, but it's not
348 safely possible. (only 1 module use count, lock issues.) */
349 # endif
350 return -EINVAL;
354 * Get us a file in /proc/cciss that says something about each controller.
355 * Create /proc/cciss if it doesn't exist yet.
357 static void __devinit cciss_procinit(int i)
359 struct proc_dir_entry *pde;
361 if (proc_cciss == NULL) {
362 proc_cciss = proc_mkdir("cciss", proc_root_driver);
363 if (!proc_cciss)
364 return;
367 pde = create_proc_read_entry(hba[i]->devname,
368 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
369 proc_cciss, cciss_proc_get_info, hba[i]);
370 pde->write_proc = cciss_proc_write;
372 #endif /* CONFIG_PROC_FS */
375 * For operations that cannot sleep, a command block is allocated at init,
376 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
377 * which ones are free or in use. For operations that can wait for kmalloc
378 * to possibly sleep, this routine can be called with get_from_pool set to 0.
379 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc() was.
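 * In either case the command block and its ErrorInfo_struct must be
 * DMA-able: the non-pool path uses pci_alloc_consistent() here, while the
 * pool path hands out entries from cmd_pool/errinfo_pool set up at init.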
381 static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
383 CommandList_struct *c;
384 int i;
385 u64bit temp64;
386 dma_addr_t cmd_dma_handle, err_dma_handle;
388 if (!get_from_pool)
390 c = (CommandList_struct *) pci_alloc_consistent(
391 h->pdev, sizeof(CommandList_struct), &cmd_dma_handle);
392 if(c==NULL)
393 return NULL;
394 memset(c, 0, sizeof(CommandList_struct));
396 c->cmdindex = -1;
398 c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
399 h->pdev, sizeof(ErrorInfo_struct),
400 &err_dma_handle);
402 if (c->err_info == NULL)
404 pci_free_consistent(h->pdev,
405 sizeof(CommandList_struct), c, cmd_dma_handle);
406 return NULL;
408 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
409 } else /* get it out of the controller's pool */
411 do {
412 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
413 if (i == NR_CMDS)
414 return NULL;
415 } while(test_and_set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
416 #ifdef CCISS_DEBUG
417 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
418 #endif
419 c = h->cmd_pool + i;
420 memset(c, 0, sizeof(CommandList_struct));
421 cmd_dma_handle = h->cmd_pool_dhandle
422 + i*sizeof(CommandList_struct);
423 c->err_info = h->errinfo_pool + i;
424 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
425 err_dma_handle = h->errinfo_pool_dhandle
426 + i*sizeof(ErrorInfo_struct);
427 h->nr_allocs++;
429 c->cmdindex = i;
432 c->busaddr = (__u32) cmd_dma_handle;
433 temp64.val = (__u64) err_dma_handle;
434 c->ErrDesc.Addr.lower = temp64.val32.lower;
435 c->ErrDesc.Addr.upper = temp64.val32.upper;
436 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
438 c->ctlr = h->ctlr;
439 return c;
445 * Frees a command block that was previously allocated with cmd_alloc().
447 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
449 int i;
450 u64bit temp64;
452 if( !got_from_pool)
454 temp64.val32.lower = c->ErrDesc.Addr.lower;
455 temp64.val32.upper = c->ErrDesc.Addr.upper;
456 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
457 c->err_info, (dma_addr_t) temp64.val);
458 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
459 c, (dma_addr_t) c->busaddr);
460 } else
462 i = c - h->cmd_pool;
463 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
464 h->nr_frees++;
468 static inline ctlr_info_t *get_host(struct gendisk *disk)
470 return disk->queue->queuedata;
473 static inline drive_info_struct *get_drv(struct gendisk *disk)
475 return disk->private_data;
479 * Open. Make sure the device is really there.
481 static int cciss_open(struct inode *inode, struct file *filep)
483 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
484 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
486 #ifdef CCISS_DEBUG
487 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
488 #endif /* CCISS_DEBUG */
490 if (host->busy_initializing || drv->busy_configuring)
491 return -EBUSY;
493 * Root is allowed to open raw volume zero even if it's not configured
494 * so array config can still work. Root is also allowed to open any
495 * volume that has a LUN ID, so it can issue IOCTL to reread the
496 * disk information. I don't think I really like this
497 * but I'm already using way too many device nodes to claim another one
498 * for "raw controller".
500 if (drv->nr_blocks == 0) {
501 if (iminor(inode) != 0) { /* not node 0? */
502 /* if not node 0 make sure it is a partition = 0 */
503 if (iminor(inode) & 0x0f) {
504 return -ENXIO;
505 /* if it is, make sure we have a LUN ID */
506 } else if (drv->LunID == 0) {
507 return -ENXIO;
510 if (!capable(CAP_SYS_ADMIN))
511 return -EPERM;
513 drv->usage_count++;
514 host->usage_count++;
515 return 0;
518 * Close. Sync first.
520 static int cciss_release(struct inode *inode, struct file *filep)
522 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
523 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
525 #ifdef CCISS_DEBUG
526 printk(KERN_DEBUG "cciss_release %s\n", inode->i_bdev->bd_disk->disk_name);
527 #endif /* CCISS_DEBUG */
529 drv->usage_count--;
530 host->usage_count--;
531 return 0;
534 #ifdef CONFIG_COMPAT
536 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
538 int ret;
539 lock_kernel();
540 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
541 unlock_kernel();
542 return ret;
545 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg);
546 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd, unsigned long arg);
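/* The 32-bit passthru ioctls carry a 32-bit user buffer pointer, so they
 * cannot simply be forwarded: cciss_ioctl32_passthru() and
 * cciss_ioctl32_big_passthru() repack the request into the native
 * structure in compat_alloc_user_space(), converting the pointer with
 * compat_ptr(), before calling the regular ioctl handler via do_ioctl(). */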
548 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
550 switch (cmd) {
551 case CCISS_GETPCIINFO:
552 case CCISS_GETINTINFO:
553 case CCISS_SETINTINFO:
554 case CCISS_GETNODENAME:
555 case CCISS_SETNODENAME:
556 case CCISS_GETHEARTBEAT:
557 case CCISS_GETBUSTYPES:
558 case CCISS_GETFIRMVER:
559 case CCISS_GETDRIVVER:
560 case CCISS_REVALIDVOLS:
561 case CCISS_DEREGDISK:
562 case CCISS_REGNEWDISK:
563 case CCISS_REGNEWD:
564 case CCISS_RESCANDISK:
565 case CCISS_GETLUNINFO:
566 return do_ioctl(f, cmd, arg);
568 case CCISS_PASSTHRU32:
569 return cciss_ioctl32_passthru(f, cmd, arg);
570 case CCISS_BIG_PASSTHRU32:
571 return cciss_ioctl32_big_passthru(f, cmd, arg);
573 default:
574 return -ENOIOCTLCMD;
578 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg)
580 IOCTL32_Command_struct __user *arg32 =
581 (IOCTL32_Command_struct __user *) arg;
582 IOCTL_Command_struct arg64;
583 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
584 int err;
585 u32 cp;
587 err = 0;
588 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
589 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
590 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
591 err |= get_user(arg64.buf_size, &arg32->buf_size);
592 err |= get_user(cp, &arg32->buf);
593 arg64.buf = compat_ptr(cp);
594 err |= copy_to_user(p, &arg64, sizeof(arg64));
596 if (err)
597 return -EFAULT;
599 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long) p);
600 if (err)
601 return err;
602 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
603 if (err)
604 return -EFAULT;
605 return err;
608 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, unsigned long arg)
610 BIG_IOCTL32_Command_struct __user *arg32 =
611 (BIG_IOCTL32_Command_struct __user *) arg;
612 BIG_IOCTL_Command_struct arg64;
613 BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
614 int err;
615 u32 cp;
617 err = 0;
618 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
619 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
620 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
621 err |= get_user(arg64.buf_size, &arg32->buf_size);
622 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
623 err |= get_user(cp, &arg32->buf);
624 arg64.buf = compat_ptr(cp);
625 err |= copy_to_user(p, &arg64, sizeof(arg64));
627 if (err)
628 return -EFAULT;
630 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long) p);
631 if (err)
632 return err;
633 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
634 if (err)
635 return -EFAULT;
636 return err;
638 #endif
640 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
642 drive_info_struct *drv = get_drv(bdev->bd_disk);
644 if (!drv->cylinders)
645 return -ENXIO;
647 geo->heads = drv->heads;
648 geo->sectors = drv->sectors;
649 geo->cylinders = drv->cylinders;
650 return 0;
654 * ioctl
656 static int cciss_ioctl(struct inode *inode, struct file *filep,
657 unsigned int cmd, unsigned long arg)
659 struct block_device *bdev = inode->i_bdev;
660 struct gendisk *disk = bdev->bd_disk;
661 ctlr_info_t *host = get_host(disk);
662 drive_info_struct *drv = get_drv(disk);
663 int ctlr = host->ctlr;
664 void __user *argp = (void __user *)arg;
666 #ifdef CCISS_DEBUG
667 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
668 #endif /* CCISS_DEBUG */
670 switch(cmd) {
671 case CCISS_GETPCIINFO:
673 cciss_pci_info_struct pciinfo;
675 if (!arg) return -EINVAL;
676 pciinfo.domain = pci_domain_nr(host->pdev->bus);
677 pciinfo.bus = host->pdev->bus->number;
678 pciinfo.dev_fn = host->pdev->devfn;
679 pciinfo.board_id = host->board_id;
680 if (copy_to_user(argp, &pciinfo, sizeof( cciss_pci_info_struct )))
681 return -EFAULT;
682 return(0);
684 case CCISS_GETINTINFO:
686 cciss_coalint_struct intinfo;
687 if (!arg) return -EINVAL;
688 intinfo.delay = readl(&host->cfgtable->HostWrite.CoalIntDelay);
689 intinfo.count = readl(&host->cfgtable->HostWrite.CoalIntCount);
690 if (copy_to_user(argp, &intinfo, sizeof( cciss_coalint_struct )))
691 return -EFAULT;
692 return(0);
694 case CCISS_SETINTINFO:
696 cciss_coalint_struct intinfo;
697 unsigned long flags;
698 int i;
700 if (!arg) return -EINVAL;
701 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
702 if (copy_from_user(&intinfo, argp, sizeof( cciss_coalint_struct)))
703 return -EFAULT;
704 if ( (intinfo.delay == 0 ) && (intinfo.count == 0))
707 // printk("cciss_ioctl: delay and count cannot be 0\n");
708 return( -EINVAL);
710 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
711 /* Update the field, and then ring the doorbell */
712 writel( intinfo.delay,
713 &(host->cfgtable->HostWrite.CoalIntDelay));
714 writel( intinfo.count,
715 &(host->cfgtable->HostWrite.CoalIntCount));
716 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
718 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
719 if (!(readl(host->vaddr + SA5_DOORBELL)
720 & CFGTBL_ChangeReq))
721 break;
722 /* delay and try again */
723 udelay(1000);
725 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
726 if (i >= MAX_IOCTL_CONFIG_WAIT)
727 return -EAGAIN;
728 return(0);
730 case CCISS_GETNODENAME:
732 NodeName_type NodeName;
733 int i;
735 if (!arg) return -EINVAL;
736 for(i=0;i<16;i++)
737 NodeName[i] = readb(&host->cfgtable->ServerName[i]);
738 if (copy_to_user(argp, NodeName, sizeof( NodeName_type)))
739 return -EFAULT;
740 return(0);
742 case CCISS_SETNODENAME:
744 NodeName_type NodeName;
745 unsigned long flags;
746 int i;
748 if (!arg) return -EINVAL;
749 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
751 if (copy_from_user(NodeName, argp, sizeof( NodeName_type)))
752 return -EFAULT;
754 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
756 /* Update the field, and then ring the doorbell */
757 for(i=0;i<16;i++)
758 writeb( NodeName[i], &host->cfgtable->ServerName[i]);
760 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
762 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
763 if (!(readl(host->vaddr + SA5_DOORBELL)
764 & CFGTBL_ChangeReq))
765 break;
766 /* delay and try again */
767 udelay(1000);
769 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
770 if (i >= MAX_IOCTL_CONFIG_WAIT)
771 return -EAGAIN;
772 return(0);
775 case CCISS_GETHEARTBEAT:
777 Heartbeat_type heartbeat;
779 if (!arg) return -EINVAL;
780 heartbeat = readl(&host->cfgtable->HeartBeat);
781 if (copy_to_user(argp, &heartbeat, sizeof( Heartbeat_type)))
782 return -EFAULT;
783 return(0);
785 case CCISS_GETBUSTYPES:
787 BusTypes_type BusTypes;
789 if (!arg) return -EINVAL;
790 BusTypes = readl(&host->cfgtable->BusTypes);
791 if (copy_to_user(argp, &BusTypes, sizeof( BusTypes_type) ))
792 return -EFAULT;
793 return(0);
795 case CCISS_GETFIRMVER:
797 FirmwareVer_type firmware;
799 if (!arg) return -EINVAL;
800 memcpy(firmware, host->firm_ver, 4);
802 if (copy_to_user(argp, firmware, sizeof( FirmwareVer_type)))
803 return -EFAULT;
804 return(0);
806 case CCISS_GETDRIVVER:
808 DriverVer_type DriverVer = DRIVER_VERSION;
810 if (!arg) return -EINVAL;
812 if (copy_to_user(argp, &DriverVer, sizeof( DriverVer_type) ))
813 return -EFAULT;
814 return(0);
817 case CCISS_REVALIDVOLS:
818 if (bdev != bdev->bd_contains || drv != host->drv)
819 return -ENXIO;
820 return revalidate_allvol(host);
822 case CCISS_GETLUNINFO: {
823 LogvolInfo_struct luninfo;
825 luninfo.LunID = drv->LunID;
826 luninfo.num_opens = drv->usage_count;
827 luninfo.num_parts = 0;
828 if (copy_to_user(argp, &luninfo,
829 sizeof(LogvolInfo_struct)))
830 return -EFAULT;
831 return(0);
833 case CCISS_DEREGDISK:
834 return rebuild_lun_table(host, disk);
836 case CCISS_REGNEWD:
837 return rebuild_lun_table(host, NULL);
839 case CCISS_PASSTHRU:
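/* Pass an arbitrary CISS command from userspace straight to the
 * controller: copy in the user buffer (for writes), build a single-SG
 * command, queue it and sleep on a completion, then unmap the DMA
 * buffer and copy the data/error info back out. */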
841 IOCTL_Command_struct iocommand;
842 CommandList_struct *c;
843 char *buff = NULL;
844 u64bit temp64;
845 unsigned long flags;
846 DECLARE_COMPLETION(wait);
848 if (!arg) return -EINVAL;
850 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
852 if (copy_from_user(&iocommand, argp, sizeof( IOCTL_Command_struct) ))
853 return -EFAULT;
854 if((iocommand.buf_size < 1) &&
855 (iocommand.Request.Type.Direction != XFER_NONE))
857 return -EINVAL;
859 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
860 /* Check kmalloc limits */
861 if(iocommand.buf_size > 128000)
862 return -EINVAL;
863 #endif
864 if(iocommand.buf_size > 0)
866 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
867 if( buff == NULL)
868 return -EFAULT;
870 if (iocommand.Request.Type.Direction == XFER_WRITE)
872 /* Copy the data into the buffer we created */
873 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
875 kfree(buff);
876 return -EFAULT;
878 } else {
879 memset(buff, 0, iocommand.buf_size);
881 if ((c = cmd_alloc(host , 0)) == NULL)
883 kfree(buff);
884 return -ENOMEM;
886 // Fill in the command type
887 c->cmd_type = CMD_IOCTL_PEND;
888 // Fill in Command Header
889 c->Header.ReplyQueue = 0; // unused in simple mode
890 if( iocommand.buf_size > 0) // buffer to fill
892 c->Header.SGList = 1;
893 c->Header.SGTotal= 1;
894 } else // no buffers to fill
896 c->Header.SGList = 0;
897 c->Header.SGTotal= 0;
899 c->Header.LUN = iocommand.LUN_info;
900 c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block as the tag
902 // Fill in Request block
903 c->Request = iocommand.Request;
905 // Fill in the scatter gather information
906 if (iocommand.buf_size > 0 )
908 temp64.val = pci_map_single( host->pdev, buff,
909 iocommand.buf_size,
910 PCI_DMA_BIDIRECTIONAL);
911 c->SG[0].Addr.lower = temp64.val32.lower;
912 c->SG[0].Addr.upper = temp64.val32.upper;
913 c->SG[0].Len = iocommand.buf_size;
914 c->SG[0].Ext = 0; // we are not chaining
916 c->waiting = &wait;
918 /* Put the request on the tail of the request queue */
919 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
920 addQ(&host->reqQ, c);
921 host->Qdepth++;
922 start_io(host);
923 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
925 wait_for_completion(&wait);
927 /* unlock the buffers from DMA */
928 temp64.val32.lower = c->SG[0].Addr.lower;
929 temp64.val32.upper = c->SG[0].Addr.upper;
930 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
931 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
933 /* Copy the error information out */
934 iocommand.error_info = *(c->err_info);
935 if ( copy_to_user(argp, &iocommand, sizeof( IOCTL_Command_struct) ) )
937 kfree(buff);
938 cmd_free(host, c, 0);
939 return( -EFAULT);
942 if (iocommand.Request.Type.Direction == XFER_READ)
944 /* Copy the data out of the buffer we created */
945 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size))
947 kfree(buff);
948 cmd_free(host, c, 0);
949 return -EFAULT;
952 kfree(buff);
953 cmd_free(host, c, 0);
954 return(0);
956 case CCISS_BIG_PASSTHRU: {
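/* Like CCISS_PASSTHRU, but for transfers larger than one kmalloc: the
 * user buffer is split into up to MAXSGENTRIES chunks of at most
 * ioc->malloc_size bytes, each mapped as its own scatter-gather entry. */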
957 BIG_IOCTL_Command_struct *ioc;
958 CommandList_struct *c;
959 unsigned char **buff = NULL;
960 int *buff_size = NULL;
961 u64bit temp64;
962 unsigned long flags;
963 BYTE sg_used = 0;
964 int status = 0;
965 int i;
966 DECLARE_COMPLETION(wait);
967 __u32 left;
968 __u32 sz;
969 BYTE __user *data_ptr;
971 if (!arg)
972 return -EINVAL;
973 if (!capable(CAP_SYS_RAWIO))
974 return -EPERM;
975 ioc = (BIG_IOCTL_Command_struct *)
976 kmalloc(sizeof(*ioc), GFP_KERNEL);
977 if (!ioc) {
978 status = -ENOMEM;
979 goto cleanup1;
981 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
982 status = -EFAULT;
983 goto cleanup1;
985 if ((ioc->buf_size < 1) &&
986 (ioc->Request.Type.Direction != XFER_NONE)) {
987 status = -EINVAL;
988 goto cleanup1;
990 /* Check kmalloc limits using all SGs */
991 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
992 status = -EINVAL;
993 goto cleanup1;
995 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
996 status = -EINVAL;
997 goto cleanup1;
999 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1000 if (!buff) {
1001 status = -ENOMEM;
1002 goto cleanup1;
1004 buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int),
1005 GFP_KERNEL);
1006 if (!buff_size) {
1007 status = -ENOMEM;
1008 goto cleanup1;
1010 left = ioc->buf_size;
1011 data_ptr = ioc->buf;
1012 while (left) {
1013 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
1014 buff_size[sg_used] = sz;
1015 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1016 if (buff[sg_used] == NULL) {
1017 status = -ENOMEM;
1018 goto cleanup1;
1020 if (ioc->Request.Type.Direction == XFER_WRITE) {
1021 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
1022 status = -ENOMEM;
1023 goto cleanup1;
1025 } else {
1026 memset(buff[sg_used], 0, sz);
1028 left -= sz;
1029 data_ptr += sz;
1030 sg_used++;
1032 if ((c = cmd_alloc(host , 0)) == NULL) {
1033 status = -ENOMEM;
1034 goto cleanup1;
1036 c->cmd_type = CMD_IOCTL_PEND;
1037 c->Header.ReplyQueue = 0;
1039 if( ioc->buf_size > 0) {
1040 c->Header.SGList = sg_used;
1041 c->Header.SGTotal= sg_used;
1042 } else {
1043 c->Header.SGList = 0;
1044 c->Header.SGTotal= 0;
1046 c->Header.LUN = ioc->LUN_info;
1047 c->Header.Tag.lower = c->busaddr;
1049 c->Request = ioc->Request;
1050 if (ioc->buf_size > 0 ) {
1051 int i;
1052 for(i=0; i<sg_used; i++) {
1053 temp64.val = pci_map_single( host->pdev, buff[i],
1054 buff_size[i],
1055 PCI_DMA_BIDIRECTIONAL);
1056 c->SG[i].Addr.lower = temp64.val32.lower;
1057 c->SG[i].Addr.upper = temp64.val32.upper;
1058 c->SG[i].Len = buff_size[i];
1059 c->SG[i].Ext = 0; /* we are not chaining */
1062 c->waiting = &wait;
1063 /* Put the request on the tail of the request queue */
1064 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1065 addQ(&host->reqQ, c);
1066 host->Qdepth++;
1067 start_io(host);
1068 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1069 wait_for_completion(&wait);
1070 /* unlock the buffers from DMA */
1071 for(i=0; i<sg_used; i++) {
1072 temp64.val32.lower = c->SG[i].Addr.lower;
1073 temp64.val32.upper = c->SG[i].Addr.upper;
1074 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
1075 buff_size[i], PCI_DMA_BIDIRECTIONAL);
1077 /* Copy the error information out */
1078 ioc->error_info = *(c->err_info);
1079 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1080 cmd_free(host, c, 0);
1081 status = -EFAULT;
1082 goto cleanup1;
1084 if (ioc->Request.Type.Direction == XFER_READ) {
1085 /* Copy the data out of the buffer we created */
1086 BYTE __user *ptr = ioc->buf;
1087 for(i=0; i< sg_used; i++) {
1088 if (copy_to_user(ptr, buff[i], buff_size[i])) {
1089 cmd_free(host, c, 0);
1090 status = -EFAULT;
1091 goto cleanup1;
1093 ptr += buff_size[i];
1096 cmd_free(host, c, 0);
1097 status = 0;
1098 cleanup1:
1099 if (buff) {
1100 for(i=0; i<sg_used; i++)
1101 kfree(buff[i]);
1102 kfree(buff);
1104 kfree(buff_size);
1105 kfree(ioc);
1106 return(status);
1108 default:
1109 return -ENOTTY;
1115 * revalidate_allvol is for online array config utilities. After a
1116 * utility reconfigures the drives in the array, it can use this function
1117 * (through an ioctl) to make the driver zap any previous disk structs for
1118 * that controller and get new ones.
1120 * Right now I'm using the getgeometry() function to do this, but this
1121 * function should probably be finer grained and allow you to revalidate one
1122 * particular logical volume (instead of all of them on a particular
1123 * controller).
1125 static int revalidate_allvol(ctlr_info_t *host)
1127 int ctlr = host->ctlr, i;
1128 unsigned long flags;
1130 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1131 if (host->usage_count > 1) {
1132 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1133 printk(KERN_WARNING "cciss: Device busy for volume"
1134 " revalidation (usage=%d)\n", host->usage_count);
1135 return -EBUSY;
1137 host->usage_count++;
1138 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1140 for(i=0; i< NWD; i++) {
1141 struct gendisk *disk = host->gendisk[i];
1142 if (disk) {
1143 request_queue_t *q = disk->queue;
1145 if (disk->flags & GENHD_FL_UP)
1146 del_gendisk(disk);
1147 if (q)
1148 blk_cleanup_queue(q);
1153 * Set the partition and block size structures for all volumes
1154 * on this controller to zero. We will reread all of this data
1156 memset(host->drv, 0, sizeof(drive_info_struct)
1157 * CISS_MAX_LUN);
1159 * Tell the array controller not to give us any interrupts while
1160 * we check the new geometry. Then turn interrupts back on when
1161 * we're done.
1163 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1164 cciss_getgeometry(ctlr);
1165 host->access.set_intr_mask(host, CCISS_INTR_ON);
1167 /* Loop through each real device */
1168 for (i = 0; i < NWD; i++) {
1169 struct gendisk *disk = host->gendisk[i];
1170 drive_info_struct *drv = &(host->drv[i]);
1171 /* we must register the controller even if no disks exist */
1172 /* this is for the online array utilities */
1173 if (!drv->heads && i)
1174 continue;
1175 blk_queue_hardsect_size(drv->queue, drv->block_size);
1176 set_capacity(disk, drv->nr_blocks);
1177 add_disk(disk);
1179 host->usage_count--;
1180 return 0;
1183 static inline void complete_buffers(struct bio *bio, int status)
1185 while (bio) {
1186 struct bio *xbh = bio->bi_next;
1187 int nr_sectors = bio_sectors(bio);
1189 bio->bi_next = NULL;
1190 blk_finished_io(len);
1191 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
1192 bio = xbh;
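/* Completion work deferred from the interrupt handler via the block
 * layer's softirq mechanism (registered with blk_queue_softirq_done()):
 * unmap the scatter-gather DMA, complete the bios, finish the request
 * and return the command block to the per-controller pool. */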
1197 static void cciss_softirq_done(struct request *rq)
1199 CommandList_struct *cmd = rq->completion_data;
1200 ctlr_info_t *h = hba[cmd->ctlr];
1201 unsigned long flags;
1202 u64bit temp64;
1203 int i, ddir;
1205 if (cmd->Request.Type.Direction == XFER_READ)
1206 ddir = PCI_DMA_FROMDEVICE;
1207 else
1208 ddir = PCI_DMA_TODEVICE;
1210 /* command did not need to be retried */
1211 /* unmap the DMA mapping for all the scatter gather elements */
1212 for(i=0; i<cmd->Header.SGList; i++) {
1213 temp64.val32.lower = cmd->SG[i].Addr.lower;
1214 temp64.val32.upper = cmd->SG[i].Addr.upper;
1215 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1218 complete_buffers(rq->bio, rq->errors);
1220 #ifdef CCISS_DEBUG
1221 printk("Done with %p\n", rq);
1222 #endif /* CCISS_DEBUG */
1224 spin_lock_irqsave(&h->lock, flags);
1225 end_that_request_last(rq, rq->errors);
1226 cmd_free(h, cmd,1);
1227 spin_unlock_irqrestore(&h->lock, flags);
1230 /* This function will check the usage_count of the drive to be updated/added.
1231 * If the usage_count is zero then the drive information will be updated and
1232 * the disk will be re-registered with the kernel. If not then it will be
1233 * left alone for the next reboot. The exception to this is disk 0 which
1234 * will always be left registered with the kernel since it is also the
1235 * controller node. Any changes to disk 0 will show up on the next
1236 * reboot.
1238 static void cciss_update_drive_info(int ctlr, int drv_index)
1240 ctlr_info_t *h = hba[ctlr];
1241 struct gendisk *disk;
1242 ReadCapdata_struct *size_buff = NULL;
1243 InquiryData_struct *inq_buff = NULL;
1244 unsigned int block_size;
1245 unsigned int total_size;
1246 unsigned long flags = 0;
1247 int ret = 0;
1249 /* if the disk already exists then deregister it before proceeding*/
1250 if (h->drv[drv_index].raid_level != -1){
1251 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1252 h->drv[drv_index].busy_configuring = 1;
1253 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1254 ret = deregister_disk(h->gendisk[drv_index],
1255 &h->drv[drv_index], 0);
1256 h->drv[drv_index].busy_configuring = 0;
1259 /* If the disk is in use return */
1260 if (ret)
1261 return;
1264 /* Get information about the disk and modify the driver structure */
1265 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1266 if (size_buff == NULL)
1267 goto mem_msg;
1268 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1269 if (inq_buff == NULL)
1270 goto mem_msg;
1272 cciss_read_capacity(ctlr, drv_index, size_buff, 1,
1273 &total_size, &block_size);
1274 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1275 inq_buff, &h->drv[drv_index]);
1277 ++h->num_luns;
1278 disk = h->gendisk[drv_index];
1279 set_capacity(disk, h->drv[drv_index].nr_blocks);
1282 /* if it's the controller it's already added */
1283 if (drv_index){
1284 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1286 /* Set up queue information */
1287 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1288 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1290 /* This is a hardware imposed limit. */
1291 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1293 /* This is a limit in the driver and could be eliminated. */
1294 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1296 blk_queue_max_sectors(disk->queue, 512);
1298 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1300 disk->queue->queuedata = hba[ctlr];
1302 blk_queue_hardsect_size(disk->queue,
1303 hba[ctlr]->drv[drv_index].block_size);
1305 h->drv[drv_index].queue = disk->queue;
1306 add_disk(disk);
1309 freeret:
1310 kfree(size_buff);
1311 kfree(inq_buff);
1312 return;
1313 mem_msg:
1314 printk(KERN_ERR "cciss: out of memory\n");
1315 goto freeret;
1318 /* This function will find the first index of the controller's drive array
1319 * that has a -1 for the raid_level and will return that index. This is
1320 * where new drives will be added. If the index to be returned is greater
1321 * than the highest_lun index for the controller then highest_lun is set
1322 * to this new index. If there are no available indexes then -1 is returned.
1324 static int cciss_find_free_drive_index(int ctlr)
1326 int i;
1328 for (i=0; i < CISS_MAX_LUN; i++){
1329 if (hba[ctlr]->drv[i].raid_level == -1){
1330 if (i > hba[ctlr]->highest_lun)
1331 hba[ctlr]->highest_lun = i;
1332 return i;
1335 return -1;
1338 /* This function will add and remove logical drives from the Logical
1339 * drive array of the controller and maintain persistence of ordering
1340 * so that mount points are preserved until the next reboot. This allows
1341 * for the removal of logical drives in the middle of the drive array
1342 * without a re-ordering of those drives.
1343 * INPUT
1344 * h = The controller to perform the operations on
1345 * del_disk = The disk to remove if specified. If the value given
1346 * is NULL then no disk is removed.
1348 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1350 int ctlr = h->ctlr;
1351 int num_luns;
1352 ReportLunData_struct *ld_buff = NULL;
1353 drive_info_struct *drv = NULL;
1354 int return_code;
1355 int listlength = 0;
1356 int i;
1357 int drv_found;
1358 int drv_index = 0;
1359 __u32 lunid = 0;
1360 unsigned long flags;
1362 /* Set busy_configuring flag for this operation */
1363 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1364 if (h->num_luns >= CISS_MAX_LUN){
1365 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1366 return -EINVAL;
1369 if (h->busy_configuring){
1370 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1371 return -EBUSY;
1373 h->busy_configuring = 1;
1375 /* if del_disk is NULL then we are being called to add a new disk
1376 * and update the logical drive table. If it is not NULL then
1377 * we will check if the disk is in use or not.
1379 if (del_disk != NULL){
1380 drv = get_drv(del_disk);
1381 drv->busy_configuring = 1;
1382 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1383 return_code = deregister_disk(del_disk, drv, 1);
1384 drv->busy_configuring = 0;
1385 h->busy_configuring = 0;
1386 return return_code;
1387 } else {
1388 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1389 if (!capable(CAP_SYS_RAWIO))
1390 return -EPERM;
1392 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1393 if (ld_buff == NULL)
1394 goto mem_msg;
1396 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1397 sizeof(ReportLunData_struct), 0, 0, 0,
1398 TYPE_CMD);
1400 if (return_code == IO_OK){
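/* The LUN list length comes back from the controller as a big-endian
 * byte array, so assemble it byte by byte. */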
1401 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
1402 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
1403 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
1404 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1405 } else{ /* reading number of logical volumes failed */
1406 printk(KERN_WARNING "cciss: report logical volume"
1407 " command failed\n");
1408 listlength = 0;
1409 goto freeret;
1412 num_luns = listlength / 8; /* 8 bytes per entry */
1413 if (num_luns > CISS_MAX_LUN){
1414 num_luns = CISS_MAX_LUN;
1415 printk(KERN_WARNING "cciss: more luns configured"
1416 " on controller than can be handled by"
1417 " this driver.\n");
1420 /* Compare controller drive array to drivers drive array.
1421 * Check for updates in the drive information and any new drives
1422 * on the controller.
1424 for (i=0; i < num_luns; i++){
1425 int j;
1427 drv_found = 0;
1429 lunid = (0xff &
1430 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1431 lunid |= (0xff &
1432 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1433 lunid |= (0xff &
1434 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1435 lunid |= 0xff &
1436 (unsigned int)(ld_buff->LUN[i][0]);
1438 /* Find if the LUN is already in the drive array
1439 * of the controller. If so then update its info
1440 * if it is not in use. If it does not exist then find
1441 * the first free index and add it.
1443 for (j=0; j <= h->highest_lun; j++){
1444 if (h->drv[j].LunID == lunid){
1445 drv_index = j;
1446 drv_found = 1;
1450 /* check if the drive was found already in the array */
1451 if (!drv_found){
1452 drv_index = cciss_find_free_drive_index(ctlr);
1453 if (drv_index == -1)
1454 goto freeret;
1457 h->drv[drv_index].LunID = lunid;
1458 cciss_update_drive_info(ctlr, drv_index);
1459 } /* end for */
1460 } /* end else */
1462 freeret:
1463 kfree(ld_buff);
1464 h->busy_configuring = 0;
1465 /* We return -1 here to tell the ACU that we have registered/updated
1466 * all of the drives that we can and to keep it from calling us
1467 * additional times.
1469 return -1;
1470 mem_msg:
1471 printk(KERN_ERR "cciss: out of memory\n");
1472 goto freeret;
1475 /* This function will deregister the disk and its queue from the
1476 * kernel. It must be called with the controller lock held and the
1477 * drv structure's busy_configuring flag set. Its parameters are:
1479 * disk = This is the disk to be deregistered
1480 * drv = This is the drive_info_struct associated with the disk to be
1481 * deregistered. It contains information about the disk used
1482 * by the driver.
1483 * clear_all = This flag determines whether or not the disk information
1484 * is going to be completely cleared out and the highest_lun
1485 * reset. Sometimes we want to clear out information about
1486 * the disk in preparation for re-adding it. In this case
1487 * the highest_lun should be left unchanged and the LunID
1488 * should not be cleared.
1490 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1491 int clear_all)
1493 ctlr_info_t *h = get_host(disk);
1495 if (!capable(CAP_SYS_RAWIO))
1496 return -EPERM;
1498 /* make sure logical volume is NOT in use */
1499 if(clear_all || (h->gendisk[0] == disk)) {
1500 if (drv->usage_count > 1)
1501 return -EBUSY;
1503 else
1504 if( drv->usage_count > 0 )
1505 return -EBUSY;
1507 /* invalidate the devices and deregister the disk. If it is disk
1508 * zero do not deregister it but just zero out its values. This
1509 * allows us to delete disk zero but keep the controller registered.
1511 if (h->gendisk[0] != disk){
1512 if (disk) {
1513 request_queue_t *q = disk->queue;
1514 if (disk->flags & GENHD_FL_UP)
1515 del_gendisk(disk);
1516 if (q) {
1517 blk_cleanup_queue(q);
1518 drv->queue = NULL;
1523 --h->num_luns;
1524 /* zero out the disk size info */
1525 drv->nr_blocks = 0;
1526 drv->block_size = 0;
1527 drv->heads = 0;
1528 drv->sectors = 0;
1529 drv->cylinders = 0;
1530 drv->raid_level = -1; /* This can be used as a flag variable to
1531 * indicate that this element of the drive
1532 * array is free.
1535 if (clear_all){
1536 /* check to see if it was the last disk */
1537 if (drv == h->drv + h->highest_lun) {
1538 /* if so, find the new highest lun */
1539 int i, newhighest =-1;
1540 for(i=0; i<h->highest_lun; i++) {
1541 /* if the disk has size > 0, it is available */
1542 if (h->drv[i].heads)
1543 newhighest = i;
1545 h->highest_lun = newhighest;
1548 drv->LunID = 0;
1550 return(0);
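/* fill_cmd() initializes a CommandList_struct for the driver's internal
 * requests: TYPE_CMD for inquiry/report-LUNs/read-capacity/cache-flush
 * commands and TYPE_MSG for abort/reset/no-op messages. If a data buffer
 * is supplied it is mapped for DMA into the single SG entry. */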
1553 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1554 size_t size,
1555 unsigned int use_unit_num, /* 0: address the controller,
1556 1: address logical volume log_unit,
1557 2: periph device address is scsi3addr */
1558 unsigned int log_unit, __u8 page_code, unsigned char *scsi3addr,
1559 int cmd_type)
1561 ctlr_info_t *h= hba[ctlr];
1562 u64bit buff_dma_handle;
1563 int status = IO_OK;
1565 c->cmd_type = CMD_IOCTL_PEND;
1566 c->Header.ReplyQueue = 0;
1567 if( buff != NULL) {
1568 c->Header.SGList = 1;
1569 c->Header.SGTotal= 1;
1570 } else {
1571 c->Header.SGList = 0;
1572 c->Header.SGTotal= 0;
1574 c->Header.Tag.lower = c->busaddr;
1576 c->Request.Type.Type = cmd_type;
1577 if (cmd_type == TYPE_CMD) {
1578 switch(cmd) {
1579 case CISS_INQUIRY:
1580 /* If the logical unit number is 0 then this command is going
1581 to the controller, so it's a physical command:
1582 mode = 0, target = 0, and we have nothing to write.
1583 Otherwise, if use_unit_num == 1:
1584 mode = 1 (volume set addressing), target = LUNID.
1585 Otherwise, if use_unit_num == 2:
1586 mode = 0 (periph dev addr), target = scsi3addr */
1587 if (use_unit_num == 1) {
1588 c->Header.LUN.LogDev.VolId=
1589 h->drv[log_unit].LunID;
1590 c->Header.LUN.LogDev.Mode = 1;
1591 } else if (use_unit_num == 2) {
1592 memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8);
1593 c->Header.LUN.LogDev.Mode = 0;
1595 /* are we trying to read a vital product page */
1596 if(page_code != 0) {
1597 c->Request.CDB[1] = 0x01;
1598 c->Request.CDB[2] = page_code;
1600 c->Request.CDBLen = 6;
1601 c->Request.Type.Attribute = ATTR_SIMPLE;
1602 c->Request.Type.Direction = XFER_READ;
1603 c->Request.Timeout = 0;
1604 c->Request.CDB[0] = CISS_INQUIRY;
1605 c->Request.CDB[4] = size & 0xFF;
1606 break;
1607 case CISS_REPORT_LOG:
1608 case CISS_REPORT_PHYS:
1609 /* Talking to the controller, so it's a physical command:
1610 mode = 00 target = 0. Nothing to write.
1612 c->Request.CDBLen = 12;
1613 c->Request.Type.Attribute = ATTR_SIMPLE;
1614 c->Request.Type.Direction = XFER_READ;
1615 c->Request.Timeout = 0;
1616 c->Request.CDB[0] = cmd;
1617 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1618 c->Request.CDB[7] = (size >> 16) & 0xFF;
1619 c->Request.CDB[8] = (size >> 8) & 0xFF;
1620 c->Request.CDB[9] = size & 0xFF;
1621 break;
1623 case CCISS_READ_CAPACITY:
1624 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1625 c->Header.LUN.LogDev.Mode = 1;
1626 c->Request.CDBLen = 10;
1627 c->Request.Type.Attribute = ATTR_SIMPLE;
1628 c->Request.Type.Direction = XFER_READ;
1629 c->Request.Timeout = 0;
1630 c->Request.CDB[0] = cmd;
1631 break;
1632 case CCISS_CACHE_FLUSH:
1633 c->Request.CDBLen = 12;
1634 c->Request.Type.Attribute = ATTR_SIMPLE;
1635 c->Request.Type.Direction = XFER_WRITE;
1636 c->Request.Timeout = 0;
1637 c->Request.CDB[0] = BMIC_WRITE;
1638 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1639 break;
1640 default:
1641 printk(KERN_WARNING
1642 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1643 return(IO_ERROR);
1645 } else if (cmd_type == TYPE_MSG) {
1646 switch (cmd) {
1647 case 0: /* ABORT message */
1648 c->Request.CDBLen = 12;
1649 c->Request.Type.Attribute = ATTR_SIMPLE;
1650 c->Request.Type.Direction = XFER_WRITE;
1651 c->Request.Timeout = 0;
1652 c->Request.CDB[0] = cmd; /* abort */
1653 c->Request.CDB[1] = 0; /* abort a command */
1654 /* buff contains the tag of the command to abort */
1655 memcpy(&c->Request.CDB[4], buff, 8);
1656 break;
1657 case 1: /* RESET message */
1658 c->Request.CDBLen = 12;
1659 c->Request.Type.Attribute = ATTR_SIMPLE;
1660 c->Request.Type.Direction = XFER_WRITE;
1661 c->Request.Timeout = 0;
1662 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1663 c->Request.CDB[0] = cmd; /* reset */
1664 c->Request.CDB[1] = 0x04; /* reset a LUN */
break; /* avoid falling through into the No-Op case, which would overwrite CDBLen */
1665 case 3: /* No-Op message */
1666 c->Request.CDBLen = 1;
1667 c->Request.Type.Attribute = ATTR_SIMPLE;
1668 c->Request.Type.Direction = XFER_WRITE;
1669 c->Request.Timeout = 0;
1670 c->Request.CDB[0] = cmd;
1671 break;
1672 default:
1673 printk(KERN_WARNING
1674 "cciss%d: unknown message type %d\n",
1675 ctlr, cmd);
1676 return IO_ERROR;
1678 } else {
1679 printk(KERN_WARNING
1680 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1681 return IO_ERROR;
1683 /* Fill in the scatter gather information */
1684 if (size > 0) {
1685 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1686 buff, size, PCI_DMA_BIDIRECTIONAL);
1687 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1688 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1689 c->SG[0].Len = size;
1690 c->SG[0].Ext = 0; /* we are not chaining */
1692 return status;
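/* sendcmd_withirq() builds a command with fill_cmd(), queues it for the
 * controller and sleeps on a completion until the interrupt handler
 * finishes it. Commands killed by an unsolicited abort are retried up
 * to MAX_CMD_RETRIES times before giving up. */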
1694 static int sendcmd_withirq(__u8 cmd,
1695 int ctlr,
1696 void *buff,
1697 size_t size,
1698 unsigned int use_unit_num,
1699 unsigned int log_unit,
1700 __u8 page_code,
1701 int cmd_type)
1703 ctlr_info_t *h = hba[ctlr];
1704 CommandList_struct *c;
1705 u64bit buff_dma_handle;
1706 unsigned long flags;
1707 int return_status;
1708 DECLARE_COMPLETION(wait);
1710 if ((c = cmd_alloc(h , 0)) == NULL)
1711 return -ENOMEM;
1712 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1713 log_unit, page_code, NULL, cmd_type);
1714 if (return_status != IO_OK) {
1715 cmd_free(h, c, 0);
1716 return return_status;
1718 resend_cmd2:
1719 c->waiting = &wait;
1721 /* Put the request on the tail of the queue and send it */
1722 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1723 addQ(&h->reqQ, c);
1724 h->Qdepth++;
1725 start_io(h);
1726 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1728 wait_for_completion(&wait);
1730 if(c->err_info->CommandStatus != 0)
1731 { /* an error has occurred */
1732 switch(c->err_info->CommandStatus)
1734 case CMD_TARGET_STATUS:
1735 printk(KERN_WARNING "cciss: cmd %p has "
1736 " completed with errors\n", c);
1737 if( c->err_info->ScsiStatus)
1739 printk(KERN_WARNING "cciss: cmd %p "
1740 "has SCSI Status = %x\n",
1742 c->err_info->ScsiStatus);
1745 break;
1746 case CMD_DATA_UNDERRUN:
1747 case CMD_DATA_OVERRUN:
1748 /* expected for inquire and report lun commands */
1749 break;
1750 case CMD_INVALID:
1751 printk(KERN_WARNING "cciss: Cmd %p is "
1752 "reported invalid\n", c);
1753 return_status = IO_ERROR;
1754 break;
1755 case CMD_PROTOCOL_ERR:
1756 printk(KERN_WARNING "cciss: cmd %p has "
1757 "protocol error \n", c);
1758 return_status = IO_ERROR;
1759 break;
1760 case CMD_HARDWARE_ERR:
1761 printk(KERN_WARNING "cciss: cmd %p had "
1762 " hardware error\n", c);
1763 return_status = IO_ERROR;
1764 break;
1765 case CMD_CONNECTION_LOST:
1766 printk(KERN_WARNING "cciss: cmd %p had "
1767 "connection lost\n", c);
1768 return_status = IO_ERROR;
1769 break;
1770 case CMD_ABORTED:
1771 printk(KERN_WARNING "cciss: cmd %p was "
1772 "aborted\n", c);
1773 return_status = IO_ERROR;
1774 break;
1775 case CMD_ABORT_FAILED:
1776 printk(KERN_WARNING "cciss: cmd %p reports "
1777 "abort failed\n", c);
1778 return_status = IO_ERROR;
1779 break;
1780 case CMD_UNSOLICITED_ABORT:
1781 printk(KERN_WARNING
1782 "cciss%d: unsolicited abort %p\n",
1783 ctlr, c);
1784 if (c->retry_count < MAX_CMD_RETRIES) {
1785 printk(KERN_WARNING
1786 "cciss%d: retrying %p\n",
1787 ctlr, c);
1788 c->retry_count++;
1789 /* erase the old error information */
1790 memset(c->err_info, 0,
1791 sizeof(ErrorInfo_struct));
1792 return_status = IO_OK;
1793 INIT_COMPLETION(wait);
1794 goto resend_cmd2;
1796 return_status = IO_ERROR;
1797 break;
1798 default:
1799 printk(KERN_WARNING "cciss: cmd %p returned "
1800 "unknown status %x\n", c,
1801 c->err_info->CommandStatus);
1802 return_status = IO_ERROR;
1805 /* unlock the buffers from DMA */
1806 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1807 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1808 pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
1809 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1810 cmd_free(h, c, 0);
1811 return(return_status);
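/* cciss_geometry_inquiry() issues a vendor-specific inquiry (page 0xC1)
 * to fetch heads/sectors/cylinders and the RAID level for one logical
 * volume. If the volume does not report a geometry, a fake 255-head,
 * 32-sector layout is derived from the total size. */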
1814 static void cciss_geometry_inquiry(int ctlr, int logvol,
1815 int withirq, unsigned int total_size,
1816 unsigned int block_size, InquiryData_struct *inq_buff,
1817 drive_info_struct *drv)
1819 int return_code;
1820 memset(inq_buff, 0, sizeof(InquiryData_struct));
1821 if (withirq)
1822 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1823 inq_buff, sizeof(*inq_buff), 1, logvol ,0xC1, TYPE_CMD);
1824 else
1825 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1826 sizeof(*inq_buff), 1, logvol ,0xC1, NULL, TYPE_CMD);
1827 if (return_code == IO_OK) {
1828 if(inq_buff->data_byte[8] == 0xFF) {
1829 printk(KERN_WARNING
1830 "cciss: reading geometry failed, volume "
1831 "does not support reading geometry\n");
1832 drv->block_size = block_size;
1833 drv->nr_blocks = total_size;
1834 drv->heads = 255;
1835 drv->sectors = 32; // Sectors per track
1836 drv->cylinders = total_size / 255 / 32;
1837 } else {
1838 unsigned int t;
1840 drv->block_size = block_size;
1841 drv->nr_blocks = total_size;
1842 drv->heads = inq_buff->data_byte[6];
1843 drv->sectors = inq_buff->data_byte[7];
1844 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1845 drv->cylinders += inq_buff->data_byte[5];
1846 drv->raid_level = inq_buff->data_byte[8];
1847 t = drv->heads * drv->sectors;
1848 if (t > 1) {
1849 drv->cylinders = total_size/t;
1852 } else { /* Get geometry failed */
1853 printk(KERN_WARNING "cciss: reading geometry failed\n");
1855 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n",
1856 drv->heads, drv->sectors, drv->cylinders);
1858 static void
1859 cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
1860 int withirq, unsigned int *total_size, unsigned int *block_size)
1862 int return_code;
1863 memset(buf, 0, sizeof(*buf));
1864 if (withirq)
1865 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1866 ctlr, buf, sizeof(*buf), 1, logvol, 0, TYPE_CMD);
1867 else
1868 return_code = sendcmd(CCISS_READ_CAPACITY,
1869 ctlr, buf, sizeof(*buf), 1, logvol, 0, NULL, TYPE_CMD);
1870 if (return_code == IO_OK) {
1871 *total_size = be32_to_cpu(*((__be32 *) &buf->total_size[0]))+1;
1872 *block_size = be32_to_cpu(*((__be32 *) &buf->block_size[0]));
1873 } else { /* read capacity command failed */
1874 printk(KERN_WARNING "cciss: read capacity failed\n");
1875 *total_size = 0;
1876 *block_size = BLOCK_SIZE;
1878 printk(KERN_INFO " blocks= %u block_size= %d\n",
1879 *total_size, *block_size);
1880 return;
1883 static int cciss_revalidate(struct gendisk *disk)
1885 ctlr_info_t *h = get_host(disk);
1886 drive_info_struct *drv = get_drv(disk);
1887 int logvol;
1888 int FOUND=0;
1889 unsigned int block_size;
1890 unsigned int total_size;
1891 ReadCapdata_struct *size_buff = NULL;
1892 InquiryData_struct *inq_buff = NULL;
1894 for(logvol=0; logvol < CISS_MAX_LUN; logvol++)
1896 if(h->drv[logvol].LunID == drv->LunID) {
1897 FOUND=1;
1898 break;
1902 if (!FOUND) return 1;
1904 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1905 if (size_buff == NULL)
1907 printk(KERN_WARNING "cciss: out of memory\n");
1908 return 1;
1910 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1911 if (inq_buff == NULL)
1913 printk(KERN_WARNING "cciss: out of memory\n");
1914 kfree(size_buff);
1915 return 1;
1918 cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size);
1919 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv);
1921 blk_queue_hardsect_size(drv->queue, drv->block_size);
1922 set_capacity(disk, drv->nr_blocks);
1924 kfree(size_buff);
1925 kfree(inq_buff);
1926 return 0;
1930 * Poll for a command to complete.
1931 * The memory-mapped completion FIFO is polled directly.
1932 * Used only at init time, while interrupts from the HBA are disabled.
1934 static unsigned long pollcomplete(int ctlr)
1936 unsigned long done;
1937 int i;
1939 /* Wait (up to 20 seconds) for a command to complete */
1941 for (i = 20 * HZ; i > 0; i--) {
1942 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1943 if (done == FIFO_EMPTY)
1944 schedule_timeout_uninterruptible(1);
1945 else
1946 return (done);
1948 /* Invalid address to tell caller we ran out of time */
1949 return 1;
1952 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
1954 /* We get in here if sendcmd() is polling for completions
1955 and gets some command back that it wasn't expecting --
1956 something other than that which it just sent down.
1957 Ordinarily, that shouldn't happen, but it can happen when
1958 the scsi tape stuff gets into error handling mode, and
1959 starts using sendcmd() to try to abort commands and
1960 reset tape drives. In that case, sendcmd may pick up
1961 completions of commands that were sent to logical drives
1962 through the block i/o system, or cciss ioctls completing, etc.
1963 In that case, we need to save those completions for later
1964 processing by the interrupt handler.
1967 #ifdef CONFIG_CISS_SCSI_TAPE
1968 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
1970 /* If it's not the scsi tape stuff doing error handling (abort */
1971 /* or reset) then we don't expect anything weird. */
1972 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
1973 #endif
1974 printk( KERN_WARNING "cciss%d: SendCmd "
1975 "Invalid command list address returned! (%lx)\n",
1976 ctlr, complete);
1977 /* not much we can do. */
1978 #ifdef CONFIG_CISS_SCSI_TAPE
1979 return 1;
1982 /* We've sent down an abort or reset, but something else
1983 has completed */
1984 if (srl->ncompletions >= (NR_CMDS + 2)) {
1985 /* Uh oh. No room to save it for later... */
1986 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
1987 "reject list overflow, command lost!\n", ctlr);
1988 return 1;
1990 /* Save it for later */
1991 srl->complete[srl->ncompletions] = complete;
1992 srl->ncompletions++;
1993 #endif
1994 return 0;
1998 * Send a command to the controller, and wait for it to complete.
1999 * Only used at init time.
2001 static int sendcmd(
2002 __u8 cmd,
2003 int ctlr,
2004 void *buff,
2005 size_t size,
2006 unsigned int use_unit_num, /* 0: address the controller,
2007 1: address logical volume log_unit,
2008 2: periph device address is scsi3addr */
2009 unsigned int log_unit,
2010 __u8 page_code,
2011 unsigned char *scsi3addr,
2012 int cmd_type)
2014 CommandList_struct *c;
2015 int i;
2016 unsigned long complete;
2017 ctlr_info_t *info_p= hba[ctlr];
2018 u64bit buff_dma_handle;
2019 int status, done = 0;
2021 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2022 printk(KERN_WARNING "cciss: unable to get memory");
2023 return(IO_ERROR);
2025 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2026 log_unit, page_code, scsi3addr, cmd_type);
2027 if (status != IO_OK) {
2028 cmd_free(info_p, c, 1);
2029 return status;
2031 resend_cmd1:
2033 * Disable interrupt
2035 #ifdef CCISS_DEBUG
2036 printk(KERN_DEBUG "cciss: turning intr off\n");
2037 #endif /* CCISS_DEBUG */
2038 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2040 /* Make sure there is room in the command FIFO */
2041 /* Actually it should be completely empty at this time */
2042 /* unless we are in here doing error handling for the scsi */
2043 /* tape side of the driver. */
2044 for (i = 200000; i > 0; i--)
2046 /* if the fifo isn't full, go ahead and submit */
2047 if (!(info_p->access.fifo_full(info_p)))
2050 break;
2052 udelay(10);
2053 printk(KERN_WARNING "cciss%d: SendCmd FIFO full,"
2054 " waiting!\n", ctlr);
2057 * Send the cmd
2059 info_p->access.submit_command(info_p, c);
2060 done = 0;
2061 do {
2062 complete = pollcomplete(ctlr);
2064 #ifdef CCISS_DEBUG
2065 printk(KERN_DEBUG "cciss: command completed\n");
2066 #endif /* CCISS_DEBUG */
2068 if (complete == 1) {
2069 printk( KERN_WARNING
2070 "cciss cciss%d: SendCmd Timeout out, "
2071 "No command list address returned!\n",
2072 ctlr);
2073 status = IO_ERROR;
2074 done = 1;
2075 break;
2078 /* This will need to change for direct lookup completions */
2079 if ( (complete & CISS_ERROR_BIT)
2080 && (complete & ~CISS_ERROR_BIT) == c->busaddr)
2082 /* if there is a data overrun or underrun on a Report or Inquiry
2083 command, ignore it
2085 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2086 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2087 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2088 ((c->err_info->CommandStatus ==
2089 CMD_DATA_OVERRUN) ||
2090 (c->err_info->CommandStatus ==
2091 CMD_DATA_UNDERRUN)
2094 complete = c->busaddr;
2095 } else {
2096 if (c->err_info->CommandStatus ==
2097 CMD_UNSOLICITED_ABORT) {
2098 printk(KERN_WARNING "cciss%d: "
2099 "unsolicited abort %p\n",
2100 ctlr, c);
2101 if (c->retry_count < MAX_CMD_RETRIES) {
2102 printk(KERN_WARNING
2103 "cciss%d: retrying %p\n",
2104 ctlr, c);
2105 c->retry_count++;
2106 /* erase the old error */
2107 /* information */
2108 memset(c->err_info, 0,
2109 sizeof(ErrorInfo_struct));
2110 goto resend_cmd1;
2111 } else {
2112 printk(KERN_WARNING
2113 "cciss%d: retried %p too "
2114 "many times\n", ctlr, c);
2115 status = IO_ERROR;
2116 goto cleanup1;
2118 } else if (c->err_info->CommandStatus == CMD_UNABORTABLE) {
2119 printk(KERN_WARNING "cciss%d: command could not be aborted.\n", ctlr);
2120 status = IO_ERROR;
2121 goto cleanup1;
2123 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2124 " Error %x \n", ctlr,
2125 c->err_info->CommandStatus);
2126 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2127 " offensive info\n"
2128 " size %x\n num %x value %x\n", ctlr,
2129 c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
2130 c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
2131 c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
2132 status = IO_ERROR;
2133 goto cleanup1;
2136 /* This will need changing for direct lookup completions */
2137 if (complete != c->busaddr) {
2138 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2139 BUG(); /* we are pretty much hosed if we get here. */
2141 continue;
2142 } else
2143 done = 1;
2144 } while (!done);
2146 cleanup1:
2147 /* unmap the data buffer from DMA */
2148 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2149 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2150 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2151 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2152 #ifdef CONFIG_CISS_SCSI_TAPE
2153 /* if we saved some commands for later, process them now. */
2154 if (info_p->scsi_rejects.ncompletions > 0)
2155 do_cciss_intr(0, info_p, NULL);
2156 #endif
2157 cmd_free(info_p, c, 1);
2158 return (status);
2161 * Map (physical) PCI mem into (virtual) kernel space
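 * ioremap() wants a page-aligned base, so map from the containing page
 * boundary and add the offset back into the returned virtual address.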
2163 static void __iomem *remap_pci_mem(ulong base, ulong size)
2165 ulong page_base = ((ulong) base) & PAGE_MASK;
2166 ulong page_offs = ((ulong) base) - page_base;
2167 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
2169 return page_remapped ? (page_remapped + page_offs) : NULL;
2173 * Takes jobs off the request Q and sends them to the hardware, then puts
2174 * them on the completion Q to wait for completion.
2176 static void start_io( ctlr_info_t *h)
2178 CommandList_struct *c;
2180 while(( c = h->reqQ) != NULL )
2182 /* can't do anything if fifo is full */
2183 if ((h->access.fifo_full(h))) {
2184 printk(KERN_WARNING "cciss: fifo full\n");
2185 break;
2188 /* Get the first entry from the Request Q */
2189 removeQ(&(h->reqQ), c);
2190 h->Qdepth--;
2192 /* Tell the controller to execute the command */
2193 h->access.submit_command(h, c);
2195 /* Put job onto the completed Q */
2196 addQ (&(h->cmpQ), c);
2199 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2200 /* Zeros out the error record and then resends the command back */
2201 /* to the controller */
2202 static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
2204 /* erase the old error information */
2205 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2207 /* add it to software queue and then send it to the controller */
2208 addQ(&(h->reqQ),c);
2209 h->Qdepth++;
2210 if(h->Qdepth > h->maxQsinceinit)
2211 h->maxQsinceinit = h->Qdepth;
2213 start_io(h);
2216 /* Checks the status of the completed job, resends it if it should be
2217 * retried, and otherwise completes the request. Note that this function
2218 * does not need to hold the hba/queue lock.
2220 static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
2221 int timeout)
2223 int status = 1;
2224 int retry_cmd = 0;
2226 if (timeout)
2227 status = 0;
2229 if(cmd->err_info->CommandStatus != 0)
2230 { /* an error has occurred */
2231 switch(cmd->err_info->CommandStatus)
2233 unsigned char sense_key;
2234 case CMD_TARGET_STATUS:
2235 status = 0;
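/* A SCSI status of 0x02 is CHECK CONDITION; sense keys
 * 0x0 (no sense) and 0x1 (recovered error) are treated as
 * success, anything else fails the request. */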
2237 if( cmd->err_info->ScsiStatus == 0x02)
2239 printk(KERN_WARNING "cciss: cmd %p "
2240 "has CHECK CONDITION "
2241 " byte 2 = 0x%x\n", cmd,
2242 cmd->err_info->SenseInfo[2]
2244 /* check the sense key */
2245 sense_key = 0xf &
2246 cmd->err_info->SenseInfo[2];
2247 /* no status or recovered error */
2248 if((sense_key == 0x0) ||
2249 (sense_key == 0x1))
2251 status = 1;
2253 } else
2255 printk(KERN_WARNING "cciss: cmd %p "
2256 "has SCSI Status 0x%x\n",
2257 cmd, cmd->err_info->ScsiStatus);
2259 break;
2260 case CMD_DATA_UNDERRUN:
2261 printk(KERN_WARNING "cciss: cmd %p has"
2262 " completed with data underrun "
2263 "reported\n", cmd);
2264 break;
2265 case CMD_DATA_OVERRUN:
2266 printk(KERN_WARNING "cciss: cmd %p has"
2267 " completed with data overrun "
2268 "reported\n", cmd);
2269 break;
2270 case CMD_INVALID:
2271 printk(KERN_WARNING "cciss: cmd %p is "
2272 "reported invalid\n", cmd);
2273 status = 0;
2274 break;
2275 case CMD_PROTOCOL_ERR:
2276 printk(KERN_WARNING "cciss: cmd %p has "
2277 "protocol error \n", cmd);
2278 status = 0;
2279 break;
2280 case CMD_HARDWARE_ERR:
2281 printk(KERN_WARNING "cciss: cmd %p had "
2282 " hardware error\n", cmd);
2283 status = 0;
2284 break;
2285 case CMD_CONNECTION_LOST:
2286 printk(KERN_WARNING "cciss: cmd %p had "
2287 "connection lost\n", cmd);
2288 status=0;
2289 break;
2290 case CMD_ABORTED:
2291 printk(KERN_WARNING "cciss: cmd %p was "
2292 "aborted\n", cmd);
2293 status=0;
2294 break;
2295 case CMD_ABORT_FAILED:
2296 printk(KERN_WARNING "cciss: cmd %p reports "
2297 "abort failed\n", cmd);
2298 status=0;
2299 break;
2300 case CMD_UNSOLICITED_ABORT:
2301 printk(KERN_WARNING "cciss%d: unsolicited "
2302 "abort %p\n", h->ctlr, cmd);
2303 if (cmd->retry_count < MAX_CMD_RETRIES) {
2304 retry_cmd=1;
2305 printk(KERN_WARNING
2306 "cciss%d: retrying %p\n",
2307 h->ctlr, cmd);
2308 cmd->retry_count++;
2309 } else
2310 printk(KERN_WARNING
2311 "cciss%d: %p retried too "
2312 "many times\n", h->ctlr, cmd);
2313 status=0;
2314 break;
2315 case CMD_TIMEOUT:
2316 printk(KERN_WARNING "cciss: cmd %p timed out\n",
2317 cmd);
2318 status=0;
2319 break;
2320 default:
2321 printk(KERN_WARNING "cciss: cmd %p returned "
2322 "unknown status %x\n", cmd,
2323 cmd->err_info->CommandStatus);
2324 status=0;
2327 /* We need to return this command */
2328 if(retry_cmd) {
2329 resend_cciss_cmd(h,cmd);
2330 return;
2333 cmd->rq->completion_data = cmd;
2334 cmd->rq->errors = status;
2335 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2336 blk_complete_request(cmd->rq);
2340 * Get a request and submit it to the controller.
2342 static void do_cciss_request(request_queue_t *q)
2344 ctlr_info_t *h= q->queuedata;
2345 CommandList_struct *c;
2346 int start_blk, seg;
2347 struct request *creq;
2348 u64bit temp64;
2349 struct scatterlist tmp_sg[MAXSGENTRIES];
2350 drive_info_struct *drv;
2351 int i, dir;
2353 /* We call start_io here in case there is a command waiting on the
2354 * queue that has not been sent.
2356 if (blk_queue_plugged(q))
2357 goto startio;
2359 queue:
2360 creq = elv_next_request(q);
2361 if (!creq)
2362 goto startio;
2364 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2366 if (( c = cmd_alloc(h, 1)) == NULL)
2367 goto full;
2369 blkdev_dequeue_request(creq);
2371 spin_unlock_irq(q->queue_lock);
2373 c->cmd_type = CMD_RWREQ;
2374 c->rq = creq;
2376 /* fill in the request */
2377 drv = creq->rq_disk->private_data;
2378 c->Header.ReplyQueue = 0; // unused in simple mode
2379 /* got command from pool, so use the command block index instead */
2380 /* for direct lookups. */
2381 /* The first 2 bits are reserved for controller error reporting. */
2382 c->Header.Tag.lower = (c->cmdindex << 3);
2383 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2384 c->Header.LUN.LogDev.VolId= drv->LunID;
2385 c->Header.LUN.LogDev.Mode = 1;
2386 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2387 c->Request.Type.Type = TYPE_CMD; // It is a command.
2388 c->Request.Type.Attribute = ATTR_SIMPLE;
2389 c->Request.Type.Direction =
2390 (rq_data_dir(creq) == READ) ? XFER_READ: XFER_WRITE;
2391 c->Request.Timeout = 0; // Don't time out
2392 c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
2393 start_blk = creq->sector;
2394 #ifdef CCISS_DEBUG
2395 printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n", (int) creq->sector,
2396 (int) creq->nr_sectors);
2397 #endif /* CCISS_DEBUG */
2399 seg = blk_rq_map_sg(q, creq, tmp_sg);
2401 /* get the DMA records for the setup */
2402 if (c->Request.Type.Direction == XFER_READ)
2403 dir = PCI_DMA_FROMDEVICE;
2404 else
2405 dir = PCI_DMA_TODEVICE;
2407 for (i=0; i<seg; i++)
2409 c->SG[i].Len = tmp_sg[i].length;
2410 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2411 tmp_sg[i].offset, tmp_sg[i].length,
2412 dir);
2413 c->SG[i].Addr.lower = temp64.val32.lower;
2414 c->SG[i].Addr.upper = temp64.val32.upper;
2415 c->SG[i].Ext = 0; // we are not chaining
2417 /* track how many SG entries we are using */
2418 if( seg > h->maxSG)
2419 h->maxSG = seg;
2421 #ifdef CCISS_DEBUG
2422 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", creq->nr_sectors, seg);
2423 #endif /* CCISS_DEBUG */
2425 c->Header.SGList = c->Header.SGTotal = seg;
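/* Build the rest of the 10-byte READ(10)/WRITE(10) CDB: bytes 2-5 carry
 * the starting block address (big-endian) and bytes 7-8 the sector count.
 * CDB[0] was set to CCISS_READ or CCISS_WRITE above. */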
2426 c->Request.CDB[1]= 0;
2427 c->Request.CDB[2]= (start_blk >> 24) & 0xff; //MSB
2428 c->Request.CDB[3]= (start_blk >> 16) & 0xff;
2429 c->Request.CDB[4]= (start_blk >> 8) & 0xff;
2430 c->Request.CDB[5]= start_blk & 0xff;
2431 c->Request.CDB[6]= 0; // (sect >> 24) & 0xff; MSB
2432 c->Request.CDB[7]= (creq->nr_sectors >> 8) & 0xff;
2433 c->Request.CDB[8]= creq->nr_sectors & 0xff;
2434 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2436 spin_lock_irq(q->queue_lock);
2438 addQ(&(h->reqQ),c);
2439 h->Qdepth++;
2440 if(h->Qdepth > h->maxQsinceinit)
2441 h->maxQsinceinit = h->Qdepth;
2443 goto queue;
2444 full:
2445 blk_stop_queue(q);
2446 startio:
2447 /* We will already have the driver lock here, so there is no need
2448 * to take it again.
2450 start_io(h);
2453 static inline unsigned long get_next_completion(ctlr_info_t *h)
2455 #ifdef CONFIG_CISS_SCSI_TAPE
2456 /* Any rejects from sendcmd() lying around? Process them first */
2457 if (h->scsi_rejects.ncompletions == 0)
2458 return h->access.command_completed(h);
2459 else {
2460 struct sendcmd_reject_list *srl;
2461 int n;
2462 srl = &h->scsi_rejects;
2463 n = --srl->ncompletions;
2464 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2465 printk("p");
2466 return srl->complete[n];
2468 #else
2469 return h->access.command_completed(h);
2470 #endif
2473 static inline int interrupt_pending(ctlr_info_t *h)
2475 #ifdef CONFIG_CISS_SCSI_TAPE
2476 return ( h->access.intr_pending(h)
2477 || (h->scsi_rejects.ncompletions > 0));
2478 #else
2479 return h->access.intr_pending(h);
2480 #endif
2483 static inline long interrupt_not_for_us(ctlr_info_t *h)
2485 #ifdef CONFIG_CISS_SCSI_TAPE
2486 return (((h->access.intr_pending(h) == 0) ||
2487 (h->interrupts_enabled == 0))
2488 && (h->scsi_rejects.ncompletions == 0));
2489 #else
2490 return (((h->access.intr_pending(h) == 0) ||
2491 (h->interrupts_enabled == 0)));
2492 #endif
2495 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2497 ctlr_info_t *h = dev_id;
2498 CommandList_struct *c;
2499 unsigned long flags;
2500 __u32 a, a1, a2;
2501 int j;
2502 int start_queue = h->next_to_run;
2504 if (interrupt_not_for_us(h))
2505 return IRQ_NONE;
2507 * If there are completed commands in the completion queue,
2508 * we had better do something about it.
2510 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2511 while (interrupt_pending(h)) {
2512 while((a = get_next_completion(h)) != FIFO_EMPTY) {
2513 a1 = a;
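/* Decode the completion tag: bit 2 set means a direct-lookup tag
 * with the command pool index in bits 3 and up (see
 * do_cciss_request); otherwise mask off the low bits and search
 * the completion queue for a matching bus address. */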
2514 if ((a & 0x04)) {
2515 a2 = (a >> 3);
2516 if (a2 >= NR_CMDS) {
2517 printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr);
2518 fail_all_cmds(h->ctlr);
2519 return IRQ_HANDLED;
2522 c = h->cmd_pool + a2;
2523 a = c->busaddr;
2525 } else {
2526 a &= ~3;
2527 if ((c = h->cmpQ) == NULL) {
2528 printk(KERN_WARNING "cciss: Completion of %08x ignored\n", a1);
2529 continue;
2531 while(c->busaddr != a) {
2532 c = c->next;
2533 if (c == h->cmpQ)
2534 break;
2538 * If we've found the command, take it off the
2539 * completion Q and free it
2541 if (c->busaddr == a) {
2542 removeQ(&h->cmpQ, c);
2543 if (c->cmd_type == CMD_RWREQ) {
2544 complete_command(h, c, 0);
2545 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2546 complete(c->waiting);
2548 # ifdef CONFIG_CISS_SCSI_TAPE
2549 else if (c->cmd_type == CMD_SCSI)
2550 complete_scsi_command(c, 0, a1);
2551 # endif
2552 continue;
2557 /* check to see if we have maxed out the number of commands that can
2558 * be placed on the queue. If so then exit. We do this check here
2559 * in case the interrupt we serviced was from an ioctl and did not
2560 * free any new commands.
2562 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2563 goto cleanup;
2565 /* We have room on the queue for more commands. Now we need to queue
2566 * them up. We will also keep track of the next queue to run so
2567 * that every queue gets a chance to be started first.
2569 for (j=0; j < h->highest_lun + 1; j++){
2570 int curr_queue = (start_queue + j) % (h->highest_lun + 1);
2571 /* make sure the disk has been added and the drive is real
2572 * because this can be called from the middle of init_one.
2574 if(!(h->drv[curr_queue].queue) ||
2575 !(h->drv[curr_queue].heads))
2576 continue;
2577 blk_start_queue(h->gendisk[curr_queue]->queue);
2579 /* check to see if we have maxed out the number of commands
2580 * that can be placed on the queue.
2582 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2584 if (curr_queue == start_queue){
2585 h->next_to_run = (start_queue + 1) % (h->highest_lun + 1);
2586 goto cleanup;
2587 } else {
2588 h->next_to_run = curr_queue;
2589 goto cleanup;
2591 } else {
2592 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
2596 cleanup:
2597 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2598 return IRQ_HANDLED;
2601 * We cannot read the structure directly; for portability we must use
2602 * the io functions.
2603 * This is for debug only.
2605 #ifdef CCISS_DEBUG
2606 static void print_cfg_table( CfgTable_struct *tb)
2608 int i;
2609 char temp_name[17];
2611 printk("Controller Configuration information\n");
2612 printk("------------------------------------\n");
2613 for(i=0;i<4;i++)
2614 temp_name[i] = readb(&(tb->Signature[i]));
2615 temp_name[4]='\0';
2616 printk(" Signature = %s\n", temp_name);
2617 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2618 printk(" Transport methods supported = 0x%x\n",
2619 readl(&(tb-> TransportSupport)));
2620 printk(" Transport methods active = 0x%x\n",
2621 readl(&(tb->TransportActive)));
2622 printk(" Requested transport Method = 0x%x\n",
2623 readl(&(tb->HostWrite.TransportRequest)));
2624 printk(" Coalese Interrupt Delay = 0x%x\n",
2625 readl(&(tb->HostWrite.CoalIntDelay)));
2626 printk(" Coalese Interrupt Count = 0x%x\n",
2627 readl(&(tb->HostWrite.CoalIntCount)));
2628 printk(" Max outstanding commands = 0x%d\n",
2629 readl(&(tb->CmdsOutMax)));
2630 printk(" Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
2631 for(i=0;i<16;i++)
2632 temp_name[i] = readb(&(tb->ServerName[i]));
2633 temp_name[16] = '\0';
2634 printk(" Server Name = %s\n", temp_name);
2635 printk(" Heartbeat Counter = 0x%x\n\n\n",
2636 readl(&(tb->HeartBeat)));
2638 #endif /* CCISS_DEBUG */
2640 static void release_io_mem(ctlr_info_t *c)
2642 /* if IO mem was not protected do nothing */
2643 if( c->io_mem_addr == 0)
2644 return;
2645 release_region(c->io_mem_addr, c->io_mem_length);
2646 c->io_mem_addr = 0;
2647 c->io_mem_length = 0;
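/*
 * Translate a config-space base address register offset (PCI_BASE_ADDRESS_0
 * plus a multiple of 4) into the corresponding PCI resource index.  64-bit
 * memory BARs occupy two 4-byte registers, so they advance the offset by 8.
 * Returns -1 if no matching BAR is found.
 */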
2650 static int find_PCI_BAR_index(struct pci_dev *pdev,
2651 unsigned long pci_bar_addr)
2653 int i, offset, mem_type, bar_type;
2654 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2655 return 0;
2656 offset = 0;
2657 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2658 bar_type = pci_resource_flags(pdev, i) &
2659 PCI_BASE_ADDRESS_SPACE;
2660 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2661 offset += 4;
2662 else {
2663 mem_type = pci_resource_flags(pdev, i) &
2664 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2665 switch (mem_type) {
2666 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2667 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2668 offset += 4; /* 32 bit */
2669 break;
2670 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2671 offset += 8;
2672 break;
2673 default: /* reserved in PCI 2.2 */
2674 printk(KERN_WARNING "Base address is invalid\n");
2675 return -1;
2676 break;
2679 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2680 return i+1;
2682 return -1;
2685 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2686 * controllers that are capable. If not, we use IO-APIC mode.
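 * The order tried is MSI-X (four vectors), then MSI, and finally the
 * plain INTx/IO-APIC irq that the system ROM assigned to the device.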
2689 static void __devinit cciss_interrupt_mode(ctlr_info_t *c, struct pci_dev *pdev, __u32 board_id)
2691 #ifdef CONFIG_PCI_MSI
2692 int err;
2693 struct msix_entry cciss_msix_entries[4] = {{0,0}, {0,1},
2694 {0,2}, {0,3}};
2696 /* Some boards advertise MSI but don't really support it */
2697 if ((board_id == 0x40700E11) ||
2698 (board_id == 0x40800E11) ||
2699 (board_id == 0x40820E11) ||
2700 (board_id == 0x40830E11))
2701 goto default_int_mode;
2703 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2704 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2705 if (!err) {
2706 c->intr[0] = cciss_msix_entries[0].vector;
2707 c->intr[1] = cciss_msix_entries[1].vector;
2708 c->intr[2] = cciss_msix_entries[2].vector;
2709 c->intr[3] = cciss_msix_entries[3].vector;
2710 c->msix_vector = 1;
2711 return;
2713 if (err > 0) {
2714 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2715 "available\n", err);
2716 } else {
2717 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2718 err);
2721 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2722 if (!pci_enable_msi(pdev)) {
2723 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2724 c->msi_vector = 1;
2725 return;
2726 } else {
2727 printk(KERN_WARNING "cciss: MSI init failed\n");
2728 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2729 return;
2732 default_int_mode:
2733 #endif /* CONFIG_PCI_MSI */
2734 /* if we get here we're going to use the default interrupt mode */
2735 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2736 return;
2739 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2741 ushort subsystem_vendor_id, subsystem_device_id, command;
2742 __u32 board_id, scratchpad = 0;
2743 __u64 cfg_offset;
2744 __u32 cfg_base_addr;
2745 __u64 cfg_base_addr_index;
2746 int i;
2748 /* check to see if controller has been disabled */
2749 /* BEFORE trying to enable it */
2750 (void) pci_read_config_word(pdev, PCI_COMMAND,&command);
2751 if(!(command & 0x02))
2753 printk(KERN_WARNING "cciss: controller appears to be disabled\n");
2754 return(-1);
2757 if (pci_enable_device(pdev))
2759 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2760 return( -1);
2763 subsystem_vendor_id = pdev->subsystem_vendor;
2764 subsystem_device_id = pdev->subsystem_device;
2765 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2766 subsystem_vendor_id);
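/* board_id packs the PCI subsystem device id in the upper 16 bits and
 * the subsystem vendor id in the lower 16 bits; it is matched against
 * the products[] table and the MSI quirk list. */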
2768 /* search for our IO range so we can protect it */
2769 for(i=0; i<DEVICE_COUNT_RESOURCE; i++)
2771 /* is this an IO range */
2772 if( pci_resource_flags(pdev, i) & 0x01 ) {
2773 c->io_mem_addr = pci_resource_start(pdev, i);
2774 c->io_mem_length = pci_resource_end(pdev, i) -
2775 pci_resource_start(pdev, i) +1;
2776 #ifdef CCISS_DEBUG
2777 printk("IO value found base_addr[%d] %lx %lx\n", i,
2778 c->io_mem_addr, c->io_mem_length);
2779 #endif /* CCISS_DEBUG */
2780 /* register the IO range */
2781 if(!request_region( c->io_mem_addr,
2782 c->io_mem_length, "cciss"))
2784 printk(KERN_WARNING "cciss I/O memory range already in use addr=%lx length=%ld\n",
2785 c->io_mem_addr, c->io_mem_length);
2786 c->io_mem_addr= 0;
2787 c->io_mem_length = 0;
2789 break;
2793 #ifdef CCISS_DEBUG
2794 printk("command = %x\n", command);
2795 printk("irq = %x\n", pdev->irq);
2796 printk("board_id = %x\n", board_id);
2797 #endif /* CCISS_DEBUG */
2799 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2800 * else we use the IO-APIC interrupt assigned to us by system ROM.
2802 cciss_interrupt_mode(c, pdev, board_id);
2805 * Memory base addr is the first addr; the second points to the config
2806 * table
2809 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2810 #ifdef CCISS_DEBUG
2811 printk("address 0 = %x\n", c->paddr);
2812 #endif /* CCISS_DEBUG */
2813 c->vaddr = remap_pci_mem(c->paddr, 200);
2815 /* Wait for the board to become ready. (PCI hotplug needs this.)
2816 * We poll for up to 120 secs, once per 100ms. */
2817 for (i=0; i < 1200; i++) {
2818 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2819 if (scratchpad == CCISS_FIRMWARE_READY)
2820 break;
2821 set_current_state(TASK_INTERRUPTIBLE);
2822 schedule_timeout(HZ / 10); /* wait 100ms */
2824 if (scratchpad != CCISS_FIRMWARE_READY) {
2825 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2826 return -1;
2829 /* get the address index number */
2830 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2831 cfg_base_addr &= (__u32) 0x0000ffff;
2832 #ifdef CCISS_DEBUG
2833 printk("cfg base address = %x\n", cfg_base_addr);
2834 #endif /* CCISS_DEBUG */
2835 cfg_base_addr_index =
2836 find_PCI_BAR_index(pdev, cfg_base_addr);
2837 #ifdef CCISS_DEBUG
2838 printk("cfg base address index = %x\n", cfg_base_addr_index);
2839 #endif /* CCISS_DEBUG */
2840 if (cfg_base_addr_index == -1) {
2841 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2842 release_io_mem(c);
2843 return -1;
2846 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2847 #ifdef CCISS_DEBUG
2848 printk("cfg offset = %x\n", cfg_offset);
2849 #endif /* CCISS_DEBUG */
2850 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2851 cfg_base_addr_index) + cfg_offset,
2852 sizeof(CfgTable_struct));
2853 c->board_id = board_id;
2855 #ifdef CCISS_DEBUG
2856 print_cfg_table(c->cfgtable);
2857 #endif /* CCISS_DEBUG */
2859 for(i=0; i<NR_PRODUCTS; i++) {
2860 if (board_id == products[i].board_id) {
2861 c->product_name = products[i].product_name;
2862 c->access = *(products[i].access);
2863 break;
2866 if (i == NR_PRODUCTS) {
2867 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2868 " to access the Smart Array controller %08lx\n",
2869 (unsigned long)board_id);
2870 return -1;
2872 if ( (readb(&c->cfgtable->Signature[0]) != 'C') ||
2873 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2874 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2875 (readb(&c->cfgtable->Signature[3]) != 'S') )
2877 printk("Does not appear to be a valid CISS config table\n");
2878 return -1;
2881 #ifdef CONFIG_X86
2883 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2884 __u32 prefetch;
2885 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2886 prefetch |= 0x100;
2887 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2889 #endif
2891 #ifdef CCISS_DEBUG
2892 printk("Trying to put board into Simple mode\n");
2893 #endif /* CCISS_DEBUG */
2894 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2895 /* Update the field, and then ring the doorbell */
2896 writel( CFGTBL_Trans_Simple,
2897 &(c->cfgtable->HostWrite.TransportRequest));
2898 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2900 /* under certain very rare conditions, this can take a while.
2901 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2902 * as we enter this code.) */
2903 for(i=0;i<MAX_CONFIG_WAIT;i++) {
2904 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2905 break;
2906 /* delay and try again */
2907 set_current_state(TASK_INTERRUPTIBLE);
2908 schedule_timeout(10);
2911 #ifdef CCISS_DEBUG
2912 printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL));
2913 #endif /* CCISS_DEBUG */
2914 #ifdef CCISS_DEBUG
2915 print_cfg_table(c->cfgtable);
2916 #endif /* CCISS_DEBUG */
2918 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
2920 printk(KERN_WARNING "cciss: unable to get board into"
2921 " simple mode\n");
2922 return -1;
2924 return 0;
2929 * Gets information about the local volumes attached to the controller.
2931 static void cciss_getgeometry(int cntl_num)
2933 ReportLunData_struct *ld_buff;
2934 ReadCapdata_struct *size_buff;
2935 InquiryData_struct *inq_buff;
2936 int return_code;
2937 int i;
2938 int listlength = 0;
2939 __u32 lunid = 0;
2940 int block_size;
2941 int total_size;
2943 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
2944 if (ld_buff == NULL)
2946 printk(KERN_ERR "cciss: out of memory\n");
2947 return;
2949 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
2950 if (size_buff == NULL)
2952 printk(KERN_ERR "cciss: out of memory\n");
2953 kfree(ld_buff);
2954 return;
2956 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
2957 if (inq_buff == NULL)
2959 printk(KERN_ERR "cciss: out of memory\n");
2960 kfree(ld_buff);
2961 kfree(size_buff);
2962 return;
2964 /* Get the firmware version */
2965 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
2966 sizeof(InquiryData_struct), 0, 0 ,0, NULL, TYPE_CMD);
2967 if (return_code == IO_OK)
2969 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
2970 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
2971 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
2972 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
2973 } else /* send command failed */
2975 printk(KERN_WARNING "cciss: unable to determine firmware"
2976 " version of controller\n");
2978 /* Get the number of logical volumes */
2979 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
2980 sizeof(ReportLunData_struct), 0, 0, 0, NULL, TYPE_CMD);
2982 if( return_code == IO_OK)
2984 #ifdef CCISS_DEBUG
2985 printk("LUN Data\n--------------------------\n");
2986 #endif /* CCISS_DEBUG */
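/* The LUN list length comes back as a 4-byte big-endian byte count;
 * each LUN entry that follows is 8 bytes long. */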
2988 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
2989 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
2990 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
2991 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
2992 } else /* reading number of logical volumes failed */
2994 printk(KERN_WARNING "cciss: report logical volume"
2995 " command failed\n");
2996 listlength = 0;
2998 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
2999 if (hba[cntl_num]->num_luns > CISS_MAX_LUN)
3001 printk(KERN_ERR "ciss: only %d number of logical volumes supported\n",
3002 CISS_MAX_LUN);
3003 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3005 #ifdef CCISS_DEBUG
3006 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
3007 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
3008 ld_buff->LUNListLength[3], hba[cntl_num]->num_luns);
3009 #endif /* CCISS_DEBUG */
3011 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
3012 // for(i=0; i< hba[cntl_num]->num_luns; i++)
3013 for(i=0; i < CISS_MAX_LUN; i++)
3015 if (i < hba[cntl_num]->num_luns){
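/* Assemble the 4-byte LUN id, stored least significant
 * byte first, into a host __u32. */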
3016 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3017 << 24;
3018 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3019 << 16;
3020 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3021 << 8;
3022 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3024 hba[cntl_num]->drv[i].LunID = lunid;
3027 #ifdef CCISS_DEBUG
3028 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3029 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3030 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3031 hba[cntl_num]->drv[i].LunID);
3032 #endif /* CCISS_DEBUG */
3033 cciss_read_capacity(cntl_num, i, size_buff, 0,
3034 &total_size, &block_size);
3035 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3036 block_size, inq_buff, &hba[cntl_num]->drv[i]);
3037 } else {
3038 /* initialize raid_level to indicate a free space */
3039 hba[cntl_num]->drv[i].raid_level = -1;
3042 kfree(ld_buff);
3043 kfree(size_buff);
3044 kfree(inq_buff);
3047 /* Function to find the first free pointer into our hba[] array */
3048 /* Returns -1 if no free entries are left. */
3049 static int alloc_cciss_hba(void)
3051 struct gendisk *disk[NWD];
3052 int i, n;
3053 for (n = 0; n < NWD; n++) {
3054 disk[n] = alloc_disk(1 << NWD_SHIFT);
3055 if (!disk[n])
3056 goto out;
3059 for(i=0; i< MAX_CTLR; i++) {
3060 if (!hba[i]) {
3061 ctlr_info_t *p;
3062 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3063 if (!p)
3064 goto Enomem;
3065 for (n = 0; n < NWD; n++)
3066 p->gendisk[n] = disk[n];
3067 hba[i] = p;
3068 return i;
3071 printk(KERN_WARNING "cciss: This driver supports a maximum"
3072 " of %d controllers.\n", MAX_CTLR);
3073 goto out;
3074 Enomem:
3075 printk(KERN_ERR "cciss: out of memory.\n");
3076 out:
3077 while (n--)
3078 put_disk(disk[n]);
3079 return -1;
3082 static void free_hba(int i)
3084 ctlr_info_t *p = hba[i];
3085 int n;
3087 hba[i] = NULL;
3088 for (n = 0; n < NWD; n++)
3089 put_disk(p->gendisk[n]);
3090 kfree(p);
3094 * This is it. Find all the controllers and register them. I really hate
3095 * stealing all these major device numbers.
3096 * returns the number of block devices registered.
3098 static int __devinit cciss_init_one(struct pci_dev *pdev,
3099 const struct pci_device_id *ent)
3101 request_queue_t *q;
3102 int i;
3103 int j;
3104 int rc;
3106 printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
3107 " bus %d dev %d func %d\n",
3108 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
3109 PCI_FUNC(pdev->devfn));
3110 i = alloc_cciss_hba();
3111 if(i < 0)
3112 return (-1);
3114 hba[i]->busy_initializing = 1;
3116 if (cciss_pci_init(hba[i], pdev) != 0)
3117 goto clean1;
3119 sprintf(hba[i]->devname, "cciss%d", i);
3120 hba[i]->ctlr = i;
3121 hba[i]->pdev = pdev;
3123 /* configure PCI DMA stuff */
3124 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3125 printk("cciss: using DAC cycles\n");
3126 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3127 printk("cciss: not using DAC cycles\n");
3128 else {
3129 printk("cciss: no suitable DMA available\n");
3130 goto clean1;
3134 * register with the major number, or get a dynamic major number
3135 * by passing 0 as argument. This is done for greater than
3136 * 8 controller support.
3138 if (i < MAX_CTLR_ORIG)
3139 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3140 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3141 if(rc == -EBUSY || rc == -EINVAL) {
3142 printk(KERN_ERR
3143 "cciss: Unable to get major number %d for %s "
3144 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3145 goto clean1;
3147 else {
3148 if (i >= MAX_CTLR_ORIG)
3149 hba[i]->major = rc;
3152 /* make sure the board interrupts are off */
3153 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3154 if( request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3155 SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
3156 hba[i]->devname, hba[i])) {
3157 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3158 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3159 goto clean2;
3161 hba[i]->cmd_pool_bits = kmalloc(((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long), GFP_KERNEL);
3162 hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
3163 hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3164 &(hba[i]->cmd_pool_dhandle));
3165 hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
3166 hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3167 &(hba[i]->errinfo_pool_dhandle));
3168 if((hba[i]->cmd_pool_bits == NULL)
3169 || (hba[i]->cmd_pool == NULL)
3170 || (hba[i]->errinfo_pool == NULL)) {
3171 printk( KERN_ERR "cciss: out of memory");
3172 goto clean4;
3174 #ifdef CONFIG_CISS_SCSI_TAPE
3175 hba[i]->scsi_rejects.complete =
3176 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3177 (NR_CMDS + 5), GFP_KERNEL);
3178 if (hba[i]->scsi_rejects.complete == NULL) {
3179 printk( KERN_ERR "cciss: out of memory");
3180 goto clean4;
3182 #endif
3183 spin_lock_init(&hba[i]->lock);
3185 /* Initialize the pdev driver private data.
3186 have it point to hba[i]. */
3187 pci_set_drvdata(pdev, hba[i]);
3188 /* command and error info recs zeroed out before
3189 they are used */
3190 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
3192 #ifdef CCISS_DEBUG
3193 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
3194 #endif /* CCISS_DEBUG */
3196 cciss_getgeometry(i);
3198 cciss_scsi_setup(i);
3200 /* Turn the interrupts on so we can service requests */
3201 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3203 cciss_procinit(i);
3204 hba[i]->busy_initializing = 0;
3206 for(j=0; j < NWD; j++) { /* mfm */
3207 drive_info_struct *drv = &(hba[i]->drv[j]);
3208 struct gendisk *disk = hba[i]->gendisk[j];
3210 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3211 if (!q) {
3212 printk(KERN_ERR
3213 "cciss: unable to allocate queue for disk %d\n",
3215 break;
3217 drv->queue = q;
3219 q->backing_dev_info.ra_pages = READ_AHEAD;
3220 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3222 /* This is a hardware imposed limit. */
3223 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3225 /* This is a limit in the driver and could be eliminated. */
3226 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3228 blk_queue_max_sectors(q, 512);
3230 blk_queue_softirq_done(q, cciss_softirq_done);
3232 q->queuedata = hba[i];
3233 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3234 sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j);
3235 disk->major = hba[i]->major;
3236 disk->first_minor = j << NWD_SHIFT;
3237 disk->fops = &cciss_fops;
3238 disk->queue = q;
3239 disk->private_data = drv;
3240 /* we must register the controller even if no disks exist */
3241 /* this is for the online array utilities */
3242 if(!drv->heads && j)
3243 continue;
3244 blk_queue_hardsect_size(q, drv->block_size);
3245 set_capacity(disk, drv->nr_blocks);
3246 add_disk(disk);
3249 return(1);
3251 clean4:
3252 #ifdef CONFIG_CISS_SCSI_TAPE
3253 kfree(hba[i]->scsi_rejects.complete);
3254 #endif
3255 kfree(hba[i]->cmd_pool_bits);
3256 if(hba[i]->cmd_pool)
3257 pci_free_consistent(hba[i]->pdev,
3258 NR_CMDS * sizeof(CommandList_struct),
3259 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3260 if(hba[i]->errinfo_pool)
3261 pci_free_consistent(hba[i]->pdev,
3262 NR_CMDS * sizeof( ErrorInfo_struct),
3263 hba[i]->errinfo_pool,
3264 hba[i]->errinfo_pool_dhandle);
3265 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3266 clean2:
3267 unregister_blkdev(hba[i]->major, hba[i]->devname);
3268 clean1:
3269 release_io_mem(hba[i]);
3270 hba[i]->busy_initializing = 0;
3271 free_hba(i);
3272 return(-1);
3275 static void __devexit cciss_remove_one (struct pci_dev *pdev)
3277 ctlr_info_t *tmp_ptr;
3278 int i, j;
3279 char flush_buf[4];
3280 int return_code;
3282 if (pci_get_drvdata(pdev) == NULL)
3284 printk( KERN_ERR "cciss: Unable to remove device\n");
3285 return;
3287 tmp_ptr = pci_get_drvdata(pdev);
3288 i = tmp_ptr->ctlr;
3289 if (hba[i] == NULL)
3291 printk(KERN_ERR "cciss: device appears to "
3292 "already be removed \n");
3293 return;
3295 /* Turn board interrupts off and send the flush cache command */
3296 /* sendcmd will turn off interrupts and send the flush cache command
3297 * to write all data in the battery-backed cache out to the disks */
3298 memset(flush_buf, 0, 4);
3299 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3300 TYPE_CMD);
3301 if(return_code != IO_OK)
3303 printk(KERN_WARNING "Error Flushing cache on controller %d\n",
3306 free_irq(hba[i]->intr[2], hba[i]);
3308 #ifdef CONFIG_PCI_MSI
3309 if (hba[i]->msix_vector)
3310 pci_disable_msix(hba[i]->pdev);
3311 else if (hba[i]->msi_vector)
3312 pci_disable_msi(hba[i]->pdev);
3313 #endif /* CONFIG_PCI_MSI */
3315 pci_set_drvdata(pdev, NULL);
3316 iounmap(hba[i]->vaddr);
3317 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3318 unregister_blkdev(hba[i]->major, hba[i]->devname);
3319 remove_proc_entry(hba[i]->devname, proc_cciss);
3321 /* remove it from the disk list */
3322 for (j = 0; j < NWD; j++) {
3323 struct gendisk *disk = hba[i]->gendisk[j];
3324 if (disk) {
3325 request_queue_t *q = disk->queue;
3327 if (disk->flags & GENHD_FL_UP)
3328 del_gendisk(disk);
3329 if (q)
3330 blk_cleanup_queue(q);
3334 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3335 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3336 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
3337 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3338 kfree(hba[i]->cmd_pool_bits);
3339 #ifdef CONFIG_CISS_SCSI_TAPE
3340 kfree(hba[i]->scsi_rejects.complete);
3341 #endif
3342 release_io_mem(hba[i]);
3343 free_hba(i);
3346 static struct pci_driver cciss_pci_driver = {
3347 .name = "cciss",
3348 .probe = cciss_init_one,
3349 .remove = __devexit_p(cciss_remove_one),
3350 .id_table = cciss_pci_device_id, /* id_table */
3354 * This is it. Register the PCI driver information for the cards we control;
3355 * the OS will call our registered routines when it finds one of our cards.
3357 static int __init cciss_init(void)
3359 printk(KERN_INFO DRIVER_NAME "\n");
3361 /* Register for our PCI devices */
3362 return pci_register_driver(&cciss_pci_driver);
3365 static void __exit cciss_cleanup(void)
3367 int i;
3369 pci_unregister_driver(&cciss_pci_driver);
3370 /* double check that all controller entries have been removed */
3371 for (i=0; i< MAX_CTLR; i++)
3373 if (hba[i] != NULL)
3375 printk(KERN_WARNING "cciss: had to remove"
3376 " controller %d\n", i);
3377 cciss_remove_one(hba[i]->pdev);
3380 remove_proc_entry("cciss", proc_root_driver);
3383 static void fail_all_cmds(unsigned long ctlr)
3385 /* If we get here, the board is apparently dead. */
3386 ctlr_info_t *h = hba[ctlr];
3387 CommandList_struct *c;
3388 unsigned long flags;
3390 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3391 h->alive = 0; /* the controller apparently died... */
3393 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3395 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3397 /* move everything off the request queue onto the completed queue */
3398 while( (c = h->reqQ) != NULL ) {
3399 removeQ(&(h->reqQ), c);
3400 h->Qdepth--;
3401 addQ (&(h->cmpQ), c);
3404 /* Now, fail everything on the completed queue with a HW error */
3405 while( (c = h->cmpQ) != NULL ) {
3406 removeQ(&h->cmpQ, c);
3407 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3408 if (c->cmd_type == CMD_RWREQ) {
3409 complete_command(h, c, 0);
3410 } else if (c->cmd_type == CMD_IOCTL_PEND)
3411 complete(c->waiting);
3412 #ifdef CONFIG_CISS_SCSI_TAPE
3413 else if (c->cmd_type == CMD_SCSI)
3414 complete_scsi_command(c, 0, 0);
3415 #endif
3417 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3418 return;
3421 module_init(cciss_init);
3422 module_exit(cciss_cleanup);