/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Abstract: Contains all routines for control of the AFA comm layer
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* ssleep prototype */
#include <linux/kthread.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include "aacraid.h"

/**
 *	ioctl_send_fib	-	send a FIB from userspace
 *	@dev:	adapter being processed
 *	@arg:	arguments to the ioctl call
 *
 *	This routine sends a fib to the adapter on behalf of a user level
 *	program.
 */
# define AAC_DEBUG_PREAMBLE	KERN_INFO
# define AAC_DEBUG_POSTAMBLE

static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
{
	struct hw_fib * hw_fib = (struct hw_fib *)0;
	dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
	struct fib *fibptr;
	struct hw_fib *kfib;
	unsigned int size;
	int retval;

	fibptr = aac_fib_alloc(dev);
	if (fibptr == NULL) {
		return -ENOMEM;
	}

	kfib = fibptr->hw_fib_va;
	/*
	 *	First copy in the header so that we can check the size field.
	 */
	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
		aac_fib_free(fibptr);
		return -EFAULT;
	}
	/*
	 *	Since we copy based on the fib header size, make sure that we
	 *	will not overrun the buffer when we copy the memory. Return
	 *	an error if we would.
	 */
	size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
	if (size < le16_to_cpu(kfib->header.SenderSize))
		size = le16_to_cpu(kfib->header.SenderSize);
	if (size > dev->max_fib_size) {
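		/*
		 * The request is bigger than the preallocated FIB buffer
		 * (typically the FSACTL_SEND_LARGE_FIB path): swap in a larger
		 * coherent DMA buffer for the duration of this call and
		 * restore the original buffer at cleanup below.
		 */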
		if (size > 2048) {
			retval = -EINVAL;
			goto cleanup;
		}
		/* Highjack the hw_fib */
		hw_fib = fibptr->hw_fib_va;
		hw_fib_pa = fibptr->hw_fib_pa;
		fibptr->hw_fib_va = kfib = pci_alloc_consistent(dev->pdev, size, &fibptr->hw_fib_pa);
		memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
		memcpy(kfib, hw_fib, dev->max_fib_size);
	}

	if (copy_from_user(kfib, arg, size)) {
		retval = -EFAULT;
		goto cleanup;
	}

	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
		aac_adapter_interrupt(dev);
		/*
		 *	Since we didn't really send a fib, zero out the state to allow
		 *	cleanup code not to assert.
		 */
		kfib->header.XferState = 0;
	} else {
		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
				le16_to_cpu(kfib->header.Size), FsaNormal,
				1, 1, NULL, NULL);
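		/*
		 * The send above is synchronous (wait flag set): aac_fib_send()
		 * returns only after the adapter has completed the FIB, and
		 * the reply is written back into kfib for the copy_to_user()
		 * further down.
		 */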
		if (retval)
			goto cleanup;
		if (aac_fib_complete(fibptr) != 0) {
			retval = -EINVAL;
			goto cleanup;
		}
	}
	/*
	 *	Make sure that the size returned by the adapter (which includes
	 *	the header) is less than or equal to the size of a fib, so we
	 *	don't corrupt application data. Then copy that size to the user
	 *	buffer. (Don't try to add the header information again, since it
	 *	was already included by the adapter.)
	 */
	retval = 0;
	if (copy_to_user(arg, (void *)kfib, size))
		retval = -EFAULT;
cleanup:
	if (hw_fib) {
		pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa);
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_fib_va = hw_fib;
	}
	if (retval != -EINTR)
		aac_fib_free(fibptr);
	return retval;
}

/**
 *	open_getadapter_fib	-	open an AdapterFibContext
 *	@dev: adapter to use
 *	@arg: ioctl argument
 *
 *	This routine creates a new AdapterFibContext for the caller and
 *	copies its unique handle back to the user, so that later
 *	FSACTL_GET_NEXT_ADAPTER_FIB calls can fetch the AIFs queued on it.
 */
static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context * fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head * entry;
		struct aac_fib_context * context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 *	Yes yes, I know this could be an index, but we have a
		 *	better guarantee of uniqueness for the locked loop below.
		 *	Without the aid of a persistent history, this also helps
		 *	reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 *	Initialize the mutex used to wait for the next AIF.
		 */
		init_MUTEX_LOCKED(&fibctx->wait_sem);
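		/*
		 * The semaphore starts out locked: the first
		 * down_interruptible() in next_getadapter_fib() sleeps until
		 * the AIF handling thread (aac_command_thread) queues a fib
		 * on this context and does the matching up().
		 */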
		fibctx->wait = 0;
		/*
		 *	Initialize the fibs and set the count of fibs on
		 *	the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		fibctx->jiffies = jiffies/HZ;
		/*
		 *	Now add this context onto the adapter's
		 *	AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
						sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}

/**
 *	next_getadapter_fib	-	get the next fib
 *	@dev: adapter to use
 *	@arg: ioctl argument
 *
 *	This routine will get the next Fib, if available, from the AdapterFibContext
 *	passed in from the user.
 */

static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head * entry;
	unsigned long flags;

	if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
		return -EFAULT;
	/*
	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 *	Search the list of AdapterFibContext addresses on the adapter
	 *	to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 *	Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) { /* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	/*
	 *	If there are no fibs to send back, then either wait or return
	 *	-EAGAIN
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 *	Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		spin_unlock_irqrestore(&dev->fib_lock, flags);
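		/*
		 * The fib has already been unlinked from the context, so the
		 * spinlock can be dropped before copy_to_user(), which may
		 * fault and sleep.
		 */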
		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 *	Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			ssleep(1);
			dev->aif_thread = 0;
			dev->thread = kthread_run(aac_command_thread, dev, dev->name);
			ssleep(1);
		}
		if (f.wait) {
			if(down_interruptible(&fibctx->wait_sem) < 0) {
				status = -EINTR;
			} else {
				/* Lock again and retry */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	fibctx->jiffies = jiffies/HZ;
	return status;
}
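
/*
 * aac_close_fib_context is called with dev->fib_lock held; it frees any AIF
 * copies still queued on the context and unlinks it from the adapter's list
 * before freeing the context itself.
 */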
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
{
	struct fib *fib;

	/*
	 *	First free any FIBs that have not been consumed.
	 */
	while (!list_empty(&fibctx->fib_list)) {
		struct list_head * entry;
		/*
		 *	Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);
		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		/*
		 *	Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
	}
	/*
	 *	Remove the Context from the AdapterFibContext List
	 */
	list_del(&fibctx->next);
	/*
	 *	Invalidate context
	 */
	fibctx->type = 0;
	/*
	 *	Free the space occupied by the Context
	 */
	kfree(fibctx);
	return 0;
}

/**
 *	close_getadapter_fib	-	close down user fib context
 *	@dev: adapter
 *	@arg: ioctl arguments
 *
 *	This routine will close down the fibctx passed in from the user.
 */
static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;
	unsigned long flags;
	struct list_head * entry;

	/*
	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 *	Search the list of AdapterFibContext addresses on the adapter
	 *	to be sure this is a valid address
	 */
	entry = dev->fib_list.next;
	fibctx = NULL;

	while(entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 *	Extract the fibctx from the input parameters
		 */
		if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
			break;
		entry = entry->next;
		fibctx = NULL;
	}

	if (!fibctx)
		return 0; /* Already gone */

	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context)))
		return -EINVAL;
	spin_lock_irqsave(&dev->fib_lock, flags);
	status = aac_close_fib_context(dev, fibctx);
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	return status;
}

/**
 *	check_revision	-	report the driver version
 *	@dev: adapter
 *	@arg: ioctl arguments
 *
 *	This routine returns the driver version.
 *	Under Linux, there have been no version incompatibilities, so this is
 *	simple!
 */
static int check_revision(struct aac_dev *dev, void __user *arg)
{
	struct revision response;
	char *driver_version = aac_driver_version;
	u32 version;

	version = (simple_strtol(driver_version,
				&driver_version, 10) << 24) | 0x00000400;
	version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
	version += simple_strtol(driver_version + 1, NULL, 10);
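	/*
	 * version now holds the dotted aac_driver_version string packed into
	 * a single 32-bit value: major in bits 31-24, minor in bits 23-16,
	 * patch in the low bits, with bit 10 (0x00000400) always set.
	 */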
	response.version = cpu_to_le32(version);
#	ifdef AAC_DRIVER_BUILD
	response.build = cpu_to_le32(AAC_DRIVER_BUILD);
#	else
	response.build = cpu_to_le32(9999);
#	endif

	if (copy_to_user(arg, &response, sizeof(response)))
		return -EFAULT;
	return 0;
}

static int aac_send_raw_srb(struct aac_dev * dev, void __user * arg)
{
	struct fib *srbfib;
	int status;
	struct aac_srb *srbcmd = NULL;
	struct user_aac_srb *user_srbcmd = NULL;
	struct user_aac_srb __user *user_srb = arg;
	struct aac_srb_reply __user *user_reply;
	struct aac_srb_reply * reply;
	u32 fibsize = 0;
	u32 flags = 0;
	s32 rcode = 0;
	u32 data_dir;
	void __user *sg_user[32];
	void *sg_list[32];
	u32 sg_indx = 0;
	u32 byte_count = 0;
	u32 actual_fibsize64, actual_fibsize = 0;
	int i;

	if (dev->in_reset) {
		dprintk((KERN_DEBUG "aacraid: send raw srb -EBUSY\n"));
		return -EBUSY;
	}
	if (!capable(CAP_SYS_ADMIN)){
		dprintk((KERN_DEBUG "aacraid: No permission to send raw srb\n"));
		return -EPERM;
	}
	/*
	 *	Allocate and initialize a Fib then setup a SRB command
	 */
	if (!(srbfib = aac_fib_alloc(dev))) {
		return -ENOMEM;
	}
	aac_fib_init(srbfib);

	srbcmd = (struct aac_srb *) fib_data(srbfib);

	memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
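	/*
	 * sg_user[] remembers the user-space buffer addresses and sg_list[]
	 * the matching kernel bounce buffers, so data can be copied back to
	 * the user for SRB_DataIn requests and everything freed at cleanup.
	 */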
	if(copy_from_user(&fibsize, &user_srb->count, sizeof(u32))){
		dprintk((KERN_DEBUG "aacraid: Could not copy data size from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) {
		rcode = -EINVAL;
		goto cleanup;
	}

	user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
	if (!user_srbcmd) {
		dprintk((KERN_DEBUG "aacraid: Could not make a copy of the srb\n"));
		rcode = -ENOMEM;
		goto cleanup;
	}
	if(copy_from_user(user_srbcmd, user_srb, fibsize)){
		dprintk((KERN_DEBUG "aacraid: Could not copy srb from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	user_reply = arg + fibsize;

	flags = user_srbcmd->flags; /* from user in cpu order */
	// Fix up srb for endian and force some values
	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);	// Force this
	srbcmd->channel  = cpu_to_le32(user_srbcmd->channel);
	srbcmd->id       = cpu_to_le32(user_srbcmd->id);
	srbcmd->lun      = cpu_to_le32(user_srbcmd->lun);
	srbcmd->timeout  = cpu_to_le32(user_srbcmd->timeout);
	srbcmd->flags    = cpu_to_le32(flags);
	srbcmd->retry_limit = 0; // Obsolete parameter
	srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
	memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));

	switch (flags & (SRB_DataIn | SRB_DataOut)) {
	case SRB_DataOut:
		data_dir = DMA_TO_DEVICE;
		break;
	case (SRB_DataIn | SRB_DataOut):
		data_dir = DMA_BIDIRECTIONAL;
		break;
	case SRB_DataIn:
		data_dir = DMA_FROM_DEVICE;
		break;
	default:
		data_dir = DMA_NONE;
	}
	if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
		dprintk((KERN_DEBUG "aacraid: too many sg entries %d\n",
			le32_to_cpu(srbcmd->sg.count)));
		rcode = -EINVAL;
		goto cleanup;
	}
	actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
		((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
	actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
		(sizeof(struct sgentry64) - sizeof(struct sgentry));
	/* User made a mistake - should not continue */
	if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
		dprintk((KERN_DEBUG "aacraid: Bad Size specified in "
			"Raw SRB command calculated fibsize=%lu;%lu "
			"user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
			"issued fibsize=%d\n",
			actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
			sizeof(struct aac_srb), sizeof(struct sgentry),
			sizeof(struct sgentry64), fibsize));
		rcode = -EINVAL;
		goto cleanup;
	}

	if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
		dprintk((KERN_DEBUG "aacraid: SG with no direction specified in Raw SRB command\n"));
		rcode = -EINVAL;
		goto cleanup;
	}
	if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
		struct user_sgmap64 *upsg = (struct user_sgmap64 *)&user_srbcmd->sg;
		struct sgmap64 *psg = (struct sgmap64 *)&srbcmd->sg;
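
		/*
		 * 64-bit capable adapter: each user buffer is bounced through
		 * a kernel buffer, DMA-mapped with pci_map_single(), and
		 * described to the firmware as a sgentry64 with the address
		 * split into low/high 32-bit halves.
		 */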
		/*
		 * This should also catch if user used the 32 bit sgmap
		 */
		if (actual_fibsize64 == fibsize) {
			actual_fibsize = actual_fibsize64;
			for (i = 0; i < upsg->count; i++) {
				u64 addr;
				void *p;
				/* Does this really need to be GFP_DMA? */
				p = kmalloc(upsg->sg[i].count, GFP_KERNEL|__GFP_DMA);
				if(!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						upsg->sg[i].count, i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)upsg->sg[i].addr[0];
				addr += ((u64)upsg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)(uintptr_t)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if(copy_from_user(p, sg_user[i], upsg->sg[i].count)){
						dprintk((KERN_DEBUG "aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += upsg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
			}
		} else {
			struct user_sgmap *usg;
			usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
				+ sizeof(struct sgmap), GFP_KERNEL);
			if (!usg) {
				dprintk((KERN_DEBUG "aacraid: Allocation error in Raw SRB command\n"));
				rcode = -ENOMEM;
				goto cleanup;
			}
			memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb)
				+ sizeof(struct sgmap));
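			/*
			 * usg is a kernel scratch copy of the user's 32-bit sg
			 * map; its entries are re-emitted below as 64-bit
			 * sgentry64 descriptors in psg, so the fib size sent
			 * to the adapter grows to actual_fibsize64.
			 */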
			actual_fibsize = actual_fibsize64;

			for (i = 0; i < usg->count; i++) {
				u64 addr;
				void *p;
				/* Does this really need to be GFP_DMA? */
				p = kmalloc(usg->sg[i].count, GFP_KERNEL|__GFP_DMA);
				if(!p) {
					kfree (usg);
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						usg->sg[i].count, i, usg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if(copy_from_user(p, sg_user[i], upsg->sg[i].count)){
						kfree (usg);
						dprintk((KERN_DEBUG "aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += usg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
			}
			kfree (usg);
		}
		srbcmd->count = cpu_to_le32(byte_count);
		psg->count = cpu_to_le32(sg_indx+1);
		status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
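	/*
	 * Adapters without AAC_OPT_SGMAP_HOST64 take the 32-bit sg map path
	 * below: single-word bus addresses and the plain ScsiPortCommand.
	 */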
	} else {
		struct user_sgmap *upsg = &user_srbcmd->sg;
		struct sgmap *psg = &srbcmd->sg;

		if (actual_fibsize64 == fibsize) {
			struct user_sgmap64 *usg = (struct user_sgmap64 *)upsg;
			for (i = 0; i < upsg->count; i++) {
				uintptr_t addr;
				void *p;
				/* Does this really need to be GFP_DMA? */
				p = kmalloc(usg->sg[i].count, GFP_KERNEL|__GFP_DMA);
				if(!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						usg->sg[i].count, i, usg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)usg->sg[i].addr[0];
				addr += ((u64)usg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if(copy_from_user(p, sg_user[i], usg->sg[i].count)){
						dprintk((KERN_DEBUG "aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);

				psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
				byte_count += usg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
			}
		} else {
			for (i = 0; i < upsg->count; i++) {
				dma_addr_t addr;
				void *p;
				p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						upsg->sg[i].count, i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if(copy_from_user(p, sg_user[i],
							upsg->sg[i].count)) {
						dprintk((KERN_DEBUG "aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
					upsg->sg[i].count, data_dir);

				psg->sg[i].addr = cpu_to_le32(addr);
				byte_count += upsg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
			}
		}
		srbcmd->count = cpu_to_le32(byte_count);
		psg->count = cpu_to_le32(sg_indx+1);
		status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
	}
	if (status == -EINTR) {
		rcode = -EINTR;
		goto cleanup;
	}

	if (status != 0) {
		dprintk((KERN_DEBUG "aacraid: Could not send raw srb fib to hba\n"));
		rcode = -ENXIO;
		goto cleanup;
	}

	if (flags & SRB_DataIn) {
		for(i = 0 ; i <= sg_indx; i++){
			byte_count = le32_to_cpu(
				(dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
					? ((struct sgmap64 *)&srbcmd->sg)->sg[i].count
					: srbcmd->sg.sg[i].count);
			if(copy_to_user(sg_user[i], sg_list[i], byte_count)){
				dprintk((KERN_DEBUG "aacraid: Could not copy sg data to user\n"));
				rcode = -EFAULT;
				goto cleanup;
			}
		}
	}

	reply = (struct aac_srb_reply *) fib_data(srbfib);
	if(copy_to_user(user_reply, reply, sizeof(struct aac_srb_reply))){
		dprintk((KERN_DEBUG "aacraid: Could not copy reply to user\n"));
		rcode = -EFAULT;
	}

cleanup:
	kfree(user_srbcmd);
	for(i = 0; i <= sg_indx; i++){
		kfree(sg_list[i]);
	}
	if (rcode != -EINTR) {
		aac_fib_complete(srbfib);
		aac_fib_free(srbfib);
	}
	return rcode;
}

struct aac_pci_info {
	u32 bus;
	u32 slot;
};

static int aac_get_pci_info(struct aac_dev * dev, void __user *arg)
{
	struct aac_pci_info pci_info;

	pci_info.bus = dev->pdev->bus->number;
	pci_info.slot = PCI_SLOT(dev->pdev->devfn);

	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
		return -EFAULT;
	}
	return 0;
}

int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
{
	int status;

	/*
	 *	HBA gets first crack
	 */
	status = aac_dev_ioctl(dev, cmd, arg);
	if (status != -ENOTTY)
		return status;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
		status = check_revision(dev, arg);
		break;
	case FSACTL_SEND_LARGE_FIB:
	case FSACTL_SENDFIB:
		status = ioctl_send_fib(dev, arg);
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		status = open_getadapter_fib(dev, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		status = next_getadapter_fib(dev, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		status = close_getadapter_fib(dev, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		status = aac_send_raw_srb(dev, arg);
		break;
	case FSACTL_GET_PCI_INFO:
		status = aac_get_pci_info(dev, arg);
		break;