2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc.
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
8 * Copyright (c) 2000-2010 Adaptec, Inc.
9 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
10 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
29 * Abstract: Contains all routines for control of the AFA comm layer
33 #include <linux/kernel.h>
34 #include <linux/init.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/spinlock.h>
38 #include <linux/slab.h>
39 #include <linux/completion.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/blkdev.h>
42 #include <linux/delay.h> /* ssleep prototype */
43 #include <linux/kthread.h>
44 #include <linux/semaphore.h>
45 #include <linux/uaccess.h>
46 #include <scsi/scsi_host.h>
51 * ioctl_send_fib - send a FIB from userspace
52 * @dev: adapter is being processed
53 * @arg: arguments to the ioctl call
55 * This routine sends a fib to the adapter on behalf of a user level
58 # define AAC_DEBUG_PREAMBLE KERN_INFO
59 # define AAC_DEBUG_POSTAMBLE
61 static int ioctl_send_fib(struct aac_dev
* dev
, void __user
*arg
)
65 struct hw_fib
* hw_fib
= (struct hw_fib
*)0;
66 dma_addr_t hw_fib_pa
= (dma_addr_t
)0LL;
67 unsigned int size
, osize
;
73 fibptr
= aac_fib_alloc(dev
);
78 kfib
= fibptr
->hw_fib_va
;
80 * First copy in the header so that we can check the size field.
82 if (copy_from_user((void *)kfib
, arg
, sizeof(struct aac_fibhdr
))) {
87 * Since we copy based on the fib header size, make sure that we
88 * will not overrun the buffer when we copy the memory. Return
89 * an error if we would.
91 osize
= size
= le16_to_cpu(kfib
->header
.Size
) +
92 sizeof(struct aac_fibhdr
);
93 if (size
< le16_to_cpu(kfib
->header
.SenderSize
))
94 size
= le16_to_cpu(kfib
->header
.SenderSize
);
95 if (size
> dev
->max_fib_size
) {
103 kfib
= pci_alloc_consistent(dev
->pdev
, size
, &daddr
);
109 /* Highjack the hw_fib */
110 hw_fib
= fibptr
->hw_fib_va
;
111 hw_fib_pa
= fibptr
->hw_fib_pa
;
112 fibptr
->hw_fib_va
= kfib
;
113 fibptr
->hw_fib_pa
= daddr
;
114 memset(((char *)kfib
) + dev
->max_fib_size
, 0, size
- dev
->max_fib_size
);
115 memcpy(kfib
, hw_fib
, dev
->max_fib_size
);
118 if (copy_from_user(kfib
, arg
, size
)) {
123 /* Sanity check the second copy */
124 if ((osize
!= le16_to_cpu(kfib
->header
.Size
) +
125 sizeof(struct aac_fibhdr
))
126 || (size
< le16_to_cpu(kfib
->header
.SenderSize
))) {
131 if (kfib
->header
.Command
== cpu_to_le16(TakeABreakPt
)) {
132 aac_adapter_interrupt(dev
);
134 * Since we didn't really send a fib, zero out the state to allow
135 * cleanup code not to assert.
137 kfib
->header
.XferState
= 0;
139 retval
= aac_fib_send(le16_to_cpu(kfib
->header
.Command
), fibptr
,
140 le16_to_cpu(kfib
->header
.Size
) , FsaNormal
,
145 if (aac_fib_complete(fibptr
) != 0) {
151 * Make sure that the size returned by the adapter (which includes
152 * the header) is less than or equal to the size of a fib, so we
153 * don't corrupt application data. Then copy that size to the user
154 * buffer. (Don't try to add the header information again, since it
155 * was already included by the adapter.)
159 if (copy_to_user(arg
, (void *)kfib
, size
))
163 pci_free_consistent(dev
->pdev
, size
, kfib
, fibptr
->hw_fib_pa
);
164 fibptr
->hw_fib_pa
= hw_fib_pa
;
165 fibptr
->hw_fib_va
= hw_fib
;
167 if (retval
!= -ERESTARTSYS
)
168 aac_fib_free(fibptr
);
173 * open_getadapter_fib - allocate a new AdapterFibContext
175 * This routine allocates a fib context, registers it on the adapter's
176 * fib_list, and returns its opaque unique id to the user.
179 static int open_getadapter_fib(struct aac_dev
* dev
, void __user
*arg
)
181 struct aac_fib_context
* fibctx
;
184 fibctx
= kmalloc(sizeof(struct aac_fib_context
), GFP_KERNEL
);
185 if (fibctx
== NULL
) {
189 struct list_head
* entry
;
190 struct aac_fib_context
* context
;
192 fibctx
->type
= FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT
;
193 fibctx
->size
= sizeof(struct aac_fib_context
);
195 * Yes yes, I know this could be an index, but we have a
196 * better guarantee of uniqueness for the locked loop below.
197 * Without the aid of a persistent history, this also helps
198 * reduce the chance that the opaque context would be reused.
200 fibctx
->unique
= (u32
)((ulong
)fibctx
& 0xFFFFFFFF);
202 * Initialize the mutex used to wait for the next AIF.
204 sema_init(&fibctx
->wait_sem
, 0);
207 * Initialize the fibs and set the count of fibs on
211 INIT_LIST_HEAD(&fibctx
->fib_list
);
212 fibctx
->jiffies
= jiffies
/HZ
;
214 * Now add this context onto the adapter's
215 * AdapterFibContext list.
217 spin_lock_irqsave(&dev
->fib_lock
, flags
);
218 /* Ensure that we have a unique identifier */
219 entry
= dev
->fib_list
.next
;
220 while (entry
!= &dev
->fib_list
) {
221 context
= list_entry(entry
, struct aac_fib_context
, next
);
222 if (context
->unique
== fibctx
->unique
) {
223 /* Not unique (32 bits) */
225 entry
= dev
->fib_list
.next
;
230 list_add_tail(&fibctx
->next
, &dev
->fib_list
);
231 spin_unlock_irqrestore(&dev
->fib_lock
, flags
);
232 if (copy_to_user(arg
, &fibctx
->unique
,
233 sizeof(fibctx
->unique
))) {
243 * next_getadapter_fib - get the next fib
244 * @dev: adapter to use
245 * @arg: ioctl argument
247 * This routine will get the next Fib, if available, from the AdapterFibContext
248 * passed in from the user.
251 static int next_getadapter_fib(struct aac_dev
* dev
, void __user
*arg
)
255 struct aac_fib_context
*fibctx
;
257 struct list_head
* entry
;
260 if(copy_from_user((void *)&f
, arg
, sizeof(struct fib_ioctl
)))
263 * Verify that the HANDLE passed in was a valid AdapterFibContext
265 * Search the list of AdapterFibContext addresses on the adapter
266 * to be sure this is a valid address
268 spin_lock_irqsave(&dev
->fib_lock
, flags
);
269 entry
= dev
->fib_list
.next
;
272 while (entry
!= &dev
->fib_list
) {
273 fibctx
= list_entry(entry
, struct aac_fib_context
, next
);
275 * Extract the AdapterFibContext from the Input parameters.
277 if (fibctx
->unique
== f
.fibctx
) { /* We found a winner */
284 spin_unlock_irqrestore(&dev
->fib_lock
, flags
);
285 dprintk ((KERN_INFO
"Fib Context not found\n"));
289 if((fibctx
->type
!= FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT
) ||
290 (fibctx
->size
!= sizeof(struct aac_fib_context
))) {
291 spin_unlock_irqrestore(&dev
->fib_lock
, flags
);
292 dprintk ((KERN_INFO
"Fib Context corrupt?\n"));
297 * If there are no fibs to send back, then either wait or return
301 if (!list_empty(&fibctx
->fib_list
)) {
303 * Pull the next fib from the fibs
305 entry
= fibctx
->fib_list
.next
;
308 fib
= list_entry(entry
, struct fib
, fiblink
);
310 spin_unlock_irqrestore(&dev
->fib_lock
, flags
);
311 if (copy_to_user(f
.fib
, fib
->hw_fib_va
, sizeof(struct hw_fib
))) {
312 kfree(fib
->hw_fib_va
);
317 * Free the space occupied by this copy of the fib.
319 kfree(fib
->hw_fib_va
);
323 spin_unlock_irqrestore(&dev
->fib_lock
, flags
);
324 /* If someone killed the AIF aacraid thread, restart it */
325 status
= !dev
->aif_thread
;
326 if (status
&& !dev
->in_reset
&& dev
->queues
&& dev
->fsa_dev
) {
327 /* Be paranoid, be very paranoid! */
328 kthread_stop(dev
->thread
);
331 dev
->thread
= kthread_run(aac_command_thread
, dev
,
336 if(down_interruptible(&fibctx
->wait_sem
) < 0) {
337 status
= -ERESTARTSYS
;
339 /* Lock again and retry */
340 spin_lock_irqsave(&dev
->fib_lock
, flags
);
347 fibctx
->jiffies
= jiffies
/HZ
;
351 int aac_close_fib_context(struct aac_dev
* dev
, struct aac_fib_context
* fibctx
)
356 * First free any FIBs that have not been consumed.
358 while (!list_empty(&fibctx
->fib_list
)) {
359 struct list_head
* entry
;
361 * Pull the next fib from the fibs
363 entry
= fibctx
->fib_list
.next
;
365 fib
= list_entry(entry
, struct fib
, fiblink
);
368 * Free the space occupied by this copy of the fib.
370 kfree(fib
->hw_fib_va
);
374 * Remove the Context from the AdapterFibContext List
376 list_del(&fibctx
->next
);
382 * Free the space occupied by the Context
389 * close_getadapter_fib - close down user fib context
391 * @arg: ioctl arguments
393 * This routine will close down the fibctx passed in from the user.
396 static int close_getadapter_fib(struct aac_dev
* dev
, void __user
*arg
)
398 struct aac_fib_context
*fibctx
;
401 struct list_head
* entry
;
404 * Verify that the HANDLE passed in was a valid AdapterFibContext
406 * Search the list of AdapterFibContext addresses on the adapter
407 * to be sure this is a valid address
410 entry
= dev
->fib_list
.next
;
413 while(entry
!= &dev
->fib_list
) {
414 fibctx
= list_entry(entry
, struct aac_fib_context
, next
);
416 * Extract the fibctx from the input parameters
418 if (fibctx
->unique
== (u32
)(uintptr_t)arg
) /* We found a winner */
425 return 0; /* Already gone */
427 if((fibctx
->type
!= FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT
) ||
428 (fibctx
->size
!= sizeof(struct aac_fib_context
)))
430 spin_lock_irqsave(&dev
->fib_lock
, flags
);
431 status
= aac_close_fib_context(dev
, fibctx
);
432 spin_unlock_irqrestore(&dev
->fib_lock
, flags
);
437 * check_revision - report the driver revision to userspace
439 * @arg: ioctl arguments
441 * This routine returns the driver version.
442 * Under Linux, there have been no version incompatibilities, so this is
446 static int check_revision(struct aac_dev
*dev
, void __user
*arg
)
448 struct revision response
;
449 char *driver_version
= aac_driver_version
;
453 version
= (simple_strtol(driver_version
,
454 &driver_version
, 10) << 24) | 0x00000400;
455 version
+= simple_strtol(driver_version
+ 1, &driver_version
, 10) << 16;
456 version
+= simple_strtol(driver_version
+ 1, NULL
, 10);
457 response
.version
= cpu_to_le32(version
);
458 # ifdef AAC_DRIVER_BUILD
459 response
.build
= cpu_to_le32(AAC_DRIVER_BUILD
);
461 response
.build
= cpu_to_le32(9999);
464 if (copy_to_user(arg
, &response
, sizeof(response
)))
476 static int aac_send_raw_srb(struct aac_dev
* dev
, void __user
* arg
)
480 struct aac_srb
*srbcmd
= NULL
;
481 struct aac_hba_cmd_req
*hbacmd
= NULL
;
482 struct user_aac_srb
*user_srbcmd
= NULL
;
483 struct user_aac_srb __user
*user_srb
= arg
;
484 struct aac_srb_reply __user
*user_reply
;
490 void __user
*sg_user
[HBA_MAX_SG_EMBEDDED
];
491 void *sg_list
[HBA_MAX_SG_EMBEDDED
];
492 u32 sg_count
[HBA_MAX_SG_EMBEDDED
];
495 u32 actual_fibsize64
, actual_fibsize
= 0;
497 int is_native_device
;
502 dprintk((KERN_DEBUG
"aacraid: send raw srb -EBUSY\n"));
505 if (!capable(CAP_SYS_ADMIN
)){
506 dprintk((KERN_DEBUG
"aacraid: No permission to send raw srb\n"));
510 * Allocate and initialize a Fib then setup a SRB command
512 if (!(srbfib
= aac_fib_alloc(dev
))) {
516 memset(sg_list
, 0, sizeof(sg_list
)); /* cleanup may take issue */
517 if(copy_from_user(&fibsize
, &user_srb
->count
,sizeof(u32
))){
518 dprintk((KERN_DEBUG
"aacraid: Could not copy data size from user\n"));
523 if ((fibsize
< (sizeof(struct user_aac_srb
) - sizeof(struct user_sgentry
))) ||
524 (fibsize
> (dev
->max_fib_size
- sizeof(struct aac_fibhdr
)))) {
529 user_srbcmd
= kmalloc(fibsize
, GFP_KERNEL
);
531 dprintk((KERN_DEBUG
"aacraid: Could not make a copy of the srb\n"));
535 if(copy_from_user(user_srbcmd
, user_srb
,fibsize
)){
536 dprintk((KERN_DEBUG
"aacraid: Could not copy srb from user\n"));
541 flags
= user_srbcmd
->flags
; /* from user in cpu order */
542 switch (flags
& (SRB_DataIn
| SRB_DataOut
)) {
544 data_dir
= DMA_TO_DEVICE
;
546 case (SRB_DataIn
| SRB_DataOut
):
547 data_dir
= DMA_BIDIRECTIONAL
;
550 data_dir
= DMA_FROM_DEVICE
;
555 if (user_srbcmd
->sg
.count
> ARRAY_SIZE(sg_list
)) {
556 dprintk((KERN_DEBUG
"aacraid: too many sg entries %d\n",
557 user_srbcmd
->sg
.count
));
561 if ((data_dir
== DMA_NONE
) && user_srbcmd
->sg
.count
) {
562 dprintk((KERN_DEBUG
"aacraid:SG with no direction specified\n"));
566 actual_fibsize
= sizeof(struct aac_srb
) - sizeof(struct sgentry
) +
567 ((user_srbcmd
->sg
.count
& 0xff) * sizeof(struct sgentry
));
568 actual_fibsize64
= actual_fibsize
+ (user_srbcmd
->sg
.count
& 0xff) *
569 (sizeof(struct sgentry64
) - sizeof(struct sgentry
));
570 /* User made a mistake - should not continue */
571 if ((actual_fibsize
!= fibsize
) && (actual_fibsize64
!= fibsize
)) {
572 dprintk((KERN_DEBUG
"aacraid: Bad Size specified in "
573 "Raw SRB command calculated fibsize=%lu;%lu "
574 "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
575 "issued fibsize=%d\n",
576 actual_fibsize
, actual_fibsize64
, user_srbcmd
->sg
.count
,
577 sizeof(struct aac_srb
), sizeof(struct sgentry
),
578 sizeof(struct sgentry64
), fibsize
));
583 chn
= user_srbcmd
->channel
;
584 if (chn
< AAC_MAX_BUSES
&& user_srbcmd
->id
< AAC_MAX_TARGETS
&&
585 dev
->hba_map
[chn
][user_srbcmd
->id
].devtype
==
586 AAC_DEVTYPE_NATIVE_RAW
) {
587 is_native_device
= 1;
588 hbacmd
= (struct aac_hba_cmd_req
*)srbfib
->hw_fib_va
;
589 memset(hbacmd
, 0, 96); /* sizeof(*hbacmd) is not necessary */
591 /* iu_type is a parameter of aac_hba_send */
596 case DMA_FROM_DEVICE
:
597 case DMA_BIDIRECTIONAL
:
604 hbacmd
->lun
[1] = cpu_to_le32(user_srbcmd
->lun
);
605 hbacmd
->it_nexus
= dev
->hba_map
[chn
][user_srbcmd
->id
].rmw_nexus
;
608 * we fill in reply_qid later in aac_src_deliver_message
609 * we fill in iu_type, request_id later in aac_hba_send
610 * we fill in emb_data_desc_count, data_length later
614 memcpy(hbacmd
->cdb
, user_srbcmd
->cdb
, sizeof(hbacmd
->cdb
));
616 address
= (u64
)srbfib
->hw_error_pa
;
617 hbacmd
->error_ptr_hi
= cpu_to_le32((u32
)(address
>> 32));
618 hbacmd
->error_ptr_lo
= cpu_to_le32((u32
)(address
& 0xffffffff));
619 hbacmd
->error_length
= cpu_to_le32(FW_ERROR_BUFFER_SIZE
);
620 hbacmd
->emb_data_desc_count
=
621 cpu_to_le32(user_srbcmd
->sg
.count
);
622 srbfib
->hbacmd_size
= 64 +
623 user_srbcmd
->sg
.count
* sizeof(struct aac_hba_sgl
);
626 is_native_device
= 0;
627 aac_fib_init(srbfib
);
629 /* raw_srb FIB is not FastResponseCapable */
630 srbfib
->hw_fib_va
->header
.XferState
&=
631 ~cpu_to_le32(FastResponseCapable
);
633 srbcmd
= (struct aac_srb
*) fib_data(srbfib
);
635 // Fix up srb for endian and force some values
637 srbcmd
->function
= cpu_to_le32(SRBF_ExecuteScsi
); // Force this
638 srbcmd
->channel
= cpu_to_le32(user_srbcmd
->channel
);
639 srbcmd
->id
= cpu_to_le32(user_srbcmd
->id
);
640 srbcmd
->lun
= cpu_to_le32(user_srbcmd
->lun
);
641 srbcmd
->timeout
= cpu_to_le32(user_srbcmd
->timeout
);
642 srbcmd
->flags
= cpu_to_le32(flags
);
643 srbcmd
->retry_limit
= 0; // Obsolete parameter
644 srbcmd
->cdb_size
= cpu_to_le32(user_srbcmd
->cdb_size
);
645 memcpy(srbcmd
->cdb
, user_srbcmd
->cdb
, sizeof(srbcmd
->cdb
));
649 if (is_native_device
) {
650 struct user_sgmap
*usg32
= &user_srbcmd
->sg
;
651 struct user_sgmap64
*usg64
=
652 (struct user_sgmap64
*)&user_srbcmd
->sg
;
654 for (i
= 0; i
< usg32
->count
; i
++) {
658 sg_count
[i
] = (actual_fibsize64
== fibsize
) ?
659 usg64
->sg
[i
].count
: usg32
->sg
[i
].count
;
661 (dev
->scsi_host_ptr
->max_sectors
<< 9)) {
662 pr_err("aacraid: upsg->sg[%d].count=%u>%u\n",
664 dev
->scsi_host_ptr
->max_sectors
<< 9);
669 p
= kmalloc(sg_count
[i
], GFP_KERNEL
|__GFP_DMA
);
675 if (actual_fibsize64
== fibsize
) {
676 addr
= (u64
)usg64
->sg
[i
].addr
[0];
677 addr
+= ((u64
)usg64
->sg
[i
].addr
[1]) << 32;
679 addr
= (u64
)usg32
->sg
[i
].addr
;
682 sg_user
[i
] = (void __user
*)(uintptr_t)addr
;
683 sg_list
[i
] = p
; // save so we can clean up later
686 if (flags
& SRB_DataOut
) {
687 if (copy_from_user(p
, sg_user
[i
],
693 addr
= pci_map_single(dev
->pdev
, p
, sg_count
[i
],
695 hbacmd
->sge
[i
].addr_hi
= cpu_to_le32((u32
)(addr
>>32));
696 hbacmd
->sge
[i
].addr_lo
= cpu_to_le32(
697 (u32
)(addr
& 0xffffffff));
698 hbacmd
->sge
[i
].len
= cpu_to_le32(sg_count
[i
]);
699 hbacmd
->sge
[i
].flags
= 0;
700 byte_count
+= sg_count
[i
];
703 if (usg32
->count
> 0) /* embedded sglist */
704 hbacmd
->sge
[usg32
->count
-1].flags
=
705 cpu_to_le32(0x40000000);
706 hbacmd
->data_length
= cpu_to_le32(byte_count
);
708 status
= aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ
, srbfib
,
711 } else if (dev
->adapter_info
.options
& AAC_OPT_SGMAP_HOST64
) {
712 struct user_sgmap64
* upsg
= (struct user_sgmap64
*)&user_srbcmd
->sg
;
713 struct sgmap64
* psg
= (struct sgmap64
*)&srbcmd
->sg
;
716 * This should also catch if user used the 32 bit sgmap
718 if (actual_fibsize64
== fibsize
) {
719 actual_fibsize
= actual_fibsize64
;
720 for (i
= 0; i
< upsg
->count
; i
++) {
724 sg_count
[i
] = upsg
->sg
[i
].count
;
726 ((dev
->adapter_info
.options
&
728 (dev
->scsi_host_ptr
->max_sectors
<< 9) :
733 /* Does this really need to be GFP_DMA? */
734 p
= kmalloc(sg_count
[i
], GFP_KERNEL
|__GFP_DMA
);
736 dprintk((KERN_DEBUG
"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
737 sg_count
[i
], i
, upsg
->count
));
741 addr
= (u64
)upsg
->sg
[i
].addr
[0];
742 addr
+= ((u64
)upsg
->sg
[i
].addr
[1]) << 32;
743 sg_user
[i
] = (void __user
*)(uintptr_t)addr
;
744 sg_list
[i
] = p
; // save so we can clean up later
747 if (flags
& SRB_DataOut
) {
748 if (copy_from_user(p
, sg_user
[i
],
750 dprintk((KERN_DEBUG
"aacraid: Could not copy sg data from user\n"));
755 addr
= pci_map_single(dev
->pdev
, p
,
756 sg_count
[i
], data_dir
);
758 psg
->sg
[i
].addr
[0] = cpu_to_le32(addr
& 0xffffffff);
759 psg
->sg
[i
].addr
[1] = cpu_to_le32(addr
>>32);
760 byte_count
+= sg_count
[i
];
761 psg
->sg
[i
].count
= cpu_to_le32(sg_count
[i
]);
764 struct user_sgmap
* usg
;
766 actual_fibsize
- sizeof(struct aac_srb
)
767 + sizeof(struct sgmap
), GFP_KERNEL
);
769 dprintk((KERN_DEBUG
"aacraid: Allocation error in Raw SRB command\n"));
773 actual_fibsize
= actual_fibsize64
;
775 for (i
= 0; i
< usg
->count
; i
++) {
779 sg_count
[i
] = usg
->sg
[i
].count
;
781 ((dev
->adapter_info
.options
&
783 (dev
->scsi_host_ptr
->max_sectors
<< 9) :
789 /* Does this really need to be GFP_DMA? */
790 p
= kmalloc(sg_count
[i
], GFP_KERNEL
|__GFP_DMA
);
792 dprintk((KERN_DEBUG
"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
793 sg_count
[i
], i
, usg
->count
));
798 sg_user
[i
] = (void __user
*)(uintptr_t)usg
->sg
[i
].addr
;
799 sg_list
[i
] = p
; // save so we can clean up later
802 if (flags
& SRB_DataOut
) {
803 if (copy_from_user(p
, sg_user
[i
],
806 dprintk((KERN_DEBUG
"aacraid: Could not copy sg data from user\n"));
811 addr
= pci_map_single(dev
->pdev
, p
,
812 sg_count
[i
], data_dir
);
814 psg
->sg
[i
].addr
[0] = cpu_to_le32(addr
& 0xffffffff);
815 psg
->sg
[i
].addr
[1] = cpu_to_le32(addr
>>32);
816 byte_count
+= sg_count
[i
];
817 psg
->sg
[i
].count
= cpu_to_le32(sg_count
[i
]);
821 srbcmd
->count
= cpu_to_le32(byte_count
);
822 if (user_srbcmd
->sg
.count
)
823 psg
->count
= cpu_to_le32(sg_indx
+1);
826 status
= aac_fib_send(ScsiPortCommand64
, srbfib
, actual_fibsize
, FsaNormal
, 1, 1,NULL
,NULL
);
828 struct user_sgmap
* upsg
= &user_srbcmd
->sg
;
829 struct sgmap
* psg
= &srbcmd
->sg
;
831 if (actual_fibsize64
== fibsize
) {
832 struct user_sgmap64
* usg
= (struct user_sgmap64
*)upsg
;
833 for (i
= 0; i
< upsg
->count
; i
++) {
837 sg_count
[i
] = usg
->sg
[i
].count
;
839 ((dev
->adapter_info
.options
&
841 (dev
->scsi_host_ptr
->max_sectors
<< 9) :
846 /* Does this really need to be GFP_DMA? */
847 p
= kmalloc(sg_count
[i
], GFP_KERNEL
|__GFP_DMA
);
849 dprintk((KERN_DEBUG
"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
850 sg_count
[i
], i
, usg
->count
));
854 addr
= (u64
)usg
->sg
[i
].addr
[0];
855 addr
+= ((u64
)usg
->sg
[i
].addr
[1]) << 32;
856 sg_user
[i
] = (void __user
*)addr
;
857 sg_list
[i
] = p
; // save so we can clean up later
860 if (flags
& SRB_DataOut
) {
861 if (copy_from_user(p
, sg_user
[i
],
863 dprintk((KERN_DEBUG
"aacraid: Could not copy sg data from user\n"));
868 addr
= pci_map_single(dev
->pdev
, p
, usg
->sg
[i
].count
, data_dir
);
870 psg
->sg
[i
].addr
= cpu_to_le32(addr
& 0xffffffff);
871 byte_count
+= usg
->sg
[i
].count
;
872 psg
->sg
[i
].count
= cpu_to_le32(sg_count
[i
]);
875 for (i
= 0; i
< upsg
->count
; i
++) {
879 sg_count
[i
] = upsg
->sg
[i
].count
;
881 ((dev
->adapter_info
.options
&
883 (dev
->scsi_host_ptr
->max_sectors
<< 9) :
888 p
= kmalloc(sg_count
[i
], GFP_KERNEL
);
890 dprintk((KERN_DEBUG
"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
891 sg_count
[i
], i
, upsg
->count
));
895 sg_user
[i
] = (void __user
*)(uintptr_t)upsg
->sg
[i
].addr
;
896 sg_list
[i
] = p
; // save so we can clean up later
899 if (flags
& SRB_DataOut
) {
900 if (copy_from_user(p
, sg_user
[i
],
902 dprintk((KERN_DEBUG
"aacraid: Could not copy sg data from user\n"));
907 addr
= pci_map_single(dev
->pdev
, p
,
908 sg_count
[i
], data_dir
);
910 psg
->sg
[i
].addr
= cpu_to_le32(addr
);
911 byte_count
+= sg_count
[i
];
912 psg
->sg
[i
].count
= cpu_to_le32(sg_count
[i
]);
915 srbcmd
->count
= cpu_to_le32(byte_count
);
916 if (user_srbcmd
->sg
.count
)
917 psg
->count
= cpu_to_le32(sg_indx
+1);
920 status
= aac_fib_send(ScsiPortCommand
, srbfib
, actual_fibsize
, FsaNormal
, 1, 1, NULL
, NULL
);
923 if (status
== -ERESTARTSYS
) {
924 rcode
= -ERESTARTSYS
;
929 dprintk((KERN_DEBUG
"aacraid: Could not send raw srb fib to hba\n"));
934 if (flags
& SRB_DataIn
) {
935 for(i
= 0 ; i
<= sg_indx
; i
++){
936 if (copy_to_user(sg_user
[i
], sg_list
[i
], sg_count
[i
])) {
937 dprintk((KERN_DEBUG
"aacraid: Could not copy sg data to user\n"));
945 user_reply
= arg
+ fibsize
;
946 if (is_native_device
) {
947 struct aac_hba_resp
*err
=
948 &((struct aac_native_hba
*)srbfib
->hw_fib_va
)->resp
.err
;
949 struct aac_srb_reply reply
;
951 reply
.status
= ST_OK
;
952 if (srbfib
->flags
& FIB_CONTEXT_FLAG_FASTRESP
) {
954 reply
.srb_status
= SRB_STATUS_SUCCESS
;
955 reply
.scsi_status
= 0;
956 reply
.data_xfer_length
= byte_count
;
958 reply
.srb_status
= err
->service_response
;
959 reply
.scsi_status
= err
->status
;
960 reply
.data_xfer_length
= byte_count
-
961 le32_to_cpu(err
->residual_count
);
962 reply
.sense_data_size
= err
->sense_response_data_len
;
963 memcpy(reply
.sense_data
, err
->sense_response_buf
,
964 AAC_SENSE_BUFFERSIZE
);
966 if (copy_to_user(user_reply
, &reply
,
967 sizeof(struct aac_srb_reply
))) {
968 dprintk((KERN_DEBUG
"aacraid: Copy to user failed\n"));
973 struct aac_srb_reply
*reply
;
975 reply
= (struct aac_srb_reply
*) fib_data(srbfib
);
976 if (copy_to_user(user_reply
, reply
,
977 sizeof(struct aac_srb_reply
))) {
978 dprintk((KERN_DEBUG
"aacraid: Copy to user failed\n"));
986 if (rcode
!= -ERESTARTSYS
) {
987 for (i
= 0; i
<= sg_indx
; i
++)
989 aac_fib_complete(srbfib
);
990 aac_fib_free(srbfib
);
996 struct aac_pci_info
{
1002 static int aac_get_pci_info(struct aac_dev
* dev
, void __user
*arg
)
1004 struct aac_pci_info pci_info
;
1006 pci_info
.bus
= dev
->pdev
->bus
->number
;
1007 pci_info
.slot
= PCI_SLOT(dev
->pdev
->devfn
);
1009 if (copy_to_user(arg
, &pci_info
, sizeof(struct aac_pci_info
))) {
1010 dprintk((KERN_DEBUG
"aacraid: Could not copy pci info\n"));
1016 static int aac_get_hba_info(struct aac_dev
*dev
, void __user
*arg
)
1018 struct aac_hba_info hbainfo
;
1020 hbainfo
.adapter_number
= (u8
) dev
->id
;
1021 hbainfo
.system_io_bus_number
= dev
->pdev
->bus
->number
;
1022 hbainfo
.device_number
= (dev
->pdev
->devfn
>> 3);
1023 hbainfo
.function_number
= (dev
->pdev
->devfn
& 0x0007);
1025 hbainfo
.vendor_id
= dev
->pdev
->vendor
;
1026 hbainfo
.device_id
= dev
->pdev
->device
;
1027 hbainfo
.sub_vendor_id
= dev
->pdev
->subsystem_vendor
;
1028 hbainfo
.sub_system_id
= dev
->pdev
->subsystem_device
;
1030 if (copy_to_user(arg
, &hbainfo
, sizeof(struct aac_hba_info
))) {
1031 dprintk((KERN_DEBUG
"aacraid: Could not copy hba info\n"));
1038 struct aac_reset_iop
{
1042 static int aac_send_reset_adapter(struct aac_dev
*dev
, void __user
*arg
)
1044 struct aac_reset_iop reset
;
1047 if (copy_from_user((void *)&reset
, arg
, sizeof(struct aac_reset_iop
)))
1050 retval
= aac_reset_adapter(dev
, 0, reset
.reset_type
);
1055 int aac_do_ioctl(struct aac_dev
* dev
, int cmd
, void __user
*arg
)
1059 mutex_lock(&dev
->ioctl_mutex
);
1061 if (dev
->adapter_shutdown
) {
1067 * HBA gets first crack
1070 status
= aac_dev_ioctl(dev
, cmd
, arg
);
1071 if (status
!= -ENOTTY
)
1075 case FSACTL_MINIPORT_REV_CHECK
:
1076 status
= check_revision(dev
, arg
);
1078 case FSACTL_SEND_LARGE_FIB
:
1079 case FSACTL_SENDFIB
:
1080 status
= ioctl_send_fib(dev
, arg
);
1082 case FSACTL_OPEN_GET_ADAPTER_FIB
:
1083 status
= open_getadapter_fib(dev
, arg
);
1085 case FSACTL_GET_NEXT_ADAPTER_FIB
:
1086 status
= next_getadapter_fib(dev
, arg
);
1088 case FSACTL_CLOSE_GET_ADAPTER_FIB
:
1089 status
= close_getadapter_fib(dev
, arg
);
1091 case FSACTL_SEND_RAW_SRB
:
1092 status
= aac_send_raw_srb(dev
,arg
);
1094 case FSACTL_GET_PCI_INFO
:
1095 status
= aac_get_pci_info(dev
,arg
);
1097 case FSACTL_GET_HBA_INFO
:
1098 status
= aac_get_hba_info(dev
, arg
);
1100 case FSACTL_RESET_IOP
:
1101 status
= aac_send_reset_adapter(dev
, arg
);
1110 mutex_unlock(&dev
->ioctl_mutex
);