/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Abstract: Contain all routines that are required for FSA host/adapter
 *    communication.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/semaphore.h>
#include <linux/bcd.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include "aacraid.h"
/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */
static int fib_map_alloc(struct aac_dev *dev)
{
	if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
	else
		dev->max_cmd_size = dev->max_fib_size;
	if (dev->max_fib_size < AAC_MAX_NATIVE_SIZE) {
		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
	} else {
		dev->max_cmd_size = dev->max_fib_size;
	}

	dprintk((KERN_INFO
	  "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
	  &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev,
		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
		* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
		&dev->hw_fib_pa, GFP_KERNEL);
	if (dev->hw_fib_va == NULL)
		return -ENOMEM;
	return 0;
}
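/*
 * Note: the coherent buffer allocated above holds one hardware fib of
 * dev->max_cmd_size bytes, each preceded by a struct aac_fib_xporthdr,
 * for every possible outstanding command (can_queue) plus the
 * AAC_NUM_MGT_FIB fibs reserved for management traffic; the additional
 * ALIGN32 - 1 bytes of slack let aac_fib_setup() round the first fib up
 * to a 32-byte boundary.
 */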
/**
 *	aac_fib_map_free		-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */
void aac_fib_map_free(struct aac_dev *dev)
{
	size_t alloc_size;
	size_t fib_size;
	int num_fibs;

	if (!dev->hw_fib_va || !dev->max_cmd_size)
		return;

	num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
	fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
	alloc_size = fib_size * num_fibs + ALIGN32 - 1;

	dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va,
			  dev->hw_fib_pa);

	dev->hw_fib_va = NULL;
	dev->hw_fib_pa = 0;
}
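/*
 * aac_fib_vector_assign() below spreads the available MSI-X vectors
 * round-robin across the fib pool; fibs that fall beyond the per-vector
 * capacity (dev->vector_cap), or adapters with a single vector, fall back
 * to vector 0.
 */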
void aac_fib_vector_assign(struct aac_dev *dev)
{
	u32 i = 0;
	u32 vector = 1;
	struct fib *fibptr = NULL;

	for (i = 0, fibptr = &dev->fibs[i];
		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
		i++, fibptr++) {
		if ((dev->max_msix == 1) ||
		    (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
			- dev->vector_cap))) {
			fibptr->vector_no = 0;
		} else {
			fibptr->vector_no = vector;
			vector++;
			if (vector == dev->max_msix)
				vector = 1;
		}
	}
}
/**
 *	aac_fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */
int aac_fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib;
	dma_addr_t hw_fib_pa;
	int i;
	u32 max_cmds;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		max_cmds = (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1;
		dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB;
		if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
			dev->init->r7.max_io_commands = cpu_to_le32(max_cmds);
	}
	if (i < 0)
		return -ENOMEM;

	memset(dev->hw_fib_va, 0,
		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));

	/* 32 byte alignment for PMC */
	hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
	hw_fib = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
					(hw_fib_pa - dev->hw_fib_pa));

	/* add Xport header */
	hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
					sizeof(struct aac_fib_xporthdr));
	hw_fib_pa += sizeof(struct aac_fib_xporthdr);

	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i];
		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
		i++, fibptr++) {
		fibptr->flags = 0;
		fibptr->size = sizeof(struct fib);
		fibptr->dev = dev;
		fibptr->hw_fib_va = hw_fib;
		fibptr->data = (void *) fibptr->hw_fib_va->data;
		fibptr->next = fibptr + 1;	/* Forward chain the fibs */
		sema_init(&fibptr->event_wait, 0);
		spin_lock_init(&fibptr->event_lock);
		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib->header.SenderSize =
			cpu_to_le16(dev->max_fib_size);	/* ?? max_cmd_size */
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_sgl_pa = hw_fib_pa +
			offsetof(struct aac_hba_cmd_req, sge[2]);
		/*
		 * one element is for the ptr to the separate sg list,
		 * second element for 32 byte alignment
		 */
		fibptr->hw_error_pa = hw_fib_pa +
			offsetof(struct aac_native_hba, resp.resp_bytes[0]);

		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr));
		hw_fib_pa = hw_fib_pa +
			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
	}

	/*
	 *	Assign vector numbers to fibs
	 */
	aac_fib_vector_assign(dev);

	/*
	 *	Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 *	Set 8 fibs aside for management tools
	 */
	dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
	return 0;
}
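/*
 * aac_fib_alloc_tag() below never touches the free list: the block layer
 * tag of the scsi_cmnd is used as a direct index into dev->fibs, so no
 * locking is needed on this fast path.
 */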
/**
 *	aac_fib_alloc_tag	-	allocate a fib using tags
 *	@dev: Adapter to allocate the fib for
 *	@scmd: SCSI command
 *
 *	Allocate a fib from the adapter fib pool using tags
 *	from the blk layer.
 */
struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
{
	struct fib *fibptr;

	fibptr = &dev->fibs[scmd->request->tag];
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->callback_data = NULL;
	fibptr->callback = NULL;

	return fibptr;
}
/**
 *	aac_fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */
struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib *fibptr;
	unsigned long flags;

	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->flags = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}
/**
 *	aac_fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 */
void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	if (fibptr->done == 2)
		return;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
		aac_config.fib_timeouts++;
	if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
	    fibptr->hw_fib_va->header.XferState != 0) {
		printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
			 (void *)fibptr,
			 le32_to_cpu(fibptr->hw_fib_va->header.XferState));
	}
	fibptr->next = fibptr->dev->free_fib;
	fibptr->dev->free_fib = fibptr;
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}
/**
 *	aac_fib_init	-	initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */
void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
/**
 *	fib_dealloc		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */
static void fib_dealloc(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	hw_fib->header.XferState = 0;
}
/*
 *	Communication primitives define and support the queuing method we use
 *	to support host to adapter communication. All queue accesses happen
 *	through these routines and these are the only routines which have
 *	knowledge of how these queues are implemented.
 */

/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	With a priority the routine returns a queue entry if the queue has free
 *	entries. If the queue is full (no free entries) then no entry is
 *	returned and the function returns 0, otherwise 1 is returned.
 */
static int aac_get_entry(struct aac_dev *dev, u32 qid, struct aac_entry **entry, u32 *index, unsigned long *nonotify)
{
	struct aac_queue *q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	/* Queue is full */
	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, atomic_read(&q->numpending));
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}
/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@priority: Priority of fib
 *	@fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */
int aac_queue_get(struct aac_dev *dev, u32 *index, u32 qid, struct hw_fib *hw_fib, int wait, struct fib *fibptr, unsigned long *nonotify)
{
	struct aac_entry *entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}
/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This level
 *	sends and receives FIBs. This level has no knowledge of how these FIBs
 *	get passed back and forth.
 */

/**
 *	aac_fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */
int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev *dev = fibptr->dev;
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	unsigned long flags = 0;
	unsigned long mflags = 0;
	unsigned long sflags = 0;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;

	if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))
		return -EINVAL;

	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	fibptr->flags = 0;
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */
	hw_fib->header.SenderFibAddress =
		cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);

	/* use the same shifted value for handle to be compatible
	 * with the new native hba command handle
	 */
	hw_fib->header.Handle =
		cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);

	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it and send a notify
	 *	to the adapter that a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
		fibptr->flags = FIB_CONTEXT_FLAG;
	}

	fibptr->done = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n", fibptr->hw_fib_va));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n", (ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n", fibptr));

	if (!dev->queues)
		return -EBUSY;

	if (wait) {
		spin_lock_irqsave(&dev->manage_lock, mflags);
		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
			printk(KERN_INFO "No management Fibs Available:%d\n",
						dev->management_fib_count);
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
		spin_lock_irqsave(&fibptr->event_lock, flags);
	}

	if (dev->sync_mode) {
		if (wait)
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
		spin_lock_irqsave(&dev->sync_lock, sflags);
		if (dev->sync_fib) {
			list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
		} else {
			dev->sync_fib = fibptr;
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
				(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
				NULL, NULL, NULL, NULL, NULL);
		}
		if (wait) {
			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
			if (down_interruptible(&fibptr->event_wait)) {
				fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
				return -EFAULT;
			}
			return 0;
		}
		return -EINPROGRESS;
	}

	if (aac_adapter_deliver(fibptr) != 0) {
		printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
		if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
		}
		return -EBUSY;
	}

	/*
	 *	If the caller wanted us to wait for response wait now.
	 */
	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 * *VERY* Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
			 */
			unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				int blink;
				if (time_is_before_eq_jiffies(timeout)) {
					struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
					atomic_dec(&q->numpending);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update mother board BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}

				if (unlikely(pci_channel_offline(dev->pdev)))
					return -EFAULT;

				if ((blink = aac_adapter_check_health(dev)) > 0) {
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
						  "Usually a result of a serious unrecoverable hardware problem\n",
						  blink);
					}
					return -EFAULT;
				}
				/*
				 * Allow other processes / CPUS to use core
				 */
				schedule();
			}
		} else if (down_interruptible(&fibptr->event_wait)) {
			/* Do nothing ... satisfy
			 * down_interruptible must_check */
		}

		spin_lock_irqsave(&fibptr->event_lock, flags);
		if (fibptr->done == 0) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		BUG_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;
		return 0;
	}
	/*
	 *	If the user does not want a response then return success,
	 *	otherwise return pending.
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
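/*
 * aac_hba_send() is the native HBA (raw pass-through) counterpart of
 * aac_fib_send(): the request is delivered through the same adapter
 * mechanism, but the payload is an aac_hba_cmd_req rather than a FIB, and
 * completion is waited for on the same event_wait semaphore when no
 * callback is supplied.
 */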
int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
		void *callback_data)
{
	struct aac_dev *dev = fibptr->dev;
	int wait;
	unsigned long flags = 0;
	unsigned long mflags = 0;
	struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
			fibptr->hw_fib_va;

	fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
	if (callback) {
		wait = 0;
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	} else
		wait = 1;

	hbacmd->iu_type = command;

	if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
		/* bit1 of request_id must be 0 */
		hbacmd->request_id =
			cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
		fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
	} else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
		return -EINVAL;

	if (wait) {
		spin_lock_irqsave(&dev->manage_lock, mflags);
		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
		spin_lock_irqsave(&fibptr->event_lock, flags);
	}

	if (aac_adapter_deliver(fibptr) != 0) {
		if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
		}
		return -EBUSY;
	}
	FIB_COUNTER_INCREMENT(aac_config.NativeSent);

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);

		if (unlikely(pci_channel_offline(dev->pdev)))
			return -EFAULT;

		fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
		if (down_interruptible(&fibptr->event_wait))
			fibptr->done = 2;
		fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);

		spin_lock_irqsave(&fibptr->event_lock, flags);
		if ((fibptr->done == 0) || (fibptr->done == 2)) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		WARN_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;

		return 0;
	}

	return -EINPROGRESS;
}
/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Will return a pointer to the entry on the top of the queue requested that
 *	we are a consumer of, and return the address of the queue entry. It does
 *	not change the state of the queue.
 */
int aac_consumer_get(struct aac_dev *dev, struct aac_queue *q, struct aac_entry **entry)
{
	u32 index;
	int status;

	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return status;
}
/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */
void aac_consumer_free(struct aac_dev *dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer) + 1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		le32_add_cpu(q->headers.consumer, 1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}
/**
 *	aac_fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */
int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	struct aac_dev *dev = fibptr->dev;
	struct aac_queue *q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		kfree(hw_fib);
		return 0;
	}

	if (hw_fib->header.XferState == 0) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return 0;
	}
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC &&
	    hw_fib->header.StructType != FIB_MAGIC2 &&
	    hw_fib->header.StructType != FIB_MAGIC2_64) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->comm_interface == AAC_COMM_MESSAGE) {
			kfree(hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	} else {
		printk(KERN_WARNING "aac_fib_adapter_complete: "
			"Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}
/**
 *	aac_fib_complete	-	fib completion handler
 *	@fibptr: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */
int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
		fib_dealloc(fibptr);
		return 0;
	}

	/*
	 *	Check for a fib which has already been completed or with a
	 *	status wait timeout
	 */
	if (hw_fib->header.XferState == 0 || fibptr->done == 2)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC &&
	    hw_fib->header.StructType != FIB_MAGIC2 &&
	    hw_fib->header.StructType != FIB_MAGIC2_64)
		return -EINVAL;
	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command is complete that we had sent to the adapter and this
	 *	cdb could be reused.
	 */
	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) {
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost)) {
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}
/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */
void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled) {
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}
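/* Helper to pull one 32-bit little-endian word out of an AIF payload. */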
static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index)
{
	return le32_to_cpu(((__le32 *)aifcmd->data)[index]);
}

static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
{
	switch (aac_aif_data(aifcmd, 1)) {
	case AifBuCacheDataLoss:
		if (aac_aif_data(aifcmd, 2))
			dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n",
					aac_aif_data(aifcmd, 2));
		else
			dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n");
		break;
	case AifBuCacheDataRecover:
		if (aac_aif_data(aifcmd, 2))
			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n",
					aac_aif_data(aifcmd, 2));
		else
			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n");
		break;
	}
}
/**
 *	aac_handle_aif		-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */
#define AIF_SNIFF_TIMEOUT	(500*HZ)
static void aac_handle_aif(struct aac_dev *dev, struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	struct aac_aifcmd *aifcmd = (struct aac_aifcmd *)hw_fib->data;
	u32 channel, id, lun, container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed = NOTHING;

	/* Sniff for container changes */

	if (!dev || !dev->fsa_dev)
		return;
	container = channel = id = lun = (u32)-1;

	/*
	 *	We have set this up to try and minimize the number of
	 * re-configures that take place. As a result of this when
	 * certain AIF's come in we will set a flag waiting for another
	 * type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
		case AifRawDeviceRemove:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if ((container >> 28)) {
				container = (u32)-1;
				break;
			}
			channel = (container >> 24) & 0xF;
			if (channel >= dev->maximum_num_channels) {
				container = (u32)-1;
				break;
			}
			id = container & 0xFFFF;
			if (id >= dev->maximum_num_physicals) {
				container = (u32)-1;
				break;
			}
			lun = (container >> 16) & 0xFF;
			container = (u32)-1;
			channel = aac_phys_to_logical(channel);
			device_config_needed = DELETE;
			break;

		/*
		 *	Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 *	Find the scsi_device associated with the SCSI
			 * address. Make sure we have the right array, and if
			 * so set the flag to initiate a new re-config once we
			 * see an AifEnConfigChange AIF come through.
			 */
			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					dev->fsa_dev[container].config_waiting_stamp = jiffies;
					scsi_device_put(device);
				}
			}
		}

		/*
		 *	If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdEventNotify:
		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
		case AifEnBatteryEvent:
			dev->cache_protected =
				(((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
			break;
		/*
		 *	Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Container change detected. If we currently are not
		 * waiting on something else, setup to wait on a Config Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		case AifEnConfigChange:
			break;

		case AifEnAddJBOD:
		case AifEnDeleteJBOD:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if ((container >> 28)) {
				container = (u32)-1;
				break;
			}
			channel = (container >> 24) & 0xF;
			if (channel >= dev->maximum_num_channels) {
				container = (u32)-1;
				break;
			}
			id = container & 0xFFFF;
			if (id >= dev->maximum_num_physicals) {
				container = (u32)-1;
				break;
			}
			lun = (container >> 16) & 0xFF;
			container = (u32)-1;
			channel = aac_phys_to_logical(channel);
			device_config_needed =
			  (((__le32 *)aifcmd->data)[0] ==
			    cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
			if (device_config_needed == ADD) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					channel, id, lun);
				if (device) {
					scsi_remove_device(device);
					scsi_device_put(device);
				}
			}
			break;

		case AifEnEnclosureManagement:
			/*
			 * If in JBOD mode, automatic exposure of new
			 * physical target to be suppressed until configured.
			 */
			if (dev->jbod)
				break;
			switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
			case EM_DRIVE_INSERTION:
			case EM_DRIVE_REMOVAL:
			case EM_SES_DRIVE_INSERTION:
			case EM_SES_DRIVE_REMOVAL:
				container = le32_to_cpu(
					((__le32 *)aifcmd->data)[2]);
				if ((container >> 28)) {
					container = (u32)-1;
					break;
				}
				channel = (container >> 24) & 0xF;
				if (channel >= dev->maximum_num_channels) {
					container = (u32)-1;
					break;
				}
				id = container & 0xFFFF;
				lun = (container >> 16) & 0xFF;
				container = (u32)-1;
				if (id >= dev->maximum_num_physicals) {
					/* legacy dev_t ? */
					if ((0x2000 <= id) || lun || channel ||
					  ((channel = (id >> 7) & 0x3F) >=
					  dev->maximum_num_channels))
						break;
					lun = (id >> 4) & 7;
					id &= 0xF;
				}
				channel = aac_phys_to_logical(channel);
				device_config_needed =
				  ((((__le32 *)aifcmd->data)[3]
				    == cpu_to_le32(EM_DRIVE_INSERTION)) ||
				    (((__le32 *)aifcmd->data)[3]
				    == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
				  ADD : DELETE;
				break;
			}
			break;
		case AifBuManagerEvent:
			aac_handle_aif_bu(dev, aifcmd);
			break;
		}

		/*
		 *	If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdJobProgress:
		/*
		 *	These are job progress AIF's. When a Clear is being
		 * done on a container it is initially created then hidden from
		 * the OS. When the clear completes we don't get a config
		 * change so we monitor the job status complete on a clear then
		 * wait for a container change.
		 */
		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
		    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
		     ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
		    ((__le32 *)aifcmd->data)[6] == 0 &&
		    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		break;
	}

	container = 0;
retry_next:
	if (device_config_needed == NOTHING)
	for (; container < dev->maximum_num_containers; ++container) {
		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			(dev->fsa_dev[container].config_needed != NOTHING) &&
			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
			device_config_needed =
				dev->fsa_dev[container].config_needed;
			dev->fsa_dev[container].config_needed = NOTHING;
			channel = CONTAINER_TO_CHANNEL(container);
			id = CONTAINER_TO_ID(container);
			lun = CONTAINER_TO_LUN(container);
			break;
		}
	}
	if (device_config_needed == NOTHING)
		return;

	/*
	 *	If we decided that a re-configuration needs to be done,
	 * schedule it here on the way out the door, please close the door
	 * behind you.
	 */

	/*
	 *	Find the scsi_device associated with the SCSI address,
	 * and mark it as changed, invalidating the cache. This deals
	 * with changes to existing device IDs.
	 */
	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via aac_probe_container
	 */
	if ((channel == CONTAINER_CHANNEL) &&
	  (device_config_needed != NOTHING)) {
		if (dev->fsa_dev[container].valid == 1)
			dev->fsa_dev[container].valid = 2;
		aac_probe_container(dev, container);
	}
	device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
	if (device) {
		switch (device_config_needed) {
		case DELETE:
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
			scsi_remove_device(device);
#else
			if (scsi_device_online(device)) {
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					"Device offlined - %s\n",
					(channel == CONTAINER_CHANNEL) ?
						"array deleted" :
						"enclosure services event");
			}
#endif
			break;
		case ADD:
			if (!scsi_device_online(device)) {
				sdev_printk(KERN_INFO, device,
					"Device online - %s\n",
					(channel == CONTAINER_CHANNEL) ?
						"array created" :
						"enclosure services event");
				scsi_device_set_state(device, SDEV_RUNNING);
			}
			/* FALLTHRU */
		case CHANGE:
			if ((channel == CONTAINER_CHANNEL)
			 && (!dev->fsa_dev[container].valid)) {
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
				scsi_remove_device(device);
#else
				if (!scsi_device_online(device))
					break;
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					"Device offlined - %s\n",
					"array failed");
#endif
				break;
			}
			scsi_rescan_device(&device->sdev_gendev);
			break;

		default:
			break;
		}
		scsi_device_put(device);
		device_config_needed = NOTHING;
	}
	if (device_config_needed == ADD)
		scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
	if (channel == CONTAINER_CHANNEL) {
		container++;
		device_config_needed = NOTHING;
		goto retry_next;
	}
}
static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
	int index, quirks;
	int retval;
	struct Scsi_Host *host;
	struct scsi_device *dev;
	struct scsi_cmnd *command;
	struct scsi_cmnd *command_list;
	int jafo = 0;
	int bled;
	u64 dmamask;
	int num_of_fibs = 0;

	/*
	 * Assumptions:
	 *	- host is locked, unless called by the aacraid thread.
	 *	  (a matter of convenience, due to legacy issues surrounding
	 *	  eh_host_adapter_reset).
	 *	- in_reset is asserted, so no new i/o is getting to the
	 *	  card.
	 *	- The card is dead, or will be very shortly ;-/ so no new
	 *	  commands are completing in the interrupt service.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	aac_adapter_disable_int(aac);
	if (aac->thread && aac->thread->pid != current->pid) {
		spin_unlock_irq(host->host_lock);
		kthread_stop(aac->thread);
		aac->thread = NULL;
		jafo = 1;
	}

	/*
	 *	If a positive health, means in a known DEAD PANIC
	 * state and the adapter could be reset to `try again'.
	 */
	bled = forced ? 0 : aac_adapter_check_health(aac);
	retval = aac_adapter_restart(aac, bled, reset_type);

	if (retval)
		goto out;

	/*
	 *	Loop through the fibs, close the synchronous FIBS
	 */
	retval = 1;
	num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
	for (index = 0; index < num_of_fibs; index++) {

		struct fib *fib = &aac->fibs[index];
		__le32 XferState = fib->hw_fib_va->header.XferState;
		bool is_response_expected = false;

		if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) &&
		   (XferState & cpu_to_le32(ResponseExpected)))
			is_response_expected = true;

		if (is_response_expected
		  || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
			unsigned long flagv;
			spin_lock_irqsave(&fib->event_lock, flagv);
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			schedule();
			retval = 0;
		}
	}
	/* Give some extra time for ioctls to complete. */
	if (retval == 0)
		ssleep(2);
	index = aac->cardtype;

	/*
	 * Re-initialize the adapter, first free resources, then carefully
	 * apply the initialization sequence to come back again. Only risk
	 * is a change in Firmware dropping cache, it is assumed the caller
	 * will ensure that i/o is quiesced and the card is flushed in that
	 * case.
	 */
	aac_free_irq(aac);
	aac_fib_map_free(aac);
	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
			  aac->comm_phys);
	aac->comm_addr = NULL;
	aac->comm_phys = 0;
	kfree(aac->queues);
	aac->queues = NULL;
	kfree(aac->fsa_dev);
	aac->fsa_dev = NULL;

	dmamask = DMA_BIT_MASK(32);
	quirks = aac_get_driver_ident(index)->quirks;
	if (quirks & AAC_QUIRK_31BIT)
		retval = pci_set_dma_mask(aac->pdev, dmamask);
	else if (!(quirks & AAC_QUIRK_SRC))
		retval = pci_set_dma_mask(aac->pdev, dmamask);
	else
		retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);

	if (quirks & AAC_QUIRK_31BIT && !retval) {
		dmamask = DMA_BIT_MASK(31);
		retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
	}

	if (retval)
		goto out;

	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
		goto out;

	if (jafo) {
		aac->thread = kthread_run(aac_command_thread, aac, "%s",
					  aac->name);
		if (IS_ERR(aac->thread)) {
			retval = PTR_ERR(aac->thread);
			aac->thread = NULL;
			goto out;
		}
	}
	(void)aac_get_adapter_info(aac);
	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
		host->sg_tablesize = 34;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
		host->sg_tablesize = 17;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	aac_get_config_status(aac, 1);
	aac_get_containers(aac);
	/*
	 * This is where the assumption that the Adapter is quiesced
	 * is important.
	 */
	command_list = NULL;
	__shost_for_each_device(dev, host) {
		unsigned long flags;
		spin_lock_irqsave(&dev->list_lock, flags);
		list_for_each_entry(command, &dev->cmd_list, list)
			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
				command->SCp.buffer = (struct scatterlist *)command_list;
				command_list = command;
			}
		spin_unlock_irqrestore(&dev->list_lock, flags);
	}
	while ((command = command_list)) {
		command_list = (struct scsi_cmnd *)command->SCp.buffer;
		command->SCp.buffer = NULL;
		command->result = DID_OK << 16
		  | COMMAND_COMPLETE << 8
		  | SAM_STAT_TASK_SET_FULL;
		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
		command->scsi_done(command);
	}
	/*
	 * Any Device that was already marked offline needs to be marked
	 * running
	 */
	__shost_for_each_device(dev, host) {
		if (!scsi_device_online(dev))
			scsi_device_set_state(dev, SDEV_RUNNING);
	}
	retval = 0;

out:
	aac->in_reset = 0;
	scsi_unblock_requests(host);

	/*
	 * Issue bus rescan to catch any configuration that might have
	 * occurred
	 */
	if (!retval && !is_kdump_kernel()) {
		dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
		aac_schedule_safw_scan_worker(aac);
	}

	if (jafo)
		spin_lock_irq(host->host_lock);

	return retval;
}
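/*
 * aac_reset_adapter() is the externally visible entry point: it marks the
 * controller as being in reset, quiesces and shuts down the firmware, and
 * then performs the actual restart via _aac_reset_adapter() under the host
 * lock.
 */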
int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
	unsigned long flagv = 0;
	int retval;
	struct Scsi_Host *host;
	int bled;

	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return -EBUSY;

	if (aac->in_reset) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return -EBUSY;
	}
	aac->in_reset = 1;
	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	/*
	 * Wait for all commands to complete to this specific
	 * target (block maximum 60 seconds). Although not necessary,
	 * it does make us a good storage citizen.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);

	/* Quiesce build, flush cache, write through mode */
	if (forced < 2)
		aac_send_shutdown(aac);
	spin_lock_irqsave(host->host_lock, flagv);
	bled = forced ? forced :
		(aac_check_reset != 0 && aac_check_reset != 1);
	retval = _aac_reset_adapter(aac, bled, reset_type);
	spin_unlock_irqrestore(host->host_lock, flagv);

	if ((forced < 2) && (retval == -ENODEV)) {
		/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
		struct fib *fibctx = aac_fib_alloc(aac);
		if (fibctx) {
			struct aac_pause *cmd;
			int status;

			aac_fib_init(fibctx);

			cmd = (struct aac_pause *) fib_data(fibctx);

			cmd->command = cpu_to_le32(VM_ContainerConfig);
			cmd->type = cpu_to_le32(CT_PAUSE_IO);
			cmd->timeout = cpu_to_le32(1);
			cmd->min = cpu_to_le32(1);
			cmd->noRescan = cpu_to_le32(1);
			cmd->count = cpu_to_le32(0);

			status = aac_fib_send(ContainerCommand,
				  fibctx,
				  sizeof(struct aac_pause),
				  FsaNormal,
				  -2 /* Timeout silently */, 1,
				  NULL, NULL);

			if (status >= 0)
				aac_fib_complete(fibctx);
			/* FIB should be freed only after getting
			 * the response from the F/W */
			if (status != -ERESTARTSYS)
				aac_fib_free(fibctx);
		}
	}

	return retval;
}
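/*
 * aac_check_health() below reports a firmware panic/BlinkLED code to every
 * user-space context waiting on the AIF queues by synthesizing an
 * AifExeFirmwarePanic event, and returns the BlinkLED value to the caller.
 */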
int aac_check_health(struct aac_dev *aac)
{
	int BlinkLED;
	unsigned long time_now, flagv = 0;
	struct list_head *entry;

	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return 0;

	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return 0; /* OK */
	}

	aac->in_reset = 1;

	/* Fake up an AIF:
	 *	aac_aifcmd.command = AifCmdEventNotify = 1
	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
	 *	aac.aifcmd.data[2] = AifHighPriority = 3
	 *	aac.aifcmd.data[3] = BlinkLED
	 */

	time_now = jiffies/HZ;
	entry = aac->fib_list.next;

	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */
	while (entry != &aac->fib_list) {
		/*
		 * Extract the fibctx
		 */
		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
		struct hw_fib *hw_fib;
		struct fib *fib;
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ, so do not
			 * panic ...
			 */
			u32 time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(aac, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
		if (fib && hw_fib) {
			struct aac_aifcmd *aif;

			fib->hw_fib_va = hw_fib;
			fib->dev = aac;
			aac_fib_init(fib);
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->data = hw_fib->data;
			aif = (struct aac_aifcmd *)hw_fib->data;
			aif->command = cpu_to_le32(AifCmdEventNotify);
			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
			((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
			((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
			((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
			((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);

			/*
			 * Put the FIB onto the
			 * fibctx's fibs
			 */
			list_add_tail(&fib->fiblink, &fibctx->fib_list);
			fibctx->count++;
			/*
			 * Set the event to wake up the
			 * thread that is waiting.
			 */
			up(&fibctx->wait_sem);
		} else {
			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
			kfree(fib);
			kfree(hw_fib);
		}
		entry = entry->next;
	}

	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	if (BlinkLED < 0) {
		printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
				aac->name, BlinkLED);
		goto out;
	}

	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);

out:
	aac->in_reset = 0;
	return BlinkLED;
}
static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
{
	return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
}
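/*
 * The safw helpers below translate physical bus numbers to logical channel
 * numbers (except for the RAID container channel) before talking to the
 * SCSI midlayer.
 */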
static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
								int bus,
								int target)
{
	if (bus != CONTAINER_CHANNEL)
		bus = aac_phys_to_logical(bus);

	return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
}

static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
{
	if (bus != CONTAINER_CHANNEL)
		bus = aac_phys_to_logical(bus);

	return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
}

static void aac_put_safw_scsi_device(struct scsi_device *sdev)
{
	if (sdev)
		scsi_device_put(sdev);
}

static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
{
	struct scsi_device *sdev;

	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
	scsi_remove_device(sdev);
	aac_put_safw_scsi_device(sdev);
}

static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
	int bus, int target)
{
	return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
}

static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
{
	if (is_safw_raid_volume(dev, bus, target))
		return dev->fsa_dev[target].valid;

	return aac_is_safw_scan_count_equal(dev, bus, target);
}

static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
{
	int is_exposed = 0;
	struct scsi_device *sdev;

	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
	if (sdev)
		is_exposed = 1;
	aac_put_safw_scsi_device(sdev);

	return is_exposed;
}

static int aac_update_safw_host_devices(struct aac_dev *dev)
{
	int i;
	int bus;
	int target;
	int is_exposed = 0;
	int rcode = 0;

	rcode = aac_setup_safw_adapter(dev);
	if (unlikely(rcode < 0))
		goto out;

	for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {

		bus = get_bus_number(i);
		target = get_target_number(i);

		is_exposed = aac_is_safw_device_exposed(dev, bus, target);

		if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
			aac_add_safw_device(dev, bus, target);
		else if (!aac_is_safw_target_valid(dev, bus, target) &&
								is_exposed)
			aac_remove_safw_device(dev, bus, target);
	}
out:
	return rcode;
}

static int aac_scan_safw_host(struct aac_dev *dev)
{
	int rcode = 0;

	rcode = aac_update_safw_host_devices(dev);
	if (rcode)
		aac_schedule_safw_scan_worker(dev);

	return rcode;
}

int aac_scan_host(struct aac_dev *dev)
{
	int rcode = 0;

	mutex_lock(&dev->scan_mutex);
	if (dev->sa_firmware)
		rcode = aac_scan_safw_host(dev);
	else
		scsi_scan_host(dev->scsi_host_ptr);
	mutex_unlock(&dev->scan_mutex);

	return rcode;
}
/**
 *	aac_handle_sa_aif	-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */
static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
{
	int i;
	u32 events = 0;

	if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
		events = SA_AIF_HOTPLUG;
	else if (fibptr->hbacmd_size & SA_AIF_HARDWARE)
		events = SA_AIF_HARDWARE;
	else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE)
		events = SA_AIF_PDEV_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE)
		events = SA_AIF_LDEV_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE)
		events = SA_AIF_BPSTAT_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE)
		events = SA_AIF_BPCFG_CHANGE;

	switch (events) {
	case SA_AIF_HOTPLUG:
	case SA_AIF_HARDWARE:
	case SA_AIF_PDEV_CHANGE:
	case SA_AIF_LDEV_CHANGE:
	case SA_AIF_BPCFG_CHANGE:

		aac_scan_host(dev);

		break;

	case SA_AIF_BPSTAT_CHANGE:
		/* currently do nothing */
		break;
	}

	for (i = 1; i <= 10; ++i) {
		events = src_readl(dev, MUnit.IDR);
		if (events & (1<<23)) {
			pr_warn(" AIF not cleared by firmware - %d/%d)\n",
				i, 10);
			ssleep(1);
		}
	}
}
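/*
 * get_fib_count() estimates how many fib copies may be needed to fan an
 * AIF out to all registered user-space contexts; the estimate is taken
 * under dev->fib_lock and the actual allocations are done later, outside
 * the lock, by fillup_pools().
 */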
static int get_fib_count(struct aac_dev *dev)
{
	unsigned int num = 0;
	struct list_head *entry;
	unsigned long flagv;

	/*
	 * Warning: no sleep allowed while
	 * holding spinlock. We take the estimate
	 * and pre-allocate a set of fibs outside the
	 * lock.
	 */
	num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
			/ sizeof(struct hw_fib); /* some extra */
	spin_lock_irqsave(&dev->fib_lock, flagv);
	entry = dev->fib_list.next;
	while (entry != &dev->fib_list) {
		entry = entry->next;
		++num;
	}
	spin_unlock_irqrestore(&dev->fib_lock, flagv);

	return num;
}
*dev
, struct hw_fib
**hw_fib_pool
,
2053 struct fib
**fib_pool
,
2056 struct hw_fib
**hw_fib_p
;
2059 hw_fib_p
= hw_fib_pool
;
2061 while (hw_fib_p
< &hw_fib_pool
[num
]) {
2062 *(hw_fib_p
) = kmalloc(sizeof(struct hw_fib
), GFP_KERNEL
);
2063 if (!(*(hw_fib_p
++))) {
2068 *(fib_p
) = kmalloc(sizeof(struct fib
), GFP_KERNEL
);
2069 if (!(*(fib_p
++))) {
2070 kfree(*(--hw_fib_p
));
2076 * Get the actual number of allocated fibs
2078 num
= hw_fib_p
- hw_fib_pool
;
2082 static void wakeup_fibctx_threads(struct aac_dev
*dev
,
2083 struct hw_fib
**hw_fib_pool
,
2084 struct fib
**fib_pool
,
2086 struct hw_fib
*hw_fib
,
2089 unsigned long flagv
;
2090 struct list_head
*entry
;
2091 struct hw_fib
**hw_fib_p
;
2093 u32 time_now
, time_last
;
2094 struct hw_fib
*hw_newfib
;
2096 struct aac_fib_context
*fibctx
;
2098 time_now
= jiffies
/HZ
;
2099 spin_lock_irqsave(&dev
->fib_lock
, flagv
);
2100 entry
= dev
->fib_list
.next
;
2102 * For each Context that is on the
2103 * fibctxList, make a copy of the
2104 * fib, and then set the event to wake up the
2105 * thread that is waiting for it.
2108 hw_fib_p
= hw_fib_pool
;
2110 while (entry
!= &dev
->fib_list
) {
2112 * Extract the fibctx
2114 fibctx
= list_entry(entry
, struct aac_fib_context
,
2117 * Check if the queue is getting
2120 if (fibctx
->count
> 20) {
2122 * It's *not* jiffies folks,
2123 * but jiffies / HZ so do not
2126 time_last
= fibctx
->jiffies
;
2128 * Has it been > 2 minutes
2129 * since the last read off
2132 if ((time_now
- time_last
) > aif_timeout
) {
2133 entry
= entry
->next
;
2134 aac_close_fib_context(dev
, fibctx
);
2139 * Warning: no sleep allowed while
2142 if (hw_fib_p
>= &hw_fib_pool
[num
]) {
2143 pr_warn("aifd: didn't allocate NewFib\n");
2144 entry
= entry
->next
;
2148 hw_newfib
= *hw_fib_p
;
2149 *(hw_fib_p
++) = NULL
;
2153 * Make the copy of the FIB
2155 memcpy(hw_newfib
, hw_fib
, sizeof(struct hw_fib
));
2156 memcpy(newfib
, fib
, sizeof(struct fib
));
2157 newfib
->hw_fib_va
= hw_newfib
;
2159 * Put the FIB onto the
2162 list_add_tail(&newfib
->fiblink
, &fibctx
->fib_list
);
2165 * Set the event to wake up the
2166 * thread that is waiting.
2168 up(&fibctx
->wait_sem
);
2170 entry
= entry
->next
;
2173 * Set the status of this FIB
2175 *(__le32
*)hw_fib
->data
= cpu_to_le32(ST_OK
);
2176 aac_fib_adapter_complete(fib
, sizeof(u32
));
2177 spin_unlock_irqrestore(&dev
->fib_lock
, flagv
);
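/*
 * aac_process_events() drains the HostNormCmdQueue from the aacraid thread:
 * SA (Thor) firmware AIFs are dispatched to aac_handle_sa_aif(), legacy AIFs
 * to aac_handle_aif(), and each event is then copied to every waiting
 * user-space fib context via wakeup_fibctx_threads().
 */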
static void aac_process_events(struct aac_dev *dev)
{
	struct hw_fib *hw_fib;
	struct fib *fib;
	unsigned long flags;
	spinlock_t *t_lock;

	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_lock_irqsave(t_lock, flags);

	while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
		struct list_head *entry;
		struct aac_aifcmd *aifcmd;
		unsigned int num;
		struct hw_fib **hw_fib_pool, **hw_fib_p;
		struct fib **fib_pool, **fib_p;

		set_current_state(TASK_RUNNING);

		entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
		list_del(entry);

		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_unlock_irqrestore(t_lock, flags);

		fib = list_entry(entry, struct fib, fiblink);
		hw_fib = fib->hw_fib_va;
		if (dev->sa_firmware) {
			/* Thor AIF */
			aac_handle_sa_aif(dev, fib);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			goto free_fib;
		}
		/*
		 * We will process the FIB here or pass it to a
		 * worker thread that is TBD. We Really can't
		 * do anything at this point since we don't have
		 * anything defined for this thread to do.
		 */
		memset(fib, 0, sizeof(struct fib));
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;
		/*
		 * We only handle AifRequest fibs from the adapter.
		 */

		aifcmd = (struct aac_aifcmd *) hw_fib->data;
		if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
			/* Handle Driver Notify Events */
			aac_handle_aif(dev, fib);
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			goto free_fib;
		}
		/*
		 * The u32 here is important and intended. We are using
		 * 32bit wrapping time to fit the adapter field
		 */

		/* Sniff events */
		if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
		 || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
			aac_handle_aif(dev, fib);
		}

		/*
		 * get number of fibs to process
		 */
		num = get_fib_count(dev);
		if (!num)
			goto free_fib;

		hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
						GFP_KERNEL);
		if (!hw_fib_pool)
			goto free_fib;

		fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
		if (!fib_pool)
			goto free_hw_fib_pool;

		/*
		 * Fill up fib pointer pools with actual fibs
		 * and num with actual number of fibs allocated
		 */
		num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
		if (!num)
			goto free_mem;

		/*
		 * wakeup the thread that is waiting for
		 * the response from fw (ioctl)
		 */
		wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
						fib, hw_fib, num);

free_mem:
		/* Free up the remaining resources */
		hw_fib_p = hw_fib_pool;
		fib_p = fib_pool;
		while (hw_fib_p < &hw_fib_pool[num]) {
			kfree(*hw_fib_p);
			kfree(*fib_p);
			++fib_p;
			++hw_fib_p;
		}
		kfree(hw_fib_pool);
free_hw_fib_pool:
		kfree(fib_pool);
free_fib:
		kfree(fib);
		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_lock_irqsave(t_lock, flags);
	}
	/*
	 * There are no more AIF's
	 */
	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_unlock_irqrestore(t_lock, flags);
}
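
/**
 * aac_send_wellness_command - send a wellness string to the firmware
 * @dev: Adapter to send to
 * @wellness_str: formatted wellness buffer
 * @datasize: size of the wellness buffer in bytes
 *
 * Builds a ScsiPortCommand64 SRB (BMIC_OUT / WRITE_HOST_WELLNESS) carrying
 * the wellness buffer in a single 64-bit scatter/gather element and sends
 * it to the virtual device exposed by SA firmware.
 */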
static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
						u32 datasize)
{
	struct aac_srb *srbcmd;
	struct sgmap64 *sg64;
	dma_addr_t addr;
	char *dma_buf;
	struct fib *fibptr;
	int ret = -ENOMEM;
	u32 vbus, vid;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
				     GFP_KERNEL);
	if (!dma_buf)
		goto fib_free_out;

	aac_fib_init(fibptr);

	vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
	vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);

	srbcmd = (struct aac_srb *)fib_data(fibptr);

	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
	srbcmd->channel = cpu_to_le32(vbus);
	srbcmd->id = cpu_to_le32(vid);
	srbcmd->lun = 0;
	srbcmd->flags = cpu_to_le32(SRB_DataOut);
	srbcmd->timeout = cpu_to_le32(10);
	srbcmd->retry_limit = 0;
	srbcmd->cdb_size = cpu_to_le32(12);
	srbcmd->count = cpu_to_le32(datasize);

	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
	srbcmd->cdb[0] = BMIC_OUT;
	srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
	memcpy(dma_buf, (char *)wellness_str, datasize);

	sg64 = (struct sgmap64 *)&srbcmd->sg;
	sg64->count = cpu_to_le32(1);
	sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
	sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
	sg64->sg[0].count = cpu_to_le32(datasize);

	ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
				FsaNormal, 1, 1, NULL, NULL);

	dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);

	/*
	 * Do not set XferState to zero unless
	 * receives a response from F/W
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * FIB should be freed only after
	 * getting the response from the F/W
	 */
	if (ret != -ERESTARTSYS)
		goto fib_free_out;

out:
	return ret;
fib_free_out:
	aac_fib_free(fibptr);
	goto out;
}
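
/**
 * aac_send_safw_hostttime - send the host time to SA firmware
 * @dev: Adapter to update
 * @now: current host time
 *
 * Converts the given time to local time, encodes it as BCD into a wellness
 * string and forwards it via aac_send_wellness_command(). Does nothing on
 * adapters without SA firmware.
 */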
int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
{
	struct tm cur_tm;
	char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
	u32 datasize = sizeof(wellness_str);
	time64_t local_time;
	int ret = -ENODEV;

	if (!dev->sa_firmware)
		goto out;

	local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60));
	time64_to_tm(local_time, 0, &cur_tm);
	cur_tm.tm_mon += 1;
	cur_tm.tm_year += 1900;
	wellness_str[8] = bin2bcd(cur_tm.tm_hour);
	wellness_str[9] = bin2bcd(cur_tm.tm_min);
	wellness_str[10] = bin2bcd(cur_tm.tm_sec);
	wellness_str[12] = bin2bcd(cur_tm.tm_mon);
	wellness_str[13] = bin2bcd(cur_tm.tm_mday);
	wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
	wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);

	ret = aac_send_wellness_command(dev, wellness_str, datasize);

out:
	return ret;
}
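
/**
 * aac_send_hosttime - send the host time to legacy firmware
 * @dev: Adapter to update
 * @now: current host time
 *
 * Sends the current time in seconds as a SendHostTime FIB. The FIB is left
 * allocated when the send returns -ERESTARTSYS, since the firmware may
 * still complete it.
 */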
int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
{
	struct fib *fibptr;
	__le32 *info;
	int ret = -ENOMEM;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	aac_fib_init(fibptr);
	info = (__le32 *)fib_data(fibptr);
	*info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */
	ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
					1, 1, NULL, NULL);

	/*
	 * Do not set XferState to zero unless
	 * receives a response from F/W
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * FIB should be freed only after
	 * getting the response from the F/W
	 */
	if (ret != -ERESTARTSYS)
		aac_fib_free(fibptr);

out:
	return ret;
}
/**
 * aac_command_thread - command processing thread
 * @data: Adapter to monitor
 *
 * Waits on the commandready event in its queue. When the event gets set
 * it will pull FIBs off its queue. It will continue to pull FIBs off
 * until the queue is empty. When the queue is empty it will wait for
 * more FIBs.
 */

int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long next_jiffies = jiffies + HZ;
	unsigned long next_check_jiffies = next_jiffies;
	long difference = HZ;

	/*
	 * We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 * Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk ((KERN_INFO "aac_command_thread start\n"));
	while (1) {

		aac_process_events(dev);

		/*
		 * Background activity
		 */
		if ((time_before(next_check_jiffies,next_jiffies))
		 && ((difference = next_check_jiffies - jiffies) <= 0)) {
			next_check_jiffies = next_jiffies;
			if (aac_adapter_check_health(dev) == 0) {
				difference = ((long)(unsigned)check_interval)
					   * HZ;
				next_check_jiffies = jiffies + difference;
			} else if (!dev->queues)
				break;
		}
		if (!time_before(next_check_jiffies,next_jiffies)
		 && ((difference = next_jiffies - jiffies) <= 0)) {
			struct timespec64 now;
			int ret;

			/* Don't even try to talk to adapter if its sick */
			ret = aac_adapter_check_health(dev);
			if (ret || !dev->queues)
				break;
			next_check_jiffies = jiffies
					   + ((long)(unsigned)check_interval)
					   * HZ;
			ktime_get_real_ts64(&now);

			/* Synchronize our watches */
			if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
			 && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
				difference = HZ + HZ / 2 -
					     now.tv_nsec / (NSEC_PER_SEC / HZ);
			else {
				if (now.tv_nsec > NSEC_PER_SEC / 2)
					++now.tv_sec;

				if (dev->sa_firmware)
					ret =
					aac_send_safw_hostttime(dev, &now);
				else
					ret = aac_send_hosttime(dev, &now);

				difference = (long)(unsigned)update_interval*HZ;
			}
			next_jiffies = jiffies + difference;
			if (time_before(next_check_jiffies,next_jiffies))
				difference = next_check_jiffies - jiffies;
		}
		if (difference <= 0)
			difference = 1;
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop())
			break;

		/*
		 * we probably want usleep_range() here instead of the
		 * jiffies computation
		 */
		schedule_timeout(difference);

		if (kthread_should_stop())
			break;
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}
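
/**
 * aac_acquire_irq - register the adapter interrupt handlers
 * @dev: Adapter to wire up
 *
 * Requests one IRQ per MSI-X vector when multiple vectors are enabled,
 * otherwise a single (possibly shared) legacy/MSI interrupt. Returns 0 on
 * success or -1 if any registration fails.
 */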
int aac_acquire_irq(struct aac_dev *dev)
{
	int i;
	int j;
	int ret = 0;

	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
		for (i = 0; i < dev->max_msix; i++) {
			dev->aac_msix[i].vector_no = i;
			dev->aac_msix[i].dev = dev;
			if (request_irq(pci_irq_vector(dev->pdev, i),
					dev->a_ops.adapter_intr,
					0, "aacraid", &(dev->aac_msix[i]))) {
				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
						dev->name, dev->id, i);
				for (j = 0 ; j < i ; j++)
					free_irq(pci_irq_vector(dev->pdev, j),
						 &(dev->aac_msix[j]));
				pci_disable_msix(dev->pdev);
				ret = -1;
			}
		}
	} else {
		dev->aac_msix[0].vector_no = 0;
		dev->aac_msix[0].dev = dev;

		if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid",
			&(dev->aac_msix[0])) < 0) {
			if (dev->msi)
				pci_disable_msi(dev->pdev);
			printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
					dev->name, dev->id);
			ret = -1;
		}
	}
	return ret;
}
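
/**
 * aac_free_irq - release the adapter interrupt handlers
 * @dev: Adapter to tear down
 *
 * Frees every registered MSI-X vector (or the single legacy/MSI interrupt)
 * and disables MSI/MSI-X on the PCI device.
 */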
void aac_free_irq(struct aac_dev *dev)
{
	int i;
	int cpu;

	cpu = cpumask_first(cpu_online_mask);
	if (aac_is_src(dev)) {
		if (dev->max_msix > 1) {
			for (i = 0; i < dev->max_msix; i++)
				free_irq(pci_irq_vector(dev->pdev, i),
					 &(dev->aac_msix[i]));
		} else {
			free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
		}
	} else {
		free_irq(dev->pdev->irq, dev);
	}
	if (dev->msi_enabled)
		pci_disable_msi(dev->pdev);
	else if (dev->max_msix > 1)
		pci_disable_msix(dev->pdev);
}