// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *		 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Abstract: Contain all routines that are required for FSA host/adapter
 *    communication.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/bcd.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include "aacraid.h"
/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
	else
		dev->max_cmd_size = dev->max_fib_size;
	if (dev->max_fib_size < AAC_MAX_NATIVE_SIZE) {
		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
	} else {
		dev->max_cmd_size = dev->max_fib_size;
	}

	dprintk((KERN_INFO
	  "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
	  &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev,
		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
		* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
		&dev->hw_fib_pa, GFP_KERNEL);
	if (dev->hw_fib_va == NULL)
		return -ENOMEM;
	return 0;
}
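
/*
 * Illustrative sketch only (not part of the driver): the size passed to
 * dma_alloc_coherent() above works out to one slot of
 * (max_cmd_size + sizeof(struct aac_fib_xporthdr)) bytes per FIB, one FIB
 * per outstanding SCSI command plus AAC_NUM_MGT_FIB management FIBs, and
 * ALIGN32 - 1 spare bytes so the first slot can later be rounded up to a
 * 32 byte boundary. A hypothetical helper computing the same figure:
 */
static inline size_t example_fib_pool_bytes(const struct aac_dev *dev)
{
	size_t slot = dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
	size_t slots = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;

	return slot * slots + (ALIGN32 - 1);
}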
/**
 *	aac_fib_map_free		-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */

void aac_fib_map_free(struct aac_dev *dev)
{
	size_t alloc_size;
	size_t fib_size;
	int num_fibs;

	if (!dev->hw_fib_va || !dev->max_cmd_size)
		return;

	num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
	fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
	alloc_size = fib_size * num_fibs + ALIGN32 - 1;

	dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va,
			  dev->hw_fib_pa);

	dev->hw_fib_va = NULL;
	dev->hw_fib_pa = 0;
}
void aac_fib_vector_assign(struct aac_dev *dev)
{
	u32 i = 0;
	u32 vector = 1;
	struct fib *fibptr = NULL;

	for (i = 0, fibptr = &dev->fibs[i];
		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
		i++, fibptr++) {
		if ((dev->max_msix == 1) ||
		  (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
			- dev->vector_cap))) {
			fibptr->vector_no = 0;
		} else {
			fibptr->vector_no = vector;
			vector++;
			if (vector == dev->max_msix)
				vector = 1;
		}
	}
}
/**
 *	aac_fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */

int aac_fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib;
	dma_addr_t hw_fib_pa;
	int i;
	u32 max_cmds;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		max_cmds = (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1;
		dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB;
		if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
			dev->init->r7.max_io_commands = cpu_to_le32(max_cmds);
	}
	if (i < 0)
		return -ENOMEM;

	memset(dev->hw_fib_va, 0,
		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));

	/* 32 byte alignment for PMC */
	hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
	hw_fib = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
					(hw_fib_pa - dev->hw_fib_pa));

	/* add Xport header */
	hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
					sizeof(struct aac_fib_xporthdr));
	hw_fib_pa += sizeof(struct aac_fib_xporthdr);

	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i];
		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
		i++, fibptr++) {
		fibptr->flags = 0;
		fibptr->size = sizeof(struct fib);
		fibptr->dev = dev;
		fibptr->hw_fib_va = hw_fib;
		fibptr->data = (void *) fibptr->hw_fib_va->data;
		fibptr->next = fibptr+1;	/* Forward chain the fibs */
		init_completion(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib->header.SenderSize =
			cpu_to_le16(dev->max_fib_size);	/* ?? max_cmd_size */
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_sgl_pa = hw_fib_pa +
			offsetof(struct aac_hba_cmd_req, sge[2]);
		/*
		 * one element is for the ptr to the separate sg list,
		 * second element for 32 byte alignment
		 */
		fibptr->hw_error_pa = hw_fib_pa +
			offsetof(struct aac_native_hba, resp.resp_bytes[0]);

		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr));
		hw_fib_pa = hw_fib_pa +
			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
	}

	/*
	 *	Assign vector numbers to fibs
	 */
	aac_fib_vector_assign(dev);

	/*
	 *	Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 *	Set 8 fibs aside for management tools
	 */
	dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
	return 0;
}
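
/*
 * Illustrative sketch only (assumption, not driver code): after
 * aac_fib_setup() each struct fib and its hardware FIB sit at fixed offsets
 * inside the single coherent allocation, so the slot for index i can be
 * recomputed from the 32-byte-aligned base the loop above started from:
 */
static inline struct hw_fib *example_hw_fib_slot(struct aac_dev *dev, int i)
{
	dma_addr_t base_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
	unsigned char *base_va = (unsigned char *)dev->hw_fib_va +
				(base_pa - dev->hw_fib_pa) +
				sizeof(struct aac_fib_xporthdr);
	size_t slot = dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);

	return (struct hw_fib *)(base_va + (size_t)i * slot);
}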
/**
 *	aac_fib_alloc_tag	-	allocate a fib using tags
 *	@dev: Adapter to allocate the fib for
 *	@scmd: SCSI command
 *
 *	Allocate a fib from the adapter fib pool using tags
 *	from the blk layer.
 */

struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
{
	struct fib *fibptr;

	fibptr = &dev->fibs[scmd->request->tag];
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->callback_data = NULL;
	fibptr->callback = NULL;

	return fibptr;
}
/**
 *	aac_fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;

	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}
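
/*
 * Illustrative sketch only (assumption, not driver code): the usual life
 * cycle of a FIB taken from this pool is allocate -> init -> fill payload ->
 * send -> complete -> free. The empty payload and the use of
 * ContainerCommand / FsaNormal below are hypothetical stand-ins for a real
 * request.
 */
static int example_fib_round_trip(struct aac_dev *dev)
{
	struct fib *fibptr = aac_fib_alloc(dev);
	int status;

	if (!fibptr)
		return -ENOMEM;

	aac_fib_init(fibptr);
	/* fill in fib_data(fibptr) with the command-specific payload here */
	status = aac_fib_send(ContainerCommand, fibptr, sizeof(u32),
			      FsaNormal, 1, 1, NULL, NULL);
	if (status >= 0)
		aac_fib_complete(fibptr);
	/* the FIB must not be freed while the firmware still owns it */
	if (status != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return status;
}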
/**
 *	aac_fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 */

void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	if (fibptr->done == 2)
		return;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
		aac_config.fib_timeouts++;
	if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
		fibptr->hw_fib_va->header.XferState != 0) {
		printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
			 (void *)fibptr,
			 le32_to_cpu(fibptr->hw_fib_va->header.XferState));
	}
	fibptr->next = fibptr->dev->free_fib;
	fibptr->dev->free_fib = fibptr;
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}
/**
 *	aac_fib_init	-	initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
/**
 *	fib_dealloc		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	hw_fib->header.XferState = 0;
}

/*
 *	Communication primitives define and support the queuing method we use
 *	to support host to adapter communication. All queue accesses happen
 *	through these routines and these are the only routines which have
 *	knowledge of how these queues are implemented.
 */
/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	With a priority the routine returns a queue entry if the queue has free entries. If the queue
 *	is full (no free entries) then no entry is returned and the function returns 0, otherwise 1 is
 *	returned.
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	/* Queue is full */
	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, atomic_read(&q->numpending));
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}
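
/*
 * Illustrative note (assumption, simplified): each queue above is a ring in
 * which one slot is sacrificed so "full" and "empty" can be told apart -
 * empty when producer == consumer, full when advancing the producer by one
 * would land on the consumer. A hypothetical helper expressing the same
 * full test that aac_get_entry() applies after wrapping the index:
 */
static inline int example_queue_is_full(u32 producer, u32 consumer,
					u32 entries)
{
	return ((producer + 1) % entries) == consumer;
}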
/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@priority: Priority of fib
 *	@fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}
/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This level
 *	sends and receives FIBs. This level has no knowledge of how these FIBs
 *	get passed back and forth.
 */

/**
 *	aac_fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */

int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	unsigned long flags = 0;
	unsigned long mflags = 0;
	unsigned long sflags = 0;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;

	if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))
		return -EINVAL;

	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress =
		cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);

	/* use the same shifted value for handle to be compatible
	 * with the new native hba command handle
	 */
	hw_fib->header.Handle =
		cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);

	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it and send a notify
	 *	to the adapter that a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
		fibptr->flags = FIB_CONTEXT_FLAG;
	}

	fibptr->done = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n", fibptr->hw_fib_va));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n", (ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n", fibptr));

	if (!dev->queues)
		return -EBUSY;

	if (wait) {

		spin_lock_irqsave(&dev->manage_lock, mflags);
		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
			printk(KERN_INFO "No management Fibs Available:%d\n",
						dev->management_fib_count);
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
		spin_lock_irqsave(&fibptr->event_lock, flags);
	}

	if (dev->sync_mode) {
		if (wait)
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
		spin_lock_irqsave(&dev->sync_lock, sflags);
		if (dev->sync_fib) {
			list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
		} else {
			dev->sync_fib = fibptr;
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
				(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
				NULL, NULL, NULL, NULL, NULL);
		}
		if (wait) {
			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
			if (wait_for_completion_interruptible(&fibptr->event_wait)) {
				fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
				return -EFAULT;
			}
			return 0;
		}
		return -EINPROGRESS;
	}

	if (aac_adapter_deliver(fibptr) != 0) {
		printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
		if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
		}
		return -EBUSY;
	}

	/*
	 *	If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 * *VERY* Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
			 */
			unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
			while (!try_wait_for_completion(&fibptr->event_wait)) {
				int blink;
				if (time_is_before_eq_jiffies(timeout)) {
					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
					atomic_dec(&q->numpending);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update mother board BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}

				if (unlikely(aac_pci_offline(dev)))
					return -EFAULT;

				if ((blink = aac_adapter_check_health(dev)) > 0) {
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
						  "Usually a result of a serious unrecoverable hardware problem\n",
						  blink);
					}
					return -EFAULT;
				}
				/*
				 * Allow other processes / CPUS to use core
				 */
				schedule();
			}
		} else if (wait_for_completion_interruptible(&fibptr->event_wait)) {
			/* Do nothing ... satisfy
			 * wait_for_completion_interruptible must_check */
		}

		spin_lock_irqsave(&fibptr->event_lock, flags);
		if (fibptr->done == 0) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		BUG_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;

		return 0;
	}
	/*
	 *	If the user does not want a response then return success,
	 *	otherwise wait for the response.
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
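
/*
 * Illustrative sketch only (assumption, not driver code): an asynchronous
 * send passes wait=0 together with a callback; the interrupt/DPC path then
 * invokes the callback once the response FIB arrives. "example_done" is a
 * hypothetical completion handler, not a routine of this driver.
 */
static void example_done(void *context, struct fib *fibptr)
{
	aac_fib_complete(fibptr);
	aac_fib_free(fibptr);
}

static int example_async_send(struct aac_dev *dev, struct fib *fibptr,
			      unsigned long size)
{
	/* wait=0, reply=1 selects the Async | ResponseExpected path above */
	return aac_fib_send(ContainerCommand, fibptr, size, FsaNormal,
			    0, 1, example_done, dev);
}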
int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
		void *callback_data)
{
	struct aac_dev *dev = fibptr->dev;
	int wait;
	unsigned long flags = 0;
	unsigned long mflags = 0;
	struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
			fibptr->hw_fib_va;

	fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
	if (callback) {
		wait = 0;
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	} else
		wait = 1;

	hbacmd->iu_type = command;

	if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
		/* bit1 of request_id must be 0 */
		hbacmd->request_id =
			cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
		fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
	} else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
		return -EINVAL;

	if (wait) {
		spin_lock_irqsave(&dev->manage_lock, mflags);
		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
		spin_lock_irqsave(&fibptr->event_lock, flags);
	}

	if (aac_adapter_deliver(fibptr) != 0) {
		if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
		}
		return -EBUSY;
	}
	FIB_COUNTER_INCREMENT(aac_config.NativeSent);

	if (wait) {

		spin_unlock_irqrestore(&fibptr->event_lock, flags);

		if (unlikely(aac_pci_offline(dev)))
			return -EFAULT;

		fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
		if (wait_for_completion_interruptible(&fibptr->event_wait))
			fibptr->done = 2;
		fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);

		spin_lock_irqsave(&fibptr->event_lock, flags);
		if ((fibptr->done == 0) || (fibptr->done == 2)) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		WARN_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;

		return 0;
	}

	return -EINPROGRESS;
}
/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Will return a pointer to the entry on the top of the queue requested that
 *	we are a consumer of, and return the address of the queue entry. It does
 *	not change the state of the queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;

	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return(status);
}
/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		le32_add_cpu(q->headers.consumer, 1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}
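
/*
 * Illustrative sketch only (assumption, not driver code): the host-side
 * consumer pattern pairing the two routines above - peek at the entry on
 * top of a queue we consume, handle it, then advance the consumer index
 * (which also notifies the adapter if the queue had been full):
 */
static void example_drain_queue(struct aac_dev *dev, u32 qid)
{
	struct aac_queue *q = &dev->queues->queue[qid];
	struct aac_entry *entry;

	while (aac_consumer_get(dev, q, &entry)) {
		/* process entry->addr / entry->size here */
		aac_consumer_free(dev, q, qid);
	}
}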
/**
 *	aac_fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
		dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
		dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		kfree(hw_fib);
		return 0;
	}

	if (hw_fib->header.XferState == 0) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return 0;
	}
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC &&
		hw_fib->header.StructType != FIB_MAGIC2 &&
		hw_fib->header.StructType != FIB_MAGIC2_64) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->comm_interface == AAC_COMM_MESSAGE) {
			kfree(hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	} else {
		printk(KERN_WARNING "aac_fib_adapter_complete: "
			"Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}
/**
 *	aac_fib_complete	-	fib completion handler
 *	@fibptr: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;

	if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
		fib_dealloc(fibptr);
		return 0;
	}

	/*
	 *	Check for a fib which has already been completed or with a
	 *	status wait timeout
	 */

	if (hw_fib->header.XferState == 0 || fibptr->done == 2)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC &&
		hw_fib->header.StructType != FIB_MAGIC2 &&
		hw_fib->header.StructType != FIB_MAGIC2_64)
		return -EINVAL;

	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command is complete that we had sent to the adapter and this
	 *	cdb could be reused.
	 */

	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) {
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost)) {
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}
/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled) {
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}
static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index)
{
	return le32_to_cpu(((__le32 *)aifcmd->data)[index]);
}

static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
{
	switch (aac_aif_data(aifcmd, 1)) {
	case AifBuCacheDataLoss:
		if (aac_aif_data(aifcmd, 2))
			dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n",
					aac_aif_data(aifcmd, 2));
		else
			dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n");
		break;
	case AifBuCacheDataRecover:
		if (aac_aif_data(aifcmd, 2))
			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n",
					aac_aif_data(aifcmd, 2));
		else
			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n");
		break;
	}
}
1053 * aac_handle_aif - Handle a message from the firmware
1054 * @dev: Which adapter this fib is from
1055 * @fibptr: Pointer to fibptr from adapter
1057 * This routine handles a driver notify fib from the adapter and
1058 * dispatches it to the appropriate routine for handling.
1061 #define AIF_SNIFF_TIMEOUT (500*HZ)
1062 static void aac_handle_aif(struct aac_dev
* dev
, struct fib
* fibptr
)
1064 struct hw_fib
* hw_fib
= fibptr
->hw_fib_va
;
1065 struct aac_aifcmd
* aifcmd
= (struct aac_aifcmd
*)hw_fib
->data
;
1066 u32 channel
, id
, lun
, container
;
1067 struct scsi_device
*device
;
1073 } device_config_needed
= NOTHING
;
1075 /* Sniff for container changes */
1077 if (!dev
|| !dev
->fsa_dev
)
1079 container
= channel
= id
= lun
= (u32
)-1;
1082 * We have set this up to try and minimize the number of
1083 * re-configures that take place. As a result of this when
1084 * certain AIF's come in we will set a flag waiting for another
1085 * type of AIF before setting the re-config flag.
1087 switch (le32_to_cpu(aifcmd
->command
)) {
1088 case AifCmdDriverNotify
:
1089 switch (le32_to_cpu(((__le32
*)aifcmd
->data
)[0])) {
1090 case AifRawDeviceRemove
:
1091 container
= le32_to_cpu(((__le32
*)aifcmd
->data
)[1]);
1092 if ((container
>> 28)) {
1093 container
= (u32
)-1;
1096 channel
= (container
>> 24) & 0xF;
1097 if (channel
>= dev
->maximum_num_channels
) {
1098 container
= (u32
)-1;
1101 id
= container
& 0xFFFF;
1102 if (id
>= dev
->maximum_num_physicals
) {
1103 container
= (u32
)-1;
1106 lun
= (container
>> 16) & 0xFF;
1107 container
= (u32
)-1;
1108 channel
= aac_phys_to_logical(channel
);
1109 device_config_needed
= DELETE
;
1113 * Morph or Expand complete
1115 case AifDenMorphComplete
:
1116 case AifDenVolumeExtendComplete
:
1117 container
= le32_to_cpu(((__le32
*)aifcmd
->data
)[1]);
1118 if (container
>= dev
->maximum_num_containers
)
1122 * Find the scsi_device associated with the SCSI
1123 * address. Make sure we have the right array, and if
1124 * so set the flag to initiate a new re-config once we
1125 * see an AifEnConfigChange AIF come through.
1128 if ((dev
!= NULL
) && (dev
->scsi_host_ptr
!= NULL
)) {
1129 device
= scsi_device_lookup(dev
->scsi_host_ptr
,
1130 CONTAINER_TO_CHANNEL(container
),
1131 CONTAINER_TO_ID(container
),
1132 CONTAINER_TO_LUN(container
));
1134 dev
->fsa_dev
[container
].config_needed
= CHANGE
;
1135 dev
->fsa_dev
[container
].config_waiting_on
= AifEnConfigChange
;
1136 dev
->fsa_dev
[container
].config_waiting_stamp
= jiffies
;
1137 scsi_device_put(device
);
1143 * If we are waiting on something and this happens to be
1144 * that thing then set the re-configure flag.
1146 if (container
!= (u32
)-1) {
1147 if (container
>= dev
->maximum_num_containers
)
1149 if ((dev
->fsa_dev
[container
].config_waiting_on
==
1150 le32_to_cpu(*(__le32
*)aifcmd
->data
)) &&
1151 time_before(jiffies
, dev
->fsa_dev
[container
].config_waiting_stamp
+ AIF_SNIFF_TIMEOUT
))
1152 dev
->fsa_dev
[container
].config_waiting_on
= 0;
1153 } else for (container
= 0;
1154 container
< dev
->maximum_num_containers
; ++container
) {
1155 if ((dev
->fsa_dev
[container
].config_waiting_on
==
1156 le32_to_cpu(*(__le32
*)aifcmd
->data
)) &&
1157 time_before(jiffies
, dev
->fsa_dev
[container
].config_waiting_stamp
+ AIF_SNIFF_TIMEOUT
))
1158 dev
->fsa_dev
[container
].config_waiting_on
= 0;
1162 case AifCmdEventNotify
:
1163 switch (le32_to_cpu(((__le32
*)aifcmd
->data
)[0])) {
1164 case AifEnBatteryEvent
:
1165 dev
->cache_protected
=
1166 (((__le32
*)aifcmd
->data
)[1] == cpu_to_le32(3));
1171 case AifEnAddContainer
:
1172 container
= le32_to_cpu(((__le32
*)aifcmd
->data
)[1]);
1173 if (container
>= dev
->maximum_num_containers
)
1175 dev
->fsa_dev
[container
].config_needed
= ADD
;
1176 dev
->fsa_dev
[container
].config_waiting_on
=
1178 dev
->fsa_dev
[container
].config_waiting_stamp
= jiffies
;
1184 case AifEnDeleteContainer
:
1185 container
= le32_to_cpu(((__le32
*)aifcmd
->data
)[1]);
1186 if (container
>= dev
->maximum_num_containers
)
1188 dev
->fsa_dev
[container
].config_needed
= DELETE
;
1189 dev
->fsa_dev
[container
].config_waiting_on
=
1191 dev
->fsa_dev
[container
].config_waiting_stamp
= jiffies
;
1195 * Container change detected. If we currently are not
1196 * waiting on something else, setup to wait on a Config Change.
1198 case AifEnContainerChange
:
1199 container
= le32_to_cpu(((__le32
*)aifcmd
->data
)[1]);
1200 if (container
>= dev
->maximum_num_containers
)
1202 if (dev
->fsa_dev
[container
].config_waiting_on
&&
1203 time_before(jiffies
, dev
->fsa_dev
[container
].config_waiting_stamp
+ AIF_SNIFF_TIMEOUT
))
1205 dev
->fsa_dev
[container
].config_needed
= CHANGE
;
1206 dev
->fsa_dev
[container
].config_waiting_on
=
1208 dev
->fsa_dev
[container
].config_waiting_stamp
= jiffies
;
1211 case AifEnConfigChange
:
1215 case AifEnDeleteJBOD
:
1216 container
= le32_to_cpu(((__le32
*)aifcmd
->data
)[1]);
1217 if ((container
>> 28)) {
1218 container
= (u32
)-1;
1221 channel
= (container
>> 24) & 0xF;
1222 if (channel
>= dev
->maximum_num_channels
) {
1223 container
= (u32
)-1;
1226 id
= container
& 0xFFFF;
1227 if (id
>= dev
->maximum_num_physicals
) {
1228 container
= (u32
)-1;
1231 lun
= (container
>> 16) & 0xFF;
1232 container
= (u32
)-1;
1233 channel
= aac_phys_to_logical(channel
);
1234 device_config_needed
=
1235 (((__le32
*)aifcmd
->data
)[0] ==
1236 cpu_to_le32(AifEnAddJBOD
)) ? ADD
: DELETE
;
1237 if (device_config_needed
== ADD
) {
1238 device
= scsi_device_lookup(dev
->scsi_host_ptr
,
1243 scsi_remove_device(device
);
1244 scsi_device_put(device
);
1249 case AifEnEnclosureManagement
:
1251 * If in JBOD mode, automatic exposure of new
1252 * physical target to be suppressed until configured.
1256 switch (le32_to_cpu(((__le32
*)aifcmd
->data
)[3])) {
1257 case EM_DRIVE_INSERTION
:
1258 case EM_DRIVE_REMOVAL
:
1259 case EM_SES_DRIVE_INSERTION
:
1260 case EM_SES_DRIVE_REMOVAL
:
1261 container
= le32_to_cpu(
1262 ((__le32
*)aifcmd
->data
)[2]);
1263 if ((container
>> 28)) {
1264 container
= (u32
)-1;
1267 channel
= (container
>> 24) & 0xF;
1268 if (channel
>= dev
->maximum_num_channels
) {
1269 container
= (u32
)-1;
1272 id
= container
& 0xFFFF;
1273 lun
= (container
>> 16) & 0xFF;
1274 container
= (u32
)-1;
1275 if (id
>= dev
->maximum_num_physicals
) {
1276 /* legacy dev_t ? */
1277 if ((0x2000 <= id
) || lun
|| channel
||
1278 ((channel
= (id
>> 7) & 0x3F) >=
1279 dev
->maximum_num_channels
))
1281 lun
= (id
>> 4) & 7;
1284 channel
= aac_phys_to_logical(channel
);
1285 device_config_needed
=
1286 ((((__le32
*)aifcmd
->data
)[3]
1287 == cpu_to_le32(EM_DRIVE_INSERTION
)) ||
1288 (((__le32
*)aifcmd
->data
)[3]
1289 == cpu_to_le32(EM_SES_DRIVE_INSERTION
))) ?
1294 case AifBuManagerEvent
:
1295 aac_handle_aif_bu(dev
, aifcmd
);
1300 * If we are waiting on something and this happens to be
1301 * that thing then set the re-configure flag.
1303 if (container
!= (u32
)-1) {
1304 if (container
>= dev
->maximum_num_containers
)
1306 if ((dev
->fsa_dev
[container
].config_waiting_on
==
1307 le32_to_cpu(*(__le32
*)aifcmd
->data
)) &&
1308 time_before(jiffies
, dev
->fsa_dev
[container
].config_waiting_stamp
+ AIF_SNIFF_TIMEOUT
))
1309 dev
->fsa_dev
[container
].config_waiting_on
= 0;
1310 } else for (container
= 0;
1311 container
< dev
->maximum_num_containers
; ++container
) {
1312 if ((dev
->fsa_dev
[container
].config_waiting_on
==
1313 le32_to_cpu(*(__le32
*)aifcmd
->data
)) &&
1314 time_before(jiffies
, dev
->fsa_dev
[container
].config_waiting_stamp
+ AIF_SNIFF_TIMEOUT
))
1315 dev
->fsa_dev
[container
].config_waiting_on
= 0;
1319 case AifCmdJobProgress
:
1321 * These are job progress AIF's. When a Clear is being
1322 * done on a container it is initially created then hidden from
1323 * the OS. When the clear completes we don't get a config
1324 * change so we monitor the job status complete on a clear then
1325 * wait for a container change.
1328 if (((__le32
*)aifcmd
->data
)[1] == cpu_to_le32(AifJobCtrZero
) &&
1329 (((__le32
*)aifcmd
->data
)[6] == ((__le32
*)aifcmd
->data
)[5] ||
1330 ((__le32
*)aifcmd
->data
)[4] == cpu_to_le32(AifJobStsSuccess
))) {
1332 container
< dev
->maximum_num_containers
;
1335 * Stomp on all config sequencing for all
1338 dev
->fsa_dev
[container
].config_waiting_on
=
1339 AifEnContainerChange
;
1340 dev
->fsa_dev
[container
].config_needed
= ADD
;
1341 dev
->fsa_dev
[container
].config_waiting_stamp
=
1345 if (((__le32
*)aifcmd
->data
)[1] == cpu_to_le32(AifJobCtrZero
) &&
1346 ((__le32
*)aifcmd
->data
)[6] == 0 &&
1347 ((__le32
*)aifcmd
->data
)[4] == cpu_to_le32(AifJobStsRunning
)) {
1349 container
< dev
->maximum_num_containers
;
1352 * Stomp on all config sequencing for all
1355 dev
->fsa_dev
[container
].config_waiting_on
=
1356 AifEnContainerChange
;
1357 dev
->fsa_dev
[container
].config_needed
= DELETE
;
1358 dev
->fsa_dev
[container
].config_waiting_stamp
=
1367 if (device_config_needed
== NOTHING
) {
1368 for (; container
< dev
->maximum_num_containers
; ++container
) {
1369 if ((dev
->fsa_dev
[container
].config_waiting_on
== 0) &&
1370 (dev
->fsa_dev
[container
].config_needed
!= NOTHING
) &&
1371 time_before(jiffies
, dev
->fsa_dev
[container
].config_waiting_stamp
+ AIF_SNIFF_TIMEOUT
)) {
1372 device_config_needed
=
1373 dev
->fsa_dev
[container
].config_needed
;
1374 dev
->fsa_dev
[container
].config_needed
= NOTHING
;
1375 channel
= CONTAINER_TO_CHANNEL(container
);
1376 id
= CONTAINER_TO_ID(container
);
1377 lun
= CONTAINER_TO_LUN(container
);
1382 if (device_config_needed
== NOTHING
)
1386 * If we decided that a re-configuration needs to be done,
1387 * schedule it here on the way out the door, please close the door
1392 * Find the scsi_device associated with the SCSI address,
1393 * and mark it as changed, invalidating the cache. This deals
1394 * with changes to existing device IDs.
1397 if (!dev
|| !dev
->scsi_host_ptr
)
1400 * force reload of disk info via aac_probe_container
1402 if ((channel
== CONTAINER_CHANNEL
) &&
1403 (device_config_needed
!= NOTHING
)) {
1404 if (dev
->fsa_dev
[container
].valid
== 1)
1405 dev
->fsa_dev
[container
].valid
= 2;
1406 aac_probe_container(dev
, container
);
1408 device
= scsi_device_lookup(dev
->scsi_host_ptr
, channel
, id
, lun
);
1410 switch (device_config_needed
) {
1412 #if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1413 scsi_remove_device(device
);
1415 if (scsi_device_online(device
)) {
1416 scsi_device_set_state(device
, SDEV_OFFLINE
);
1417 sdev_printk(KERN_INFO
, device
,
1418 "Device offlined - %s\n",
1419 (channel
== CONTAINER_CHANNEL
) ?
1421 "enclosure services event");
1426 if (!scsi_device_online(device
)) {
1427 sdev_printk(KERN_INFO
, device
,
1428 "Device online - %s\n",
1429 (channel
== CONTAINER_CHANNEL
) ?
1431 "enclosure services event");
1432 scsi_device_set_state(device
, SDEV_RUNNING
);
1436 if ((channel
== CONTAINER_CHANNEL
)
1437 && (!dev
->fsa_dev
[container
].valid
)) {
1438 #if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1439 scsi_remove_device(device
);
1441 if (!scsi_device_online(device
))
1443 scsi_device_set_state(device
, SDEV_OFFLINE
);
1444 sdev_printk(KERN_INFO
, device
,
1445 "Device offlined - %s\n",
1450 scsi_rescan_device(&device
->sdev_gendev
);
1455 scsi_device_put(device
);
1456 device_config_needed
= NOTHING
;
1458 if (device_config_needed
== ADD
)
1459 scsi_add_device(dev
->scsi_host_ptr
, channel
, id
, lun
);
1460 if (channel
== CONTAINER_CHANNEL
) {
1462 device_config_needed
= NOTHING
;
static void aac_schedule_bus_scan(struct aac_dev *aac)
{
	if (aac->sa_firmware)
		aac_schedule_safw_scan_worker(aac);
	else
		aac_schedule_src_reinit_aif_worker(aac);
}
1475 static int _aac_reset_adapter(struct aac_dev
*aac
, int forced
, u8 reset_type
)
1479 struct Scsi_Host
*host
;
1480 struct scsi_device
*dev
;
1481 struct scsi_cmnd
*command
;
1482 struct scsi_cmnd
*command_list
;
1486 int num_of_fibs
= 0;
1490 * - host is locked, unless called by the aacraid thread.
1491 * (a matter of convenience, due to legacy issues surrounding
1492 * eh_host_adapter_reset).
1493 * - in_reset is asserted, so no new i/o is getting to the
1495 * - The card is dead, or will be very shortly ;-/ so no new
1496 * commands are completing in the interrupt service.
1498 host
= aac
->scsi_host_ptr
;
1499 scsi_block_requests(host
);
1500 aac_adapter_disable_int(aac
);
1501 if (aac
->thread
&& aac
->thread
->pid
!= current
->pid
) {
1502 spin_unlock_irq(host
->host_lock
);
1503 kthread_stop(aac
->thread
);
1509 * If a positive health, means in a known DEAD PANIC
1510 * state and the adapter could be reset to `try again'.
1512 bled
= forced
? 0 : aac_adapter_check_health(aac
);
1513 retval
= aac_adapter_restart(aac
, bled
, reset_type
);
1519 * Loop through the fibs, close the synchronous FIBS
1522 num_of_fibs
= aac
->scsi_host_ptr
->can_queue
+ AAC_NUM_MGT_FIB
;
1523 for (index
= 0; index
< num_of_fibs
; index
++) {
1525 struct fib
*fib
= &aac
->fibs
[index
];
1526 __le32 XferState
= fib
->hw_fib_va
->header
.XferState
;
1527 bool is_response_expected
= false;
1529 if (!(XferState
& cpu_to_le32(NoResponseExpected
| Async
)) &&
1530 (XferState
& cpu_to_le32(ResponseExpected
)))
1531 is_response_expected
= true;
1533 if (is_response_expected
1534 || fib
->flags
& FIB_CONTEXT_FLAG_WAIT
) {
1535 unsigned long flagv
;
1536 spin_lock_irqsave(&fib
->event_lock
, flagv
);
1537 complete(&fib
->event_wait
);
1538 spin_unlock_irqrestore(&fib
->event_lock
, flagv
);
1543 /* Give some extra time for ioctls to complete. */
1546 index
= aac
->cardtype
;
1549 * Re-initialize the adapter, first free resources, then carefully
1550 * apply the initialization sequence to come back again. Only risk
1551 * is a change in Firmware dropping cache, it is assumed the caller
1552 * will ensure that i/o is queisced and the card is flushed in that
1556 aac_fib_map_free(aac
);
1557 dma_free_coherent(&aac
->pdev
->dev
, aac
->comm_size
, aac
->comm_addr
,
1559 aac
->comm_addr
= NULL
;
1563 kfree(aac
->fsa_dev
);
1564 aac
->fsa_dev
= NULL
;
1566 dmamask
= DMA_BIT_MASK(32);
1567 quirks
= aac_get_driver_ident(index
)->quirks
;
1568 if (quirks
& AAC_QUIRK_31BIT
)
1569 retval
= pci_set_dma_mask(aac
->pdev
, dmamask
);
1570 else if (!(quirks
& AAC_QUIRK_SRC
))
1571 retval
= pci_set_dma_mask(aac
->pdev
, dmamask
);
1573 retval
= pci_set_consistent_dma_mask(aac
->pdev
, dmamask
);
1575 if (quirks
& AAC_QUIRK_31BIT
&& !retval
) {
1576 dmamask
= DMA_BIT_MASK(31);
1577 retval
= pci_set_consistent_dma_mask(aac
->pdev
, dmamask
);
1583 if ((retval
= (*(aac_get_driver_ident(index
)->init
))(aac
)))
1587 aac
->thread
= kthread_run(aac_command_thread
, aac
, "%s",
1589 if (IS_ERR(aac
->thread
)) {
1590 retval
= PTR_ERR(aac
->thread
);
1595 (void)aac_get_adapter_info(aac
);
1596 if ((quirks
& AAC_QUIRK_34SG
) && (host
->sg_tablesize
> 34)) {
1597 host
->sg_tablesize
= 34;
1598 host
->max_sectors
= (host
->sg_tablesize
* 8) + 112;
1600 if ((quirks
& AAC_QUIRK_17SG
) && (host
->sg_tablesize
> 17)) {
1601 host
->sg_tablesize
= 17;
1602 host
->max_sectors
= (host
->sg_tablesize
* 8) + 112;
1604 aac_get_config_status(aac
, 1);
1605 aac_get_containers(aac
);
1607 * This is where the assumption that the Adapter is quiesced
1610 command_list
= NULL
;
1611 __shost_for_each_device(dev
, host
) {
1612 unsigned long flags
;
1613 spin_lock_irqsave(&dev
->list_lock
, flags
);
1614 list_for_each_entry(command
, &dev
->cmd_list
, list
)
1615 if (command
->SCp
.phase
== AAC_OWNER_FIRMWARE
) {
1616 command
->SCp
.buffer
= (struct scatterlist
*)command_list
;
1617 command_list
= command
;
1619 spin_unlock_irqrestore(&dev
->list_lock
, flags
);
1621 while ((command
= command_list
)) {
1622 command_list
= (struct scsi_cmnd
*)command
->SCp
.buffer
;
1623 command
->SCp
.buffer
= NULL
;
1624 command
->result
= DID_OK
<< 16
1625 | COMMAND_COMPLETE
<< 8
1626 | SAM_STAT_TASK_SET_FULL
;
1627 command
->SCp
.phase
= AAC_OWNER_ERROR_HANDLER
;
1628 command
->scsi_done(command
);
1631 * Any Device that was already marked offline needs to be marked
1634 __shost_for_each_device(dev
, host
) {
1635 if (!scsi_device_online(dev
))
1636 scsi_device_set_state(dev
, SDEV_RUNNING
);
1642 scsi_unblock_requests(host
);
1645 * Issue bus rescan to catch any configuration that might have
1648 if (!retval
&& !is_kdump_kernel()) {
1649 dev_info(&aac
->pdev
->dev
, "Scheduling bus rescan\n");
1650 aac_schedule_bus_scan(aac
);
1654 spin_lock_irq(host
->host_lock
);
1659 int aac_reset_adapter(struct aac_dev
*aac
, int forced
, u8 reset_type
)
1661 unsigned long flagv
= 0;
1663 struct Scsi_Host
* host
;
1666 if (spin_trylock_irqsave(&aac
->fib_lock
, flagv
) == 0)
1669 if (aac
->in_reset
) {
1670 spin_unlock_irqrestore(&aac
->fib_lock
, flagv
);
1674 spin_unlock_irqrestore(&aac
->fib_lock
, flagv
);
1677 * Wait for all commands to complete to this specific
1678 * target (block maximum 60 seconds). Although not necessary,
1679 * it does make us a good storage citizen.
1681 host
= aac
->scsi_host_ptr
;
1682 scsi_block_requests(host
);
1684 /* Quiesce build, flush cache, write through mode */
1686 aac_send_shutdown(aac
);
1687 spin_lock_irqsave(host
->host_lock
, flagv
);
1688 bled
= forced
? forced
:
1689 (aac_check_reset
!= 0 && aac_check_reset
!= 1);
1690 retval
= _aac_reset_adapter(aac
, bled
, reset_type
);
1691 spin_unlock_irqrestore(host
->host_lock
, flagv
);
1693 if ((forced
< 2) && (retval
== -ENODEV
)) {
1694 /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
1695 struct fib
* fibctx
= aac_fib_alloc(aac
);
1697 struct aac_pause
*cmd
;
1700 aac_fib_init(fibctx
);
1702 cmd
= (struct aac_pause
*) fib_data(fibctx
);
1704 cmd
->command
= cpu_to_le32(VM_ContainerConfig
);
1705 cmd
->type
= cpu_to_le32(CT_PAUSE_IO
);
1706 cmd
->timeout
= cpu_to_le32(1);
1707 cmd
->min
= cpu_to_le32(1);
1708 cmd
->noRescan
= cpu_to_le32(1);
1709 cmd
->count
= cpu_to_le32(0);
1711 status
= aac_fib_send(ContainerCommand
,
1713 sizeof(struct aac_pause
),
1715 -2 /* Timeout silently */, 1,
1719 aac_fib_complete(fibctx
);
1720 /* FIB should be freed only after getting
1721 * the response from the F/W */
1722 if (status
!= -ERESTARTSYS
)
1723 aac_fib_free(fibctx
);
1730 int aac_check_health(struct aac_dev
* aac
)
1733 unsigned long time_now
, flagv
= 0;
1734 struct list_head
* entry
;
1736 /* Extending the scope of fib_lock slightly to protect aac->in_reset */
1737 if (spin_trylock_irqsave(&aac
->fib_lock
, flagv
) == 0)
1740 if (aac
->in_reset
|| !(BlinkLED
= aac_adapter_check_health(aac
))) {
1741 spin_unlock_irqrestore(&aac
->fib_lock
, flagv
);
1748 * aac_aifcmd.command = AifCmdEventNotify = 1
1749 * aac_aifcmd.seqnum = 0xFFFFFFFF
1750 * aac_aifcmd.data[0] = AifEnExpEvent = 23
1751 * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
1752 * aac.aifcmd.data[2] = AifHighPriority = 3
1753 * aac.aifcmd.data[3] = BlinkLED
1756 time_now
= jiffies
/HZ
;
1757 entry
= aac
->fib_list
.next
;
1760 * For each Context that is on the
1761 * fibctxList, make a copy of the
1762 * fib, and then set the event to wake up the
1763 * thread that is waiting for it.
1765 while (entry
!= &aac
->fib_list
) {
1767 * Extract the fibctx
1769 struct aac_fib_context
*fibctx
= list_entry(entry
, struct aac_fib_context
, next
);
1770 struct hw_fib
* hw_fib
;
1773 * Check if the queue is getting
1776 if (fibctx
->count
> 20) {
1778 * It's *not* jiffies folks,
1779 * but jiffies / HZ, so do not
1782 u32 time_last
= fibctx
->jiffies
;
1784 * Has it been > 2 minutes
1785 * since the last read off
1788 if ((time_now
- time_last
) > aif_timeout
) {
1789 entry
= entry
->next
;
1790 aac_close_fib_context(aac
, fibctx
);
1795 * Warning: no sleep allowed while
1798 hw_fib
= kzalloc(sizeof(struct hw_fib
), GFP_ATOMIC
);
1799 fib
= kzalloc(sizeof(struct fib
), GFP_ATOMIC
);
1800 if (fib
&& hw_fib
) {
1801 struct aac_aifcmd
* aif
;
1803 fib
->hw_fib_va
= hw_fib
;
1806 fib
->type
= FSAFS_NTC_FIB_CONTEXT
;
1807 fib
->size
= sizeof (struct fib
);
1808 fib
->data
= hw_fib
->data
;
1809 aif
= (struct aac_aifcmd
*)hw_fib
->data
;
1810 aif
->command
= cpu_to_le32(AifCmdEventNotify
);
1811 aif
->seqnum
= cpu_to_le32(0xFFFFFFFF);
1812 ((__le32
*)aif
->data
)[0] = cpu_to_le32(AifEnExpEvent
);
1813 ((__le32
*)aif
->data
)[1] = cpu_to_le32(AifExeFirmwarePanic
);
1814 ((__le32
*)aif
->data
)[2] = cpu_to_le32(AifHighPriority
);
1815 ((__le32
*)aif
->data
)[3] = cpu_to_le32(BlinkLED
);
1818 * Put the FIB onto the
1821 list_add_tail(&fib
->fiblink
, &fibctx
->fib_list
);
1824 * Set the event to wake up the
1825 * thread that will waiting.
1827 complete(&fibctx
->completion
);
1829 printk(KERN_WARNING
"aifd: didn't allocate NewFib.\n");
1833 entry
= entry
->next
;
1836 spin_unlock_irqrestore(&aac
->fib_lock
, flagv
);
1839 printk(KERN_ERR
"%s: Host adapter is dead (or got a PCI error) %d\n",
1840 aac
->name
, BlinkLED
);
1844 printk(KERN_ERR
"%s: Host adapter BLINK LED 0x%x\n", aac
->name
, BlinkLED
);
static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
{
	return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
}

static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
								int bus,
								int target)
{
	if (bus != CONTAINER_CHANNEL)
		bus = aac_phys_to_logical(bus);

	return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
}

static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
{
	if (bus != CONTAINER_CHANNEL)
		bus = aac_phys_to_logical(bus);

	return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
}

static void aac_put_safw_scsi_device(struct scsi_device *sdev)
{
	if (sdev)
		scsi_device_put(sdev);
}

static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
{
	struct scsi_device *sdev;

	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
	scsi_remove_device(sdev);
	aac_put_safw_scsi_device(sdev);
}

static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
	int bus, int target)
{
	return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
}

static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
{
	if (is_safw_raid_volume(dev, bus, target))
		return dev->fsa_dev[target].valid;
	else
		return aac_is_safw_scan_count_equal(dev, bus, target);
}

static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
{
	int is_exposed = 0;
	struct scsi_device *sdev;

	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
	if (sdev)
		is_exposed = 1;
	aac_put_safw_scsi_device(sdev);

	return is_exposed;
}

static int aac_update_safw_host_devices(struct aac_dev *dev)
{
	int i;
	int bus;
	int target;
	int is_exposed = 0;
	int rcode = 0;

	rcode = aac_setup_safw_adapter(dev);
	if (unlikely(rcode < 0))
		goto out;

	for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {

		bus = get_bus_number(i);
		target = get_target_number(i);

		is_exposed = aac_is_safw_device_exposed(dev, bus, target);

		if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
			aac_add_safw_device(dev, bus, target);
		else if (!aac_is_safw_target_valid(dev, bus, target) &&
								is_exposed)
			aac_remove_safw_device(dev, bus, target);
	}
out:
	return rcode;
}

static int aac_scan_safw_host(struct aac_dev *dev)
{
	int rcode = 0;

	rcode = aac_update_safw_host_devices(dev);
	if (rcode)
		aac_schedule_safw_scan_worker(dev);

	return rcode;
}

int aac_scan_host(struct aac_dev *dev)
{
	int rcode = 0;

	mutex_lock(&dev->scan_mutex);
	if (dev->sa_firmware)
		rcode = aac_scan_safw_host(dev);
	else
		scsi_scan_host(dev->scsi_host_ptr);
	mutex_unlock(&dev->scan_mutex);

	return rcode;
}
void aac_src_reinit_aif_worker(struct work_struct *work)
{
	struct aac_dev *dev = container_of(to_delayed_work(work),
				struct aac_dev, src_reinit_aif_worker);

	wait_event(dev->scsi_host_ptr->host_wait,
			!scsi_host_in_recovery(dev->scsi_host_ptr));
	aac_reinit_aif(dev, dev->cardtype);
}
/**
 *	aac_handle_sa_aif	-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */
static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
{
	int i;
	u32 events = 0;

	if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
		events = SA_AIF_HOTPLUG;
	else if (fibptr->hbacmd_size & SA_AIF_HARDWARE)
		events = SA_AIF_HARDWARE;
	else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE)
		events = SA_AIF_PDEV_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE)
		events = SA_AIF_LDEV_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE)
		events = SA_AIF_BPSTAT_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE)
		events = SA_AIF_BPCFG_CHANGE;

	switch (events) {
	case SA_AIF_HOTPLUG:
	case SA_AIF_HARDWARE:
	case SA_AIF_PDEV_CHANGE:
	case SA_AIF_LDEV_CHANGE:
	case SA_AIF_BPCFG_CHANGE:

		aac_scan_host(dev);

		break;

	case SA_AIF_BPSTAT_CHANGE:
		/* currently do nothing */
		break;
	}

	for (i = 1; i <= 10; ++i) {
		events = src_readl(dev, MUnit.IDR);
		if (events & (1<<23)) {
			pr_warn(" AIF not cleared by firmware - %d/%d)\n",
				i, 10);
			ssleep(1);
		}
	}
}
static int get_fib_count(struct aac_dev *dev)
{
	unsigned int num = 0;
	struct list_head *entry;
	unsigned long flagv;

	/*
	 * Warning: no sleep allowed while
	 * holding spinlock. We take the estimate
	 * and pre-allocate a set of fibs outside the
	 * lock.
	 */
	num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
			/ sizeof(struct hw_fib); /* some extra */
	spin_lock_irqsave(&dev->fib_lock, flagv);
	entry = dev->fib_list.next;
	while (entry != &dev->fib_list) {
		entry = entry->next;
		++num;
	}
	spin_unlock_irqrestore(&dev->fib_lock, flagv);

	return num;
}
static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
						struct fib **fib_pool,
						unsigned int num)
{
	struct hw_fib **hw_fib_p;
	struct fib **fib_p;

	hw_fib_p = hw_fib_pool;
	fib_p = fib_pool;
	while (hw_fib_p < &hw_fib_pool[num]) {
		*(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
		if (!(*(hw_fib_p++))) {
			--hw_fib_p;
			break;
		}

		*(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL);
		if (!(*(fib_p++))) {
			kfree(*(--hw_fib_p));
			break;
		}
	}

	/*
	 * Get the actual number of allocated fibs
	 */
	num = hw_fib_p - hw_fib_pool;
	return num;
}
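
/*
 * Illustrative sketch only (assumption, not driver code): how the two
 * helpers above are meant to be used together - estimate how many FIB
 * copies are needed, allocate both pointer pools, fill them, and release
 * whatever is left over once the copies have been handed out:
 */
static void example_fib_pool_usage(struct aac_dev *dev)
{
	int num = get_fib_count(dev);
	struct hw_fib **hw_fib_pool;
	struct fib **fib_pool;
	int i;

	if (!num)
		return;

	hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *), GFP_KERNEL);
	fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
	if (!hw_fib_pool || !fib_pool)
		goto out;

	num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
	/* ... hand the copies to waiting fibctx readers here ... */

	for (i = 0; i < num; i++) {
		kfree(hw_fib_pool[i]);
		kfree(fib_pool[i]);
	}
out:
	kfree(hw_fib_pool);
	kfree(fib_pool);
}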
2088 static void wakeup_fibctx_threads(struct aac_dev
*dev
,
2089 struct hw_fib
**hw_fib_pool
,
2090 struct fib
**fib_pool
,
2092 struct hw_fib
*hw_fib
,
2095 unsigned long flagv
;
2096 struct list_head
*entry
;
2097 struct hw_fib
**hw_fib_p
;
2099 u32 time_now
, time_last
;
2100 struct hw_fib
*hw_newfib
;
2102 struct aac_fib_context
*fibctx
;
2104 time_now
= jiffies
/HZ
;
2105 spin_lock_irqsave(&dev
->fib_lock
, flagv
);
2106 entry
= dev
->fib_list
.next
;
2108 * For each Context that is on the
2109 * fibctxList, make a copy of the
2110 * fib, and then set the event to wake up the
2111 * thread that is waiting for it.
2114 hw_fib_p
= hw_fib_pool
;
2116 while (entry
!= &dev
->fib_list
) {
2118 * Extract the fibctx
2120 fibctx
= list_entry(entry
, struct aac_fib_context
,
2123 * Check if the queue is getting
2126 if (fibctx
->count
> 20) {
2128 * It's *not* jiffies folks,
2129 * but jiffies / HZ so do not
2132 time_last
= fibctx
->jiffies
;
2134 * Has it been > 2 minutes
2135 * since the last read off
2138 if ((time_now
- time_last
) > aif_timeout
) {
2139 entry
= entry
->next
;
2140 aac_close_fib_context(dev
, fibctx
);
2145 * Warning: no sleep allowed while
2148 if (hw_fib_p
>= &hw_fib_pool
[num
]) {
2149 pr_warn("aifd: didn't allocate NewFib\n");
2150 entry
= entry
->next
;
2154 hw_newfib
= *hw_fib_p
;
2155 *(hw_fib_p
++) = NULL
;
2159 * Make the copy of the FIB
2161 memcpy(hw_newfib
, hw_fib
, sizeof(struct hw_fib
));
2162 memcpy(newfib
, fib
, sizeof(struct fib
));
2163 newfib
->hw_fib_va
= hw_newfib
;
2165 * Put the FIB onto the
2168 list_add_tail(&newfib
->fiblink
, &fibctx
->fib_list
);
2171 * Set the event to wake up the
2172 * thread that is waiting.
2174 complete(&fibctx
->completion
);
2176 entry
= entry
->next
;
2179 * Set the status of this FIB
2181 *(__le32
*)hw_fib
->data
= cpu_to_le32(ST_OK
);
2182 aac_fib_adapter_complete(fib
, sizeof(u32
));
2183 spin_unlock_irqrestore(&dev
->fib_lock
, flagv
);
static void aac_process_events(struct aac_dev *dev)
{
	struct hw_fib *hw_fib;
	struct fib *fib;
	unsigned long flags;
	spinlock_t *t_lock;

	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_lock_irqsave(t_lock, flags);

	while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
		struct list_head *entry;
		struct aac_aifcmd *aifcmd;
		unsigned int num;
		struct hw_fib **hw_fib_pool, **hw_fib_p;
		struct fib **fib_pool, **fib_p;

		set_current_state(TASK_RUNNING);

		entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
		list_del(entry);

		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_unlock_irqrestore(t_lock, flags);

		fib = list_entry(entry, struct fib, fiblink);
		hw_fib = fib->hw_fib_va;
		if (dev->sa_firmware) {
			/* Thor AIF */
			aac_handle_sa_aif(dev, fib);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			goto free_fib;
		}
		/*
		 * We will process the FIB here or pass it to a
		 * worker thread that is TBD. We Really can't
		 * do anything at this point since we don't have
		 * anything defined for this thread to do.
		 */
		memset(fib, 0, sizeof(struct fib));
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;
		/*
		 * We only handle AifRequest fibs from the adapter.
		 */

		aifcmd = (struct aac_aifcmd *) hw_fib->data;
		if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
			/* Handle Driver Notify Events */
			aac_handle_aif(dev, fib);
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			goto free_fib;
		}
		/*
		 * The u32 here is important and intended. We are using
		 * 32bit wrapping time to fit the adapter field
		 */

		/* Sniff events */
		if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
		 || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
			aac_handle_aif(dev, fib);
		}

		/*
		 * get number of fibs to process
		 */
		num = get_fib_count(dev);
		if (!num)
			goto free_fib;
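		/*
		 * Copies for the fib contexts cannot be allocated once
		 * dev->fib_lock is held (no sleeping under the spinlock),
		 * so allocate one hw_fib/fib pair per open context up front;
		 * wakeup_fibctx_threads() consumes what it needs and whatever
		 * is left over is freed below.
		 */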
		hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
						GFP_KERNEL);
		if (!hw_fib_pool)
			goto free_fib;

		fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
		if (!fib_pool)
			goto free_hw_fib_pool;

		/*
		 * Fill up fib pointer pools with actual fibs
		 * and hw_fibs
		 */
		num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
		if (!num)
			goto free_mem;

		/*
		 * wakeup the thread that is waiting for
		 * the response from fw (ioctl)
		 */
		wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
						fib, hw_fib, num);

free_mem:
		/* Free up the remaining resources */
		hw_fib_p = hw_fib_pool;
		fib_p = fib_pool;
		while (hw_fib_p < &hw_fib_pool[num]) {
			kfree(*hw_fib_p);
			kfree(*fib_p);
			++fib_p;
			++hw_fib_p;
		}
		kfree(fib_pool);
free_hw_fib_pool:
		kfree(hw_fib_pool);
free_fib:
		kfree(fib);
		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_lock_irqsave(t_lock, flags);
	}
	/*
	 * There are no more AIF's
	 */
	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_unlock_irqrestore(t_lock, flags);
}
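/*
 * aac_send_wellness_command() wraps the supplied wellness string in a
 * ScsiPortCommand64 SRB addressed to the adapter's virtual device
 * (bus/target taken from supplement_adapter_info), using a 12-byte
 * BMIC_OUT/WRITE_HOST_WELLNESS CDB and one coherent DMA buffer for the
 * payload.
 */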
static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
						u32 datasize)
{
	struct aac_srb *srbcmd;
	struct sgmap64 *sg64;
	dma_addr_t addr;
	char *dma_buf;
	struct fib *fibptr;
	int ret = -ENOMEM;
	u32 vbus, vid;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
				     GFP_KERNEL);
	if (!dma_buf)
		goto fib_free_out;

	aac_fib_init(fibptr);

	vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
	vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);

	srbcmd = (struct aac_srb *)fib_data(fibptr);

	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
	srbcmd->channel = cpu_to_le32(vbus);
	srbcmd->id = cpu_to_le32(vid);
	srbcmd->lun = 0;
	srbcmd->flags = cpu_to_le32(SRB_DataOut);
	srbcmd->timeout = cpu_to_le32(10);
	srbcmd->retry_limit = 0;
	srbcmd->cdb_size = cpu_to_le32(12);
	srbcmd->count = cpu_to_le32(datasize);

	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
	srbcmd->cdb[0] = BMIC_OUT;
	srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
	memcpy(dma_buf, (char *)wellness_str, datasize);
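	/*
	 * Single scatter/gather entry: the 64-bit DMA address is split
	 * into low and high dwords (the double ">> 16" avoids an
	 * undefined 32-bit shift when dma_addr_t is only 32 bits wide).
	 */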
	sg64 = (struct sgmap64 *)&srbcmd->sg;
	sg64->count = cpu_to_le32(1);
	sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
	sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
	sg64->sg[0].count = cpu_to_le32(datasize);

	ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
				FsaNormal, 1, 1, NULL, NULL);

	dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);

	/*
	 * Do not set XferState to zero unless
	 * receives a response from F/W
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * FIB should be freed only after
	 * getting the response from the F/W
	 */
	if (ret != -ERESTARTSYS)
		goto fib_free_out;

out:
	return ret;
fib_free_out:
	aac_fib_free(fibptr);
	goto out;
}
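/*
 * aac_send_safw_hostttime() reports the host's local time to sa_firmware
 * adapters by patching BCD fields into the wellness string: bytes 8-10
 * carry hour/minute/second and bytes 12-15 carry month/day/century/year.
 */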
int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
{
	struct tm cur_tm;
	char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
	u32 datasize = sizeof(wellness_str);
	time64_t local_time;
	int ret = -ENODEV;

	if (!dev->sa_firmware)
		goto out;

	local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60));
	time64_to_tm(local_time, 0, &cur_tm);
	cur_tm.tm_mon += 1;
	cur_tm.tm_year += 1900;
	wellness_str[8] = bin2bcd(cur_tm.tm_hour);
	wellness_str[9] = bin2bcd(cur_tm.tm_min);
	wellness_str[10] = bin2bcd(cur_tm.tm_sec);
	wellness_str[12] = bin2bcd(cur_tm.tm_mon);
	wellness_str[13] = bin2bcd(cur_tm.tm_mday);
	wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
	wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);

	ret = aac_send_wellness_command(dev, wellness_str, datasize);

out:
	return ret;
}
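/*
 * aac_send_hosttime() is the equivalent for adapters without
 * sa_firmware: the current time is sent as a single 32-bit seconds
 * value in a SendHostTime FIB.
 */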
int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
{
	struct fib *fibptr;
	__le32 *info;
	int ret = -ENOMEM;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	aac_fib_init(fibptr);
	info = (__le32 *)fib_data(fibptr);
	*info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */
	ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
					1, 1, NULL, NULL);

	/*
	 * Do not set XferState to zero unless
	 * receives a response from F/W
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * FIB should be freed only after
	 * getting the response from the F/W
	 */
	if (ret != -ERESTARTSYS)
		aac_fib_free(fibptr);

out:
	return ret;
}
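/*
 * Note: this thread is normally created from the adapter bring-up path
 * (see linit.c), e.g.:
 *
 *	aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME);
 *
 * and torn down with kthread_stop(), which is why kthread_should_stop()
 * is polled around the sleep in the main loop below.
 */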
/**
 *	aac_command_thread	-	command processing thread
 *	@data: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets set
 *	it will pull FIBs off its queue. It will continue to pull FIBs off
 *	until the queue is empty. When the queue is empty it will wait for
 *	more FIBs.
 */
int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long next_jiffies = jiffies + HZ;
	unsigned long next_check_jiffies = next_jiffies;
	long difference = HZ;

	/*
	 * We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 * Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk ((KERN_INFO "aac_command_thread start\n"));
	while (1) {

		aac_process_events(dev);
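		/*
		 * Two timers pace the background work: next_check_jiffies
		 * schedules the periodic health check (check_interval
		 * seconds) and next_jiffies schedules the host-time update
		 * sent to the adapter (update_interval seconds).  The sleep
		 * at the bottom of the loop lasts until whichever is due
		 * first.
		 */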
		/*
		 * Background activity
		 */
		if ((time_before(next_check_jiffies,next_jiffies))
		 && ((difference = next_check_jiffies - jiffies) <= 0)) {
			next_check_jiffies = next_jiffies;
			if (aac_adapter_check_health(dev) == 0) {
				difference = ((long)(unsigned)check_interval)
					   * HZ;
				next_check_jiffies = jiffies + difference;
			} else if (!dev->queues)
				break;
		}
		if (!time_before(next_check_jiffies,next_jiffies)
		 && ((difference = next_jiffies - jiffies) <= 0)) {
			struct timespec64 now;
			int ret;

			/* Don't even try to talk to adapter if its sick */
			ret = aac_adapter_check_health(dev);
			if (ret || !dev->queues)
				break;
			next_check_jiffies = jiffies
					   + ((long)(unsigned)check_interval)
					   * HZ;
			ktime_get_real_ts64(&now);

			/* Synchronize our watches */
			if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
			 && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
				difference = HZ + HZ / 2 -
					     now.tv_nsec / (NSEC_PER_SEC / HZ);
			else {
				if (now.tv_nsec > NSEC_PER_SEC / 2)
					++now.tv_sec;

				if (dev->sa_firmware)
					ret =
					aac_send_safw_hostttime(dev, &now);
				else
					ret = aac_send_hosttime(dev, &now);

				difference = (long)(unsigned)update_interval*HZ;
			}
			next_jiffies = jiffies + difference;
			if (time_before(next_check_jiffies,next_jiffies))
				difference = next_check_jiffies - jiffies;
		}
		if (difference <= 0)
			difference = 1;
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop())
			break;

		/*
		 * we probably want usleep_range() here instead of the
		 * jiffies computation
		 */
		schedule_timeout(difference);

		if (kthread_should_stop())
			break;
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}
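/*
 * aac_acquire_irq() registers the adapter interrupt handler(s): one
 * handler per vector when MSI-X is enabled (unwinding the already
 * registered vectors and disabling MSI-X if any request_irq() fails),
 * otherwise a single IRQF_SHARED handler on the legacy INTx/MSI vector.
 */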
int aac_acquire_irq(struct aac_dev *dev)
{
	int i;
	int j;
	int ret = 0;

	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
		for (i = 0; i < dev->max_msix; i++) {
			dev->aac_msix[i].vector_no = i;
			dev->aac_msix[i].dev = dev;
			if (request_irq(pci_irq_vector(dev->pdev, i),
					dev->a_ops.adapter_intr,
					0, "aacraid", &(dev->aac_msix[i]))) {
				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
						dev->name, dev->id, i);
				for (j = 0 ; j < i ; j++)
					free_irq(pci_irq_vector(dev->pdev, j),
						 &(dev->aac_msix[j]));
				pci_disable_msix(dev->pdev);
				ret = -1;
			}
		}
	} else {
		dev->aac_msix[0].vector_no = 0;
		dev->aac_msix[0].dev = dev;

		if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid",
			&(dev->aac_msix[0])) < 0) {
			if (dev->msi)
				pci_disable_msi(dev->pdev);
			printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
					dev->name, dev->id);
			ret = -1;
		}
	}
	return ret;
}
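/*
 * aac_free_irq() is the inverse of aac_acquire_irq(): it releases the
 * per-vector (or single) handler(s) and then disables MSI or MSI-X on
 * the PCI device.
 */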
void aac_free_irq(struct aac_dev *dev)
{
	int i;

	if (aac_is_src(dev)) {
		if (dev->max_msix > 1) {
			for (i = 0; i < dev->max_msix; i++)
				free_irq(pci_irq_vector(dev->pdev, i),
					 &(dev->aac_msix[i]));
		} else {
			free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
		}
	} else {
		free_irq(dev->pdev->irq, dev);
	}
	if (dev->msi)
		pci_disable_msi(dev->pdev);
	else if (dev->max_msix > 1)
		pci_disable_msix(dev->pdev);
}