1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Adaptec AAC series RAID controller driver
4 * (c) Copyright 2001 Red Hat Inc.
6 * based on the old aacraid driver that is..
7 * Adaptec aacraid device driver for Linux.
9 * Copyright (c) 2000-2010 Adaptec, Inc.
10 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
11 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
16 * Abstract: Contain all routines that are required for FSA host/adapter
20 #include <linux/kernel.h>
21 #include <linux/init.h>
22 #include <linux/crash_dump.h>
23 #include <linux/types.h>
24 #include <linux/sched.h>
25 #include <linux/pci.h>
26 #include <linux/spinlock.h>
27 #include <linux/slab.h>
28 #include <linux/completion.h>
29 #include <linux/blkdev.h>
30 #include <linux/delay.h>
31 #include <linux/kthread.h>
32 #include <linux/interrupt.h>
33 #include <linux/bcd.h>
34 #include <scsi/scsi.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi_device.h>
37 #include <scsi/scsi_cmnd.h>
42 * fib_map_alloc - allocate the fib objects
43 * @dev: Adapter to allocate for
45 * Allocate and map the shared PCI space for the FIB blocks used to
46 * talk to the Adaptec firmware.
49 static int fib_map_alloc(struct aac_dev
*dev
)
51 if (dev
->max_fib_size
> AAC_MAX_NATIVE_SIZE
)
52 dev
->max_cmd_size
= AAC_MAX_NATIVE_SIZE
;
54 dev
->max_cmd_size
= dev
->max_fib_size
;
55 if (dev
->max_fib_size
< AAC_MAX_NATIVE_SIZE
) {
56 dev
->max_cmd_size
= AAC_MAX_NATIVE_SIZE
;
58 dev
->max_cmd_size
= dev
->max_fib_size
;
62 "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
63 &dev
->pdev
->dev
, dev
->max_cmd_size
, dev
->scsi_host_ptr
->can_queue
,
64 AAC_NUM_MGT_FIB
, &dev
->hw_fib_pa
));
65 dev
->hw_fib_va
= dma_alloc_coherent(&dev
->pdev
->dev
,
66 (dev
->max_cmd_size
+ sizeof(struct aac_fib_xporthdr
))
67 * (dev
->scsi_host_ptr
->can_queue
+ AAC_NUM_MGT_FIB
) + (ALIGN32
- 1),
68 &dev
->hw_fib_pa
, GFP_KERNEL
);
69 if (dev
->hw_fib_va
== NULL
)
75 * aac_fib_map_free - free the fib objects
76 * @dev: Adapter to free
78 * Free the PCI mappings and the memory allocated for FIB blocks
82 void aac_fib_map_free(struct aac_dev
*dev
)
88 if(!dev
->hw_fib_va
|| !dev
->max_cmd_size
)
91 num_fibs
= dev
->scsi_host_ptr
->can_queue
+ AAC_NUM_MGT_FIB
;
92 fib_size
= dev
->max_fib_size
+ sizeof(struct aac_fib_xporthdr
);
93 alloc_size
= fib_size
* num_fibs
+ ALIGN32
- 1;
95 dma_free_coherent(&dev
->pdev
->dev
, alloc_size
, dev
->hw_fib_va
,
98 dev
->hw_fib_va
= NULL
;
102 void aac_fib_vector_assign(struct aac_dev
*dev
)
106 struct fib
*fibptr
= NULL
;
108 for (i
= 0, fibptr
= &dev
->fibs
[i
];
109 i
< (dev
->scsi_host_ptr
->can_queue
+ AAC_NUM_MGT_FIB
);
111 if ((dev
->max_msix
== 1) ||
112 (i
> ((dev
->scsi_host_ptr
->can_queue
+ AAC_NUM_MGT_FIB
- 1)
113 - dev
->vector_cap
))) {
114 fibptr
->vector_no
= 0;
116 fibptr
->vector_no
= vector
;
118 if (vector
== dev
->max_msix
)
125 * aac_fib_setup - setup the fibs
126 * @dev: Adapter to set up
128 * Allocate the PCI space for the fibs, map it and then initialise the
129 * fib area, the unmapped fib data and also the free list
132 int aac_fib_setup(struct aac_dev
* dev
)
135 struct hw_fib
*hw_fib
;
136 dma_addr_t hw_fib_pa
;
140 while (((i
= fib_map_alloc(dev
)) == -ENOMEM
)
141 && (dev
->scsi_host_ptr
->can_queue
> (64 - AAC_NUM_MGT_FIB
))) {
142 max_cmds
= (dev
->scsi_host_ptr
->can_queue
+AAC_NUM_MGT_FIB
) >> 1;
143 dev
->scsi_host_ptr
->can_queue
= max_cmds
- AAC_NUM_MGT_FIB
;
144 if (dev
->comm_interface
!= AAC_COMM_MESSAGE_TYPE3
)
145 dev
->init
->r7
.max_io_commands
= cpu_to_le32(max_cmds
);
150 memset(dev
->hw_fib_va
, 0,
151 (dev
->max_cmd_size
+ sizeof(struct aac_fib_xporthdr
)) *
152 (dev
->scsi_host_ptr
->can_queue
+ AAC_NUM_MGT_FIB
));
154 /* 32 byte alignment for PMC */
155 hw_fib_pa
= (dev
->hw_fib_pa
+ (ALIGN32
- 1)) & ~(ALIGN32
- 1);
156 hw_fib
= (struct hw_fib
*)((unsigned char *)dev
->hw_fib_va
+
157 (hw_fib_pa
- dev
->hw_fib_pa
));
159 /* add Xport header */
160 hw_fib
= (struct hw_fib
*)((unsigned char *)hw_fib
+
161 sizeof(struct aac_fib_xporthdr
));
162 hw_fib_pa
+= sizeof(struct aac_fib_xporthdr
);
165 * Initialise the fibs
167 for (i
= 0, fibptr
= &dev
->fibs
[i
];
168 i
< (dev
->scsi_host_ptr
->can_queue
+ AAC_NUM_MGT_FIB
);
172 fibptr
->size
= sizeof(struct fib
);
174 fibptr
->hw_fib_va
= hw_fib
;
175 fibptr
->data
= (void *) fibptr
->hw_fib_va
->data
;
176 fibptr
->next
= fibptr
+1; /* Forward chain the fibs */
177 init_completion(&fibptr
->event_wait
);
178 spin_lock_init(&fibptr
->event_lock
);
179 hw_fib
->header
.XferState
= cpu_to_le32(0xffffffff);
180 hw_fib
->header
.SenderSize
=
181 cpu_to_le16(dev
->max_fib_size
); /* ?? max_cmd_size */
182 fibptr
->hw_fib_pa
= hw_fib_pa
;
183 fibptr
->hw_sgl_pa
= hw_fib_pa
+
184 offsetof(struct aac_hba_cmd_req
, sge
[2]);
186 * one element is for the ptr to the separate sg list,
187 * second element for 32 byte alignment
189 fibptr
->hw_error_pa
= hw_fib_pa
+
190 offsetof(struct aac_native_hba
, resp
.resp_bytes
[0]);
192 hw_fib
= (struct hw_fib
*)((unsigned char *)hw_fib
+
193 dev
->max_cmd_size
+ sizeof(struct aac_fib_xporthdr
));
194 hw_fib_pa
= hw_fib_pa
+
195 dev
->max_cmd_size
+ sizeof(struct aac_fib_xporthdr
);
199 *Assign vector numbers to fibs
201 aac_fib_vector_assign(dev
);
204 * Add the fib chain to the free list
206 dev
->fibs
[dev
->scsi_host_ptr
->can_queue
+ AAC_NUM_MGT_FIB
- 1].next
= NULL
;
208 * Set 8 fibs aside for management tools
210 dev
->free_fib
= &dev
->fibs
[dev
->scsi_host_ptr
->can_queue
];
215 * aac_fib_alloc_tag-allocate a fib using tags
216 * @dev: Adapter to allocate the fib for
217 * @scmd: SCSI command
219 * Allocate a fib from the adapter fib pool using tags
220 * from the blk layer.
223 struct fib
*aac_fib_alloc_tag(struct aac_dev
*dev
, struct scsi_cmnd
*scmd
)
227 fibptr
= &dev
->fibs
[scmd
->request
->tag
];
229 * Null out fields that depend on being zero at the start of
232 fibptr
->hw_fib_va
->header
.XferState
= 0;
233 fibptr
->type
= FSAFS_NTC_FIB_CONTEXT
;
234 fibptr
->callback_data
= NULL
;
235 fibptr
->callback
= NULL
;
242 * aac_fib_alloc - allocate a fib
243 * @dev: Adapter to allocate the fib for
245 * Allocate a fib from the adapter fib pool. If the pool is empty we
249 struct fib
*aac_fib_alloc(struct aac_dev
*dev
)
253 spin_lock_irqsave(&dev
->fib_lock
, flags
);
254 fibptr
= dev
->free_fib
;
256 spin_unlock_irqrestore(&dev
->fib_lock
, flags
);
259 dev
->free_fib
= fibptr
->next
;
260 spin_unlock_irqrestore(&dev
->fib_lock
, flags
);
262 * Set the proper node type code and node byte size
264 fibptr
->type
= FSAFS_NTC_FIB_CONTEXT
;
265 fibptr
->size
= sizeof(struct fib
);
267 * Null out fields that depend on being zero at the start of
270 fibptr
->hw_fib_va
->header
.XferState
= 0;
272 fibptr
->callback
= NULL
;
273 fibptr
->callback_data
= NULL
;
279 * aac_fib_free - free a fib
280 * @fibptr: fib to free up
282 * Frees up a fib and places it on the appropriate queue
285 void aac_fib_free(struct fib
*fibptr
)
289 if (fibptr
->done
== 2)
292 spin_lock_irqsave(&fibptr
->dev
->fib_lock
, flags
);
293 if (unlikely(fibptr
->flags
& FIB_CONTEXT_FLAG_TIMED_OUT
))
294 aac_config
.fib_timeouts
++;
295 if (!(fibptr
->flags
& FIB_CONTEXT_FLAG_NATIVE_HBA
) &&
296 fibptr
->hw_fib_va
->header
.XferState
!= 0) {
297 printk(KERN_WARNING
"aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
299 le32_to_cpu(fibptr
->hw_fib_va
->header
.XferState
));
301 fibptr
->next
= fibptr
->dev
->free_fib
;
302 fibptr
->dev
->free_fib
= fibptr
;
303 spin_unlock_irqrestore(&fibptr
->dev
->fib_lock
, flags
);
307 * aac_fib_init - initialise a fib
308 * @fibptr: The fib to initialize
310 * Set up the generic fib fields ready for use
313 void aac_fib_init(struct fib
*fibptr
)
315 struct hw_fib
*hw_fib
= fibptr
->hw_fib_va
;
317 memset(&hw_fib
->header
, 0, sizeof(struct aac_fibhdr
));
318 hw_fib
->header
.StructType
= FIB_MAGIC
;
319 hw_fib
->header
.Size
= cpu_to_le16(fibptr
->dev
->max_fib_size
);
320 hw_fib
->header
.XferState
= cpu_to_le32(HostOwned
| FibInitialized
| FibEmpty
| FastResponseCapable
);
321 hw_fib
->header
.u
.ReceiverFibAddress
= cpu_to_le32(fibptr
->hw_fib_pa
);
322 hw_fib
->header
.SenderSize
= cpu_to_le16(fibptr
->dev
->max_fib_size
);
326 * fib_deallocate - deallocate a fib
327 * @fibptr: fib to deallocate
329 * Will deallocate and return to the free pool the FIB pointed to by the
333 static void fib_dealloc(struct fib
* fibptr
)
335 struct hw_fib
*hw_fib
= fibptr
->hw_fib_va
;
336 hw_fib
->header
.XferState
= 0;
340 * Commuication primitives define and support the queuing method we use to
341 * support host to adapter commuication. All queue accesses happen through
342 * these routines and are the only routines which have a knowledge of the
343 * how these queues are implemented.
347 * aac_get_entry - get a queue entry
350 * @entry: Entry return
351 * @index: Index return
352 * @nonotify: notification control
354 * With a priority the routine returns a queue entry if the queue has free entries. If the queue
355 * is full(no free entries) than no entry is returned and the function returns 0 otherwise 1 is
359 static int aac_get_entry (struct aac_dev
* dev
, u32 qid
, struct aac_entry
**entry
, u32
* index
, unsigned long *nonotify
)
361 struct aac_queue
* q
;
365 * All of the queues wrap when they reach the end, so we check
366 * to see if they have reached the end and if they have we just
367 * set the index back to zero. This is a wrap. You could or off
368 * the high bits in all updates but this is a bit faster I think.
371 q
= &dev
->queues
->queue
[qid
];
373 idx
= *index
= le32_to_cpu(*(q
->headers
.producer
));
374 /* Interrupt Moderation, only interrupt for first two entries */
375 if (idx
!= le32_to_cpu(*(q
->headers
.consumer
))) {
377 if (qid
== AdapNormCmdQueue
)
378 idx
= ADAP_NORM_CMD_ENTRIES
;
380 idx
= ADAP_NORM_RESP_ENTRIES
;
382 if (idx
!= le32_to_cpu(*(q
->headers
.consumer
)))
386 if (qid
== AdapNormCmdQueue
) {
387 if (*index
>= ADAP_NORM_CMD_ENTRIES
)
388 *index
= 0; /* Wrap to front of the Producer Queue. */
390 if (*index
>= ADAP_NORM_RESP_ENTRIES
)
391 *index
= 0; /* Wrap to front of the Producer Queue. */
395 if ((*index
+ 1) == le32_to_cpu(*(q
->headers
.consumer
))) {
396 printk(KERN_WARNING
"Queue %d full, %u outstanding.\n",
397 qid
, atomic_read(&q
->numpending
));
400 *entry
= q
->base
+ *index
;
406 * aac_queue_get - get the next free QE
408 * @index: Returned index
410 * @hw_fib: Fib to associate with the queue entry
411 * @wait: Wait if queue full
412 * @fibptr: Driver fib object to go with fib
413 * @nonotify: Don't notify the adapter
415 * Gets the next free QE off the requested priorty adapter command
416 * queue and associates the Fib with the QE. The QE represented by
417 * index is ready to insert on the queue when this routine returns
421 int aac_queue_get(struct aac_dev
* dev
, u32
* index
, u32 qid
, struct hw_fib
* hw_fib
, int wait
, struct fib
* fibptr
, unsigned long *nonotify
)
423 struct aac_entry
* entry
= NULL
;
426 if (qid
== AdapNormCmdQueue
) {
427 /* if no entries wait for some if caller wants to */
428 while (!aac_get_entry(dev
, qid
, &entry
, index
, nonotify
)) {
429 printk(KERN_ERR
"GetEntries failed\n");
432 * Setup queue entry with a command, status and fib mapped
434 entry
->size
= cpu_to_le32(le16_to_cpu(hw_fib
->header
.Size
));
437 while (!aac_get_entry(dev
, qid
, &entry
, index
, nonotify
)) {
438 /* if no entries wait for some if caller wants to */
441 * Setup queue entry with command, status and fib mapped
443 entry
->size
= cpu_to_le32(le16_to_cpu(hw_fib
->header
.Size
));
444 entry
->addr
= hw_fib
->header
.SenderFibAddress
;
445 /* Restore adapters pointer to the FIB */
446 hw_fib
->header
.u
.ReceiverFibAddress
= hw_fib
->header
.SenderFibAddress
; /* Let the adapter now where to find its data */
450 * If MapFib is true than we need to map the Fib and put pointers
451 * in the queue entry.
454 entry
->addr
= cpu_to_le32(fibptr
->hw_fib_pa
);
459 * Define the highest level of host to adapter communication routines.
460 * These routines will support host to adapter FS commuication. These
461 * routines have no knowledge of the commuication method used. This level
462 * sends and receives FIBs. This level has no knowledge of how these FIBs
463 * get passed back and forth.
467 * aac_fib_send - send a fib to the adapter
468 * @command: Command to send
470 * @size: Size of fib data area
471 * @priority: Priority of Fib
472 * @wait: Async/sync select
473 * @reply: True if a reply is wanted
474 * @callback: Called with reply
475 * @callback_data: Passed to callback
477 * Sends the requested FIB to the adapter and optionally will wait for a
478 * response FIB. If the caller does not wish to wait for a response than
479 * an event to wait on must be supplied. This event will be set when a
480 * response FIB is received from the adapter.
483 int aac_fib_send(u16 command
, struct fib
*fibptr
, unsigned long size
,
484 int priority
, int wait
, int reply
, fib_callback callback
,
487 struct aac_dev
* dev
= fibptr
->dev
;
488 struct hw_fib
* hw_fib
= fibptr
->hw_fib_va
;
489 unsigned long flags
= 0;
490 unsigned long mflags
= 0;
491 unsigned long sflags
= 0;
493 if (!(hw_fib
->header
.XferState
& cpu_to_le32(HostOwned
)))
496 if (hw_fib
->header
.XferState
& cpu_to_le32(AdapterProcessed
))
500 * There are 5 cases with the wait and response requested flags.
501 * The only invalid cases are if the caller requests to wait and
502 * does not request a response and if the caller does not want a
503 * response and the Fib is not allocated from pool. If a response
504 * is not requested the Fib will just be deallocaed by the DPC
505 * routine when the response comes back from the adapter. No
506 * further processing will be done besides deleting the Fib. We
507 * will have a debug mode where the adapter can notify the host
508 * it had a problem and the host can log that fact.
511 if (wait
&& !reply
) {
513 } else if (!wait
&& reply
) {
514 hw_fib
->header
.XferState
|= cpu_to_le32(Async
| ResponseExpected
);
515 FIB_COUNTER_INCREMENT(aac_config
.AsyncSent
);
516 } else if (!wait
&& !reply
) {
517 hw_fib
->header
.XferState
|= cpu_to_le32(NoResponseExpected
);
518 FIB_COUNTER_INCREMENT(aac_config
.NoResponseSent
);
519 } else if (wait
&& reply
) {
520 hw_fib
->header
.XferState
|= cpu_to_le32(ResponseExpected
);
521 FIB_COUNTER_INCREMENT(aac_config
.NormalSent
);
524 * Map the fib into 32bits by using the fib number
527 hw_fib
->header
.SenderFibAddress
=
528 cpu_to_le32(((u32
)(fibptr
- dev
->fibs
)) << 2);
530 /* use the same shifted value for handle to be compatible
531 * with the new native hba command handle
533 hw_fib
->header
.Handle
=
534 cpu_to_le32((((u32
)(fibptr
- dev
->fibs
)) << 2) + 1);
537 * Set FIB state to indicate where it came from and if we want a
538 * response from the adapter. Also load the command from the
541 * Map the hw fib pointer as a 32bit value
543 hw_fib
->header
.Command
= cpu_to_le16(command
);
544 hw_fib
->header
.XferState
|= cpu_to_le32(SentFromHost
);
546 * Set the size of the Fib we want to send to the adapter
548 hw_fib
->header
.Size
= cpu_to_le16(sizeof(struct aac_fibhdr
) + size
);
549 if (le16_to_cpu(hw_fib
->header
.Size
) > le16_to_cpu(hw_fib
->header
.SenderSize
)) {
553 * Get a queue entry connect the FIB to it and send an notify
554 * the adapter a command is ready.
556 hw_fib
->header
.XferState
|= cpu_to_le32(NormalPriority
);
559 * Fill in the Callback and CallbackContext if we are not
563 fibptr
->callback
= callback
;
564 fibptr
->callback_data
= callback_data
;
565 fibptr
->flags
= FIB_CONTEXT_FLAG
;
570 FIB_COUNTER_INCREMENT(aac_config
.FibsSent
);
572 dprintk((KERN_DEBUG
"Fib contents:.\n"));
573 dprintk((KERN_DEBUG
" Command = %d.\n", le32_to_cpu(hw_fib
->header
.Command
)));
574 dprintk((KERN_DEBUG
" SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount
*)fib_data(fibptr
))->command
)));
575 dprintk((KERN_DEBUG
" XferState = %x.\n", le32_to_cpu(hw_fib
->header
.XferState
)));
576 dprintk((KERN_DEBUG
" hw_fib va being sent=%p\n",fibptr
->hw_fib_va
));
577 dprintk((KERN_DEBUG
" hw_fib pa being sent=%lx\n",(ulong
)fibptr
->hw_fib_pa
));
578 dprintk((KERN_DEBUG
" fib being sent=%p\n",fibptr
));
585 spin_lock_irqsave(&dev
->manage_lock
, mflags
);
586 if (dev
->management_fib_count
>= AAC_NUM_MGT_FIB
) {
587 printk(KERN_INFO
"No management Fibs Available:%d\n",
588 dev
->management_fib_count
);
589 spin_unlock_irqrestore(&dev
->manage_lock
, mflags
);
592 dev
->management_fib_count
++;
593 spin_unlock_irqrestore(&dev
->manage_lock
, mflags
);
594 spin_lock_irqsave(&fibptr
->event_lock
, flags
);
597 if (dev
->sync_mode
) {
599 spin_unlock_irqrestore(&fibptr
->event_lock
, flags
);
600 spin_lock_irqsave(&dev
->sync_lock
, sflags
);
602 list_add_tail(&fibptr
->fiblink
, &dev
->sync_fib_list
);
603 spin_unlock_irqrestore(&dev
->sync_lock
, sflags
);
605 dev
->sync_fib
= fibptr
;
606 spin_unlock_irqrestore(&dev
->sync_lock
, sflags
);
607 aac_adapter_sync_cmd(dev
, SEND_SYNCHRONOUS_FIB
,
608 (u32
)fibptr
->hw_fib_pa
, 0, 0, 0, 0, 0,
609 NULL
, NULL
, NULL
, NULL
, NULL
);
612 fibptr
->flags
|= FIB_CONTEXT_FLAG_WAIT
;
613 if (wait_for_completion_interruptible(&fibptr
->event_wait
)) {
614 fibptr
->flags
&= ~FIB_CONTEXT_FLAG_WAIT
;
622 if (aac_adapter_deliver(fibptr
) != 0) {
623 printk(KERN_ERR
"aac_fib_send: returned -EBUSY\n");
625 spin_unlock_irqrestore(&fibptr
->event_lock
, flags
);
626 spin_lock_irqsave(&dev
->manage_lock
, mflags
);
627 dev
->management_fib_count
--;
628 spin_unlock_irqrestore(&dev
->manage_lock
, mflags
);
635 * If the caller wanted us to wait for response wait now.
639 spin_unlock_irqrestore(&fibptr
->event_lock
, flags
);
640 /* Only set for first known interruptable command */
643 * *VERY* Dangerous to time out a command, the
644 * assumption is made that we have no hope of
645 * functioning because an interrupt routing or other
646 * hardware failure has occurred.
648 unsigned long timeout
= jiffies
+ (180 * HZ
); /* 3 minutes */
649 while (!try_wait_for_completion(&fibptr
->event_wait
)) {
651 if (time_is_before_eq_jiffies(timeout
)) {
652 struct aac_queue
* q
= &dev
->queues
->queue
[AdapNormCmdQueue
];
653 atomic_dec(&q
->numpending
);
655 printk(KERN_ERR
"aacraid: aac_fib_send: first asynchronous command timed out.\n"
656 "Usually a result of a PCI interrupt routing problem;\n"
657 "update mother board BIOS or consider utilizing one of\n"
658 "the SAFE mode kernel options (acpi, apic etc)\n");
663 if (unlikely(aac_pci_offline(dev
)))
666 if ((blink
= aac_adapter_check_health(dev
)) > 0) {
668 printk(KERN_ERR
"aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
669 "Usually a result of a serious unrecoverable hardware problem\n",
675 * Allow other processes / CPUS to use core
679 } else if (wait_for_completion_interruptible(&fibptr
->event_wait
)) {
680 /* Do nothing ... satisfy
681 * wait_for_completion_interruptible must_check */
684 spin_lock_irqsave(&fibptr
->event_lock
, flags
);
685 if (fibptr
->done
== 0) {
686 fibptr
->done
= 2; /* Tell interrupt we aborted */
687 spin_unlock_irqrestore(&fibptr
->event_lock
, flags
);
690 spin_unlock_irqrestore(&fibptr
->event_lock
, flags
);
691 BUG_ON(fibptr
->done
== 0);
693 if(unlikely(fibptr
->flags
& FIB_CONTEXT_FLAG_TIMED_OUT
))
698 * If the user does not want a response than return success otherwise
707 int aac_hba_send(u8 command
, struct fib
*fibptr
, fib_callback callback
,
710 struct aac_dev
*dev
= fibptr
->dev
;
712 unsigned long flags
= 0;
713 unsigned long mflags
= 0;
714 struct aac_hba_cmd_req
*hbacmd
= (struct aac_hba_cmd_req
*)
717 fibptr
->flags
= (FIB_CONTEXT_FLAG
| FIB_CONTEXT_FLAG_NATIVE_HBA
);
720 fibptr
->callback
= callback
;
721 fibptr
->callback_data
= callback_data
;
726 hbacmd
->iu_type
= command
;
728 if (command
== HBA_IU_TYPE_SCSI_CMD_REQ
) {
729 /* bit1 of request_id must be 0 */
731 cpu_to_le32((((u32
)(fibptr
- dev
->fibs
)) << 2) + 1);
732 fibptr
->flags
|= FIB_CONTEXT_FLAG_SCSI_CMD
;
738 spin_lock_irqsave(&dev
->manage_lock
, mflags
);
739 if (dev
->management_fib_count
>= AAC_NUM_MGT_FIB
) {
740 spin_unlock_irqrestore(&dev
->manage_lock
, mflags
);
743 dev
->management_fib_count
++;
744 spin_unlock_irqrestore(&dev
->manage_lock
, mflags
);
745 spin_lock_irqsave(&fibptr
->event_lock
, flags
);
748 if (aac_adapter_deliver(fibptr
) != 0) {
750 spin_unlock_irqrestore(&fibptr
->event_lock
, flags
);
751 spin_lock_irqsave(&dev
->manage_lock
, mflags
);
752 dev
->management_fib_count
--;
753 spin_unlock_irqrestore(&dev
->manage_lock
, mflags
);
757 FIB_COUNTER_INCREMENT(aac_config
.NativeSent
);
761 spin_unlock_irqrestore(&fibptr
->event_lock
, flags
);
763 if (unlikely(aac_pci_offline(dev
)))
766 fibptr
->flags
|= FIB_CONTEXT_FLAG_WAIT
;
767 if (wait_for_completion_interruptible(&fibptr
->event_wait
))
769 fibptr
->flags
&= ~(FIB_CONTEXT_FLAG_WAIT
);
771 spin_lock_irqsave(&fibptr
->event_lock
, flags
);
772 if ((fibptr
->done
== 0) || (fibptr
->done
== 2)) {
773 fibptr
->done
= 2; /* Tell interrupt we aborted */
774 spin_unlock_irqrestore(&fibptr
->event_lock
, flags
);
777 spin_unlock_irqrestore(&fibptr
->event_lock
, flags
);
778 WARN_ON(fibptr
->done
== 0);
780 if (unlikely(fibptr
->flags
& FIB_CONTEXT_FLAG_TIMED_OUT
))
790 * aac_consumer_get - get the top of the queue
793 * @entry: Return entry
795 * Will return a pointer to the entry on the top of the queue requested that
796 * we are a consumer of, and return the address of the queue entry. It does
797 * not change the state of the queue.
800 int aac_consumer_get(struct aac_dev
* dev
, struct aac_queue
* q
, struct aac_entry
**entry
)
804 if (le32_to_cpu(*q
->headers
.producer
) == le32_to_cpu(*q
->headers
.consumer
)) {
808 * The consumer index must be wrapped if we have reached
809 * the end of the queue, else we just use the entry
810 * pointed to by the header index
812 if (le32_to_cpu(*q
->headers
.consumer
) >= q
->entries
)
815 index
= le32_to_cpu(*q
->headers
.consumer
);
816 *entry
= q
->base
+ index
;
823 * aac_consumer_free - free consumer entry
828 * Frees up the current top of the queue we are a consumer of. If the
829 * queue was full notify the producer that the queue is no longer full.
832 void aac_consumer_free(struct aac_dev
* dev
, struct aac_queue
*q
, u32 qid
)
837 if ((le32_to_cpu(*q
->headers
.producer
)+1) == le32_to_cpu(*q
->headers
.consumer
))
840 if (le32_to_cpu(*q
->headers
.consumer
) >= q
->entries
)
841 *q
->headers
.consumer
= cpu_to_le32(1);
843 le32_add_cpu(q
->headers
.consumer
, 1);
848 case HostNormCmdQueue
:
849 notify
= HostNormCmdNotFull
;
851 case HostNormRespQueue
:
852 notify
= HostNormRespNotFull
;
858 aac_adapter_notify(dev
, notify
);
863 * aac_fib_adapter_complete - complete adapter issued fib
864 * @fibptr: fib to complete
867 * Will do all necessary work to complete a FIB that was sent from
871 int aac_fib_adapter_complete(struct fib
*fibptr
, unsigned short size
)
873 struct hw_fib
* hw_fib
= fibptr
->hw_fib_va
;
874 struct aac_dev
* dev
= fibptr
->dev
;
875 struct aac_queue
* q
;
876 unsigned long nointr
= 0;
877 unsigned long qflags
;
879 if (dev
->comm_interface
== AAC_COMM_MESSAGE_TYPE1
||
880 dev
->comm_interface
== AAC_COMM_MESSAGE_TYPE2
||
881 dev
->comm_interface
== AAC_COMM_MESSAGE_TYPE3
) {
886 if (hw_fib
->header
.XferState
== 0) {
887 if (dev
->comm_interface
== AAC_COMM_MESSAGE
)
892 * If we plan to do anything check the structure type first.
894 if (hw_fib
->header
.StructType
!= FIB_MAGIC
&&
895 hw_fib
->header
.StructType
!= FIB_MAGIC2
&&
896 hw_fib
->header
.StructType
!= FIB_MAGIC2_64
) {
897 if (dev
->comm_interface
== AAC_COMM_MESSAGE
)
902 * This block handles the case where the adapter had sent us a
903 * command and we have finished processing the command. We
904 * call completeFib when we are done processing the command
905 * and want to send a response back to the adapter. This will
906 * send the completed cdb to the adapter.
908 if (hw_fib
->header
.XferState
& cpu_to_le32(SentFromAdapter
)) {
909 if (dev
->comm_interface
== AAC_COMM_MESSAGE
) {
913 hw_fib
->header
.XferState
|= cpu_to_le32(HostProcessed
);
915 size
+= sizeof(struct aac_fibhdr
);
916 if (size
> le16_to_cpu(hw_fib
->header
.SenderSize
))
918 hw_fib
->header
.Size
= cpu_to_le16(size
);
920 q
= &dev
->queues
->queue
[AdapNormRespQueue
];
921 spin_lock_irqsave(q
->lock
, qflags
);
922 aac_queue_get(dev
, &index
, AdapNormRespQueue
, hw_fib
, 1, NULL
, &nointr
);
923 *(q
->headers
.producer
) = cpu_to_le32(index
+ 1);
924 spin_unlock_irqrestore(q
->lock
, qflags
);
925 if (!(nointr
& (int)aac_config
.irq_mod
))
926 aac_adapter_notify(dev
, AdapNormRespQueue
);
929 printk(KERN_WARNING
"aac_fib_adapter_complete: "
930 "Unknown xferstate detected.\n");
937 * aac_fib_complete - fib completion handler
938 * @fibptr: FIB to complete
940 * Will do all necessary work to complete a FIB.
943 int aac_fib_complete(struct fib
*fibptr
)
945 struct hw_fib
* hw_fib
= fibptr
->hw_fib_va
;
947 if (fibptr
->flags
& FIB_CONTEXT_FLAG_NATIVE_HBA
) {
953 * Check for a fib which has already been completed or with a
954 * status wait timeout
957 if (hw_fib
->header
.XferState
== 0 || fibptr
->done
== 2)
960 * If we plan to do anything check the structure type first.
963 if (hw_fib
->header
.StructType
!= FIB_MAGIC
&&
964 hw_fib
->header
.StructType
!= FIB_MAGIC2
&&
965 hw_fib
->header
.StructType
!= FIB_MAGIC2_64
)
968 * This block completes a cdb which orginated on the host and we
969 * just need to deallocate the cdb or reinit it. At this point the
970 * command is complete that we had sent to the adapter and this
971 * cdb could be reused.
974 if((hw_fib
->header
.XferState
& cpu_to_le32(SentFromHost
)) &&
975 (hw_fib
->header
.XferState
& cpu_to_le32(AdapterProcessed
)))
979 else if(hw_fib
->header
.XferState
& cpu_to_le32(SentFromHost
))
982 * This handles the case when the host has aborted the I/O
983 * to the adapter because the adapter is not responding
986 } else if(hw_fib
->header
.XferState
& cpu_to_le32(HostOwned
)) {
995 * aac_printf - handle printf from firmware
999 * Print a message passed to us by the controller firmware on the
1003 void aac_printf(struct aac_dev
*dev
, u32 val
)
1005 char *cp
= dev
->printfbuf
;
1006 if (dev
->printf_enabled
)
1008 int length
= val
& 0xffff;
1009 int level
= (val
>> 16) & 0xffff;
1012 * The size of the printfbuf is set in port.c
1013 * There is no variable or define for it
1017 if (cp
[length
] != 0)
1019 if (level
== LOG_AAC_HIGH_ERROR
)
1020 printk(KERN_WARNING
"%s:%s", dev
->name
, cp
);
1022 printk(KERN_INFO
"%s:%s", dev
->name
, cp
);
1027 static inline int aac_aif_data(struct aac_aifcmd
*aifcmd
, uint32_t index
)
1029 return le32_to_cpu(((__le32
*)aifcmd
->data
)[index
]);
1033 static void aac_handle_aif_bu(struct aac_dev
*dev
, struct aac_aifcmd
*aifcmd
)
1035 switch (aac_aif_data(aifcmd
, 1)) {
1036 case AifBuCacheDataLoss
:
1037 if (aac_aif_data(aifcmd
, 2))
1038 dev_info(&dev
->pdev
->dev
, "Backup unit had cache data loss - [%d]\n",
1039 aac_aif_data(aifcmd
, 2));
1041 dev_info(&dev
->pdev
->dev
, "Backup Unit had cache data loss\n");
1043 case AifBuCacheDataRecover
:
1044 if (aac_aif_data(aifcmd
, 2))
1045 dev_info(&dev
->pdev
->dev
, "DDR cache data recovered successfully - [%d]\n",
1046 aac_aif_data(aifcmd
, 2));
1048 dev_info(&dev
->pdev
->dev
, "DDR cache data recovered successfully\n");
1053 #define AIF_SNIFF_TIMEOUT (500*HZ)
1055 * aac_handle_aif - Handle a message from the firmware
1056 * @dev: Which adapter this fib is from
1057 * @fibptr: Pointer to fibptr from adapter
1059 * This routine handles a driver notify fib from the adapter and
1060 * dispatches it to the appropriate routine for handling.
1062 static void aac_handle_aif(struct aac_dev
* dev
, struct fib
* fibptr
)
1064 struct hw_fib
* hw_fib
= fibptr
->hw_fib_va
;
1065 struct aac_aifcmd
* aifcmd
= (struct aac_aifcmd
*)hw_fib
->data
;
1066 u32 channel
, id
, lun
, container
;
1067 struct scsi_device
*device
;
1073 } device_config_needed
= NOTHING
;
1075 /* Sniff for container changes */
1077 if (!dev
|| !dev
->fsa_dev
)
1079 container
= channel
= id
= lun
= (u32
)-1;
1082 * We have set this up to try and minimize the number of
1083 * re-configures that take place. As a result of this when
1084 * certain AIF's come in we will set a flag waiting for another
1085 * type of AIF before setting the re-config flag.
1087 switch (le32_to_cpu(aifcmd
->command
)) {
1088 case AifCmdDriverNotify
:
1089 switch (le32_to_cpu(((__le32
*)aifcmd
->data
)[0])) {
1090 case AifRawDeviceRemove
:
1091 container
= le32_to_cpu(((__le32
*)aifcmd
->data
)[1]);
1092 if ((container
>> 28)) {
1093 container
= (u32
)-1;
1096 channel
= (container
>> 24) & 0xF;
1097 if (channel
>= dev
->maximum_num_channels
) {
1098 container
= (u32
)-1;
1101 id
= container
& 0xFFFF;
1102 if (id
>= dev
->maximum_num_physicals
) {
1103 container
= (u32
)-1;
1106 lun
= (container
>> 16) & 0xFF;
1107 container
= (u32
)-1;
1108 channel
= aac_phys_to_logical(channel
);
1109 device_config_needed
= DELETE
;
1113 * Morph or Expand complete
1115 case AifDenMorphComplete
:
1116 case AifDenVolumeExtendComplete
:
1117 container
= le32_to_cpu(((__le32
*)aifcmd
->data
)[1]);
1118 if (container
>= dev
->maximum_num_containers
)
1122 * Find the scsi_device associated with the SCSI
1123 * address. Make sure we have the right array, and if
1124 * so set the flag to initiate a new re-config once we
1125 * see an AifEnConfigChange AIF come through.
1128 if ((dev
!= NULL
) && (dev
->scsi_host_ptr
!= NULL
)) {
1129 device
= scsi_device_lookup(dev
->scsi_host_ptr
,
1130 CONTAINER_TO_CHANNEL(container
),
1131 CONTAINER_TO_ID(container
),
1132 CONTAINER_TO_LUN(container
));
1134 dev
->fsa_dev
[container
].config_needed
= CHANGE
;
1135 dev
->fsa_dev
[container
].config_waiting_on
= AifEnConfigChange
;
1136 dev
->fsa_dev
[container
].config_waiting_stamp
= jiffies
;
1137 scsi_device_put(device
);
1143 * If we are waiting on something and this happens to be
1144 * that thing then set the re-configure flag.
1146 if (container
!= (u32
)-1) {
1147 if (container
>= dev
->maximum_num_containers
)
1149 if ((dev
->fsa_dev
[container
].config_waiting_on
==
1150 le32_to_cpu(*(__le32
*)aifcmd
->data
)) &&
1151 time_before(jiffies
, dev
->fsa_dev
[container
].config_waiting_stamp
+ AIF_SNIFF_TIMEOUT
))
1152 dev
->fsa_dev
[container
].config_waiting_on
= 0;
1153 } else for (container
= 0;
1154 container
< dev
->maximum_num_containers
; ++container
) {
1155 if ((dev
->fsa_dev
[container
].config_waiting_on
==
1156 le32_to_cpu(*(__le32
*)aifcmd
->data
)) &&
1157 time_before(jiffies
, dev
->fsa_dev
[container
].config_waiting_stamp
+ AIF_SNIFF_TIMEOUT
))
1158 dev
->fsa_dev
[container
].config_waiting_on
= 0;
1162 case AifCmdEventNotify
:
1163 switch (le32_to_cpu(((__le32
*)aifcmd
->data
)[0])) {
1164 case AifEnBatteryEvent
:
1165 dev
->cache_protected
=
1166 (((__le32
*)aifcmd
->data
)[1] == cpu_to_le32(3));
1171 case AifEnAddContainer
:
1172 container
= le32_to_cpu(((__le32
*)aifcmd
->data
)[1]);
1173 if (container
>= dev
->maximum_num_containers
)
1175 dev
->fsa_dev
[container
].config_needed
= ADD
;
1176 dev
->fsa_dev
[container
].config_waiting_on
=
1178 dev
->fsa_dev
[container
].config_waiting_stamp
= jiffies
;
1184 case AifEnDeleteContainer
:
1185 container
= le32_to_cpu(((__le32
*)aifcmd
->data
)[1]);
1186 if (container
>= dev
->maximum_num_containers
)
1188 dev
->fsa_dev
[container
].config_needed
= DELETE
;
1189 dev
->fsa_dev
[container
].config_waiting_on
=
1191 dev
->fsa_dev
[container
].config_waiting_stamp
= jiffies
;
1195 * Container change detected. If we currently are not
1196 * waiting on something else, setup to wait on a Config Change.
1198 case AifEnContainerChange
:
1199 container
= le32_to_cpu(((__le32
*)aifcmd
->data
)[1]);
1200 if (container
>= dev
->maximum_num_containers
)
1202 if (dev
->fsa_dev
[container
].config_waiting_on
&&
1203 time_before(jiffies
, dev
->fsa_dev
[container
].config_waiting_stamp
+ AIF_SNIFF_TIMEOUT
))
1205 dev
->fsa_dev
[container
].config_needed
= CHANGE
;
1206 dev
->fsa_dev
[container
].config_waiting_on
=
1208 dev
->fsa_dev
[container
].config_waiting_stamp
= jiffies
;
1211 case AifEnConfigChange
:
1215 case AifEnDeleteJBOD
:
1216 container
= le32_to_cpu(((__le32
*)aifcmd
->data
)[1]);
1217 if ((container
>> 28)) {
1218 container
= (u32
)-1;
1221 channel
= (container
>> 24) & 0xF;
1222 if (channel
>= dev
->maximum_num_channels
) {
1223 container
= (u32
)-1;
1226 id
= container
& 0xFFFF;
1227 if (id
>= dev
->maximum_num_physicals
) {
1228 container
= (u32
)-1;
1231 lun
= (container
>> 16) & 0xFF;
1232 container
= (u32
)-1;
1233 channel
= aac_phys_to_logical(channel
);
1234 device_config_needed
=
1235 (((__le32
*)aifcmd
->data
)[0] ==
1236 cpu_to_le32(AifEnAddJBOD
)) ? ADD
: DELETE
;
1237 if (device_config_needed
== ADD
) {
1238 device
= scsi_device_lookup(dev
->scsi_host_ptr
,
1243 scsi_remove_device(device
);
1244 scsi_device_put(device
);
1249 case AifEnEnclosureManagement
:
1251 * If in JBOD mode, automatic exposure of new
1252 * physical target to be suppressed until configured.
1256 switch (le32_to_cpu(((__le32
*)aifcmd
->data
)[3])) {
1257 case EM_DRIVE_INSERTION
:
1258 case EM_DRIVE_REMOVAL
:
1259 case EM_SES_DRIVE_INSERTION
:
1260 case EM_SES_DRIVE_REMOVAL
:
1261 container
= le32_to_cpu(
1262 ((__le32
*)aifcmd
->data
)[2]);
1263 if ((container
>> 28)) {
1264 container
= (u32
)-1;
1267 channel
= (container
>> 24) & 0xF;
1268 if (channel
>= dev
->maximum_num_channels
) {
1269 container
= (u32
)-1;
1272 id
= container
& 0xFFFF;
1273 lun
= (container
>> 16) & 0xFF;
1274 container
= (u32
)-1;
1275 if (id
>= dev
->maximum_num_physicals
) {
1276 /* legacy dev_t ? */
1277 if ((0x2000 <= id
) || lun
|| channel
||
1278 ((channel
= (id
>> 7) & 0x3F) >=
1279 dev
->maximum_num_channels
))
1281 lun
= (id
>> 4) & 7;
1284 channel
= aac_phys_to_logical(channel
);
1285 device_config_needed
=
1286 ((((__le32
*)aifcmd
->data
)[3]
1287 == cpu_to_le32(EM_DRIVE_INSERTION
)) ||
1288 (((__le32
*)aifcmd
->data
)[3]
1289 == cpu_to_le32(EM_SES_DRIVE_INSERTION
))) ?
1294 case AifBuManagerEvent
:
1295 aac_handle_aif_bu(dev
, aifcmd
);
1300 * If we are waiting on something and this happens to be
1301 * that thing then set the re-configure flag.
1303 if (container
!= (u32
)-1) {
1304 if (container
>= dev
->maximum_num_containers
)
1306 if ((dev
->fsa_dev
[container
].config_waiting_on
==
1307 le32_to_cpu(*(__le32
*)aifcmd
->data
)) &&
1308 time_before(jiffies
, dev
->fsa_dev
[container
].config_waiting_stamp
+ AIF_SNIFF_TIMEOUT
))
1309 dev
->fsa_dev
[container
].config_waiting_on
= 0;
1310 } else for (container
= 0;
1311 container
< dev
->maximum_num_containers
; ++container
) {
1312 if ((dev
->fsa_dev
[container
].config_waiting_on
==
1313 le32_to_cpu(*(__le32
*)aifcmd
->data
)) &&
1314 time_before(jiffies
, dev
->fsa_dev
[container
].config_waiting_stamp
+ AIF_SNIFF_TIMEOUT
))
1315 dev
->fsa_dev
[container
].config_waiting_on
= 0;
1319 case AifCmdJobProgress
:
1321 * These are job progress AIF's. When a Clear is being
1322 * done on a container it is initially created then hidden from
1323 * the OS. When the clear completes we don't get a config
1324 * change so we monitor the job status complete on a clear then
1325 * wait for a container change.
1328 if (((__le32
*)aifcmd
->data
)[1] == cpu_to_le32(AifJobCtrZero
) &&
1329 (((__le32
*)aifcmd
->data
)[6] == ((__le32
*)aifcmd
->data
)[5] ||
1330 ((__le32
*)aifcmd
->data
)[4] == cpu_to_le32(AifJobStsSuccess
))) {
1332 container
< dev
->maximum_num_containers
;
1335 * Stomp on all config sequencing for all
1338 dev
->fsa_dev
[container
].config_waiting_on
=
1339 AifEnContainerChange
;
1340 dev
->fsa_dev
[container
].config_needed
= ADD
;
1341 dev
->fsa_dev
[container
].config_waiting_stamp
=
1345 if (((__le32
*)aifcmd
->data
)[1] == cpu_to_le32(AifJobCtrZero
) &&
1346 ((__le32
*)aifcmd
->data
)[6] == 0 &&
1347 ((__le32
*)aifcmd
->data
)[4] == cpu_to_le32(AifJobStsRunning
)) {
1349 container
< dev
->maximum_num_containers
;
1352 * Stomp on all config sequencing for all
1355 dev
->fsa_dev
[container
].config_waiting_on
=
1356 AifEnContainerChange
;
1357 dev
->fsa_dev
[container
].config_needed
= DELETE
;
1358 dev
->fsa_dev
[container
].config_waiting_stamp
=
1367 if (device_config_needed
== NOTHING
) {
1368 for (; container
< dev
->maximum_num_containers
; ++container
) {
1369 if ((dev
->fsa_dev
[container
].config_waiting_on
== 0) &&
1370 (dev
->fsa_dev
[container
].config_needed
!= NOTHING
) &&
1371 time_before(jiffies
, dev
->fsa_dev
[container
].config_waiting_stamp
+ AIF_SNIFF_TIMEOUT
)) {
1372 device_config_needed
=
1373 dev
->fsa_dev
[container
].config_needed
;
1374 dev
->fsa_dev
[container
].config_needed
= NOTHING
;
1375 channel
= CONTAINER_TO_CHANNEL(container
);
1376 id
= CONTAINER_TO_ID(container
);
1377 lun
= CONTAINER_TO_LUN(container
);
1382 if (device_config_needed
== NOTHING
)
1386 * If we decided that a re-configuration needs to be done,
1387 * schedule it here on the way out the door, please close the door
1392 * Find the scsi_device associated with the SCSI address,
1393 * and mark it as changed, invalidating the cache. This deals
1394 * with changes to existing device IDs.
1397 if (!dev
|| !dev
->scsi_host_ptr
)
1400 * force reload of disk info via aac_probe_container
1402 if ((channel
== CONTAINER_CHANNEL
) &&
1403 (device_config_needed
!= NOTHING
)) {
1404 if (dev
->fsa_dev
[container
].valid
== 1)
1405 dev
->fsa_dev
[container
].valid
= 2;
1406 aac_probe_container(dev
, container
);
1408 device
= scsi_device_lookup(dev
->scsi_host_ptr
, channel
, id
, lun
);
1410 switch (device_config_needed
) {
1412 #if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1413 scsi_remove_device(device
);
1415 if (scsi_device_online(device
)) {
1416 scsi_device_set_state(device
, SDEV_OFFLINE
);
1417 sdev_printk(KERN_INFO
, device
,
1418 "Device offlined - %s\n",
1419 (channel
== CONTAINER_CHANNEL
) ?
1421 "enclosure services event");
1426 if (!scsi_device_online(device
)) {
1427 sdev_printk(KERN_INFO
, device
,
1428 "Device online - %s\n",
1429 (channel
== CONTAINER_CHANNEL
) ?
1431 "enclosure services event");
1432 scsi_device_set_state(device
, SDEV_RUNNING
);
1436 if ((channel
== CONTAINER_CHANNEL
)
1437 && (!dev
->fsa_dev
[container
].valid
)) {
1438 #if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
1439 scsi_remove_device(device
);
1441 if (!scsi_device_online(device
))
1443 scsi_device_set_state(device
, SDEV_OFFLINE
);
1444 sdev_printk(KERN_INFO
, device
,
1445 "Device offlined - %s\n",
1450 scsi_rescan_device(&device
->sdev_gendev
);
1456 scsi_device_put(device
);
1457 device_config_needed
= NOTHING
;
1459 if (device_config_needed
== ADD
)
1460 scsi_add_device(dev
->scsi_host_ptr
, channel
, id
, lun
);
1461 if (channel
== CONTAINER_CHANNEL
) {
1463 device_config_needed
= NOTHING
;
1468 static void aac_schedule_bus_scan(struct aac_dev
*aac
)
1470 if (aac
->sa_firmware
)
1471 aac_schedule_safw_scan_worker(aac
);
1473 aac_schedule_src_reinit_aif_worker(aac
);
1476 static int _aac_reset_adapter(struct aac_dev
*aac
, int forced
, u8 reset_type
)
1480 struct Scsi_Host
*host
= aac
->scsi_host_ptr
;
1484 int num_of_fibs
= 0;
1488 * - host is locked, unless called by the aacraid thread.
1489 * (a matter of convenience, due to legacy issues surrounding
1490 * eh_host_adapter_reset).
1491 * - in_reset is asserted, so no new i/o is getting to the
1493 * - The card is dead, or will be very shortly ;-/ so no new
1494 * commands are completing in the interrupt service.
1496 aac_adapter_disable_int(aac
);
1497 if (aac
->thread
&& aac
->thread
->pid
!= current
->pid
) {
1498 spin_unlock_irq(host
->host_lock
);
1499 kthread_stop(aac
->thread
);
1505 * If a positive health, means in a known DEAD PANIC
1506 * state and the adapter could be reset to `try again'.
1508 bled
= forced
? 0 : aac_adapter_check_health(aac
);
1509 retval
= aac_adapter_restart(aac
, bled
, reset_type
);
1515 * Loop through the fibs, close the synchronous FIBS
1518 num_of_fibs
= aac
->scsi_host_ptr
->can_queue
+ AAC_NUM_MGT_FIB
;
1519 for (index
= 0; index
< num_of_fibs
; index
++) {
1521 struct fib
*fib
= &aac
->fibs
[index
];
1522 __le32 XferState
= fib
->hw_fib_va
->header
.XferState
;
1523 bool is_response_expected
= false;
1525 if (!(XferState
& cpu_to_le32(NoResponseExpected
| Async
)) &&
1526 (XferState
& cpu_to_le32(ResponseExpected
)))
1527 is_response_expected
= true;
1529 if (is_response_expected
1530 || fib
->flags
& FIB_CONTEXT_FLAG_WAIT
) {
1531 unsigned long flagv
;
1532 spin_lock_irqsave(&fib
->event_lock
, flagv
);
1533 complete(&fib
->event_wait
);
1534 spin_unlock_irqrestore(&fib
->event_lock
, flagv
);
1539 /* Give some extra time for ioctls to complete. */
1542 index
= aac
->cardtype
;
1545 * Re-initialize the adapter, first free resources, then carefully
1546 * apply the initialization sequence to come back again. Only risk
1547 * is a change in Firmware dropping cache, it is assumed the caller
1548 * will ensure that i/o is queisced and the card is flushed in that
1552 aac_fib_map_free(aac
);
1553 dma_free_coherent(&aac
->pdev
->dev
, aac
->comm_size
, aac
->comm_addr
,
1555 aac_adapter_ioremap(aac
, 0);
1556 aac
->comm_addr
= NULL
;
1560 kfree(aac
->fsa_dev
);
1561 aac
->fsa_dev
= NULL
;
1563 dmamask
= DMA_BIT_MASK(32);
1564 quirks
= aac_get_driver_ident(index
)->quirks
;
1565 if (quirks
& AAC_QUIRK_31BIT
)
1566 retval
= dma_set_mask(&aac
->pdev
->dev
, dmamask
);
1567 else if (!(quirks
& AAC_QUIRK_SRC
))
1568 retval
= dma_set_mask(&aac
->pdev
->dev
, dmamask
);
1570 retval
= dma_set_coherent_mask(&aac
->pdev
->dev
, dmamask
);
1572 if (quirks
& AAC_QUIRK_31BIT
&& !retval
) {
1573 dmamask
= DMA_BIT_MASK(31);
1574 retval
= dma_set_coherent_mask(&aac
->pdev
->dev
, dmamask
);
1580 if ((retval
= (*(aac_get_driver_ident(index
)->init
))(aac
)))
1584 aac
->thread
= kthread_run(aac_command_thread
, aac
, "%s",
1586 if (IS_ERR(aac
->thread
)) {
1587 retval
= PTR_ERR(aac
->thread
);
1592 (void)aac_get_adapter_info(aac
);
1593 if ((quirks
& AAC_QUIRK_34SG
) && (host
->sg_tablesize
> 34)) {
1594 host
->sg_tablesize
= 34;
1595 host
->max_sectors
= (host
->sg_tablesize
* 8) + 112;
1597 if ((quirks
& AAC_QUIRK_17SG
) && (host
->sg_tablesize
> 17)) {
1598 host
->sg_tablesize
= 17;
1599 host
->max_sectors
= (host
->sg_tablesize
* 8) + 112;
1601 aac_get_config_status(aac
, 1);
1602 aac_get_containers(aac
);
1604 * This is where the assumption that the Adapter is quiesced
1607 scsi_host_complete_all_commands(host
, DID_RESET
);
1614 * Issue bus rescan to catch any configuration that might have
1617 if (!retval
&& !is_kdump_kernel()) {
1618 dev_info(&aac
->pdev
->dev
, "Scheduling bus rescan\n");
1619 aac_schedule_bus_scan(aac
);
1623 spin_lock_irq(host
->host_lock
);
1628 int aac_reset_adapter(struct aac_dev
*aac
, int forced
, u8 reset_type
)
1630 unsigned long flagv
= 0;
1631 int retval
, unblock_retval
;
1632 struct Scsi_Host
*host
= aac
->scsi_host_ptr
;
1635 if (spin_trylock_irqsave(&aac
->fib_lock
, flagv
) == 0)
1638 if (aac
->in_reset
) {
1639 spin_unlock_irqrestore(&aac
->fib_lock
, flagv
);
1643 spin_unlock_irqrestore(&aac
->fib_lock
, flagv
);
1646 * Wait for all commands to complete to this specific
1647 * target (block maximum 60 seconds). Although not necessary,
1648 * it does make us a good storage citizen.
1650 scsi_host_block(host
);
1652 /* Quiesce build, flush cache, write through mode */
1654 aac_send_shutdown(aac
);
1655 spin_lock_irqsave(host
->host_lock
, flagv
);
1656 bled
= forced
? forced
:
1657 (aac_check_reset
!= 0 && aac_check_reset
!= 1);
1658 retval
= _aac_reset_adapter(aac
, bled
, reset_type
);
1659 spin_unlock_irqrestore(host
->host_lock
, flagv
);
1661 unblock_retval
= scsi_host_unblock(host
, SDEV_RUNNING
);
1663 retval
= unblock_retval
;
1664 if ((forced
< 2) && (retval
== -ENODEV
)) {
1665 /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
1666 struct fib
* fibctx
= aac_fib_alloc(aac
);
1668 struct aac_pause
*cmd
;
1671 aac_fib_init(fibctx
);
1673 cmd
= (struct aac_pause
*) fib_data(fibctx
);
1675 cmd
->command
= cpu_to_le32(VM_ContainerConfig
);
1676 cmd
->type
= cpu_to_le32(CT_PAUSE_IO
);
1677 cmd
->timeout
= cpu_to_le32(1);
1678 cmd
->min
= cpu_to_le32(1);
1679 cmd
->noRescan
= cpu_to_le32(1);
1680 cmd
->count
= cpu_to_le32(0);
1682 status
= aac_fib_send(ContainerCommand
,
1684 sizeof(struct aac_pause
),
1686 -2 /* Timeout silently */, 1,
1690 aac_fib_complete(fibctx
);
1691 /* FIB should be freed only after getting
1692 * the response from the F/W */
1693 if (status
!= -ERESTARTSYS
)
1694 aac_fib_free(fibctx
);
1701 int aac_check_health(struct aac_dev
* aac
)
1704 unsigned long time_now
, flagv
= 0;
1705 struct list_head
* entry
;
1707 /* Extending the scope of fib_lock slightly to protect aac->in_reset */
1708 if (spin_trylock_irqsave(&aac
->fib_lock
, flagv
) == 0)
1711 if (aac
->in_reset
|| !(BlinkLED
= aac_adapter_check_health(aac
))) {
1712 spin_unlock_irqrestore(&aac
->fib_lock
, flagv
);
1719 * aac_aifcmd.command = AifCmdEventNotify = 1
1720 * aac_aifcmd.seqnum = 0xFFFFFFFF
1721 * aac_aifcmd.data[0] = AifEnExpEvent = 23
1722 * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
1723 * aac.aifcmd.data[2] = AifHighPriority = 3
1724 * aac.aifcmd.data[3] = BlinkLED
1727 time_now
= jiffies
/HZ
;
1728 entry
= aac
->fib_list
.next
;
1731 * For each Context that is on the
1732 * fibctxList, make a copy of the
1733 * fib, and then set the event to wake up the
1734 * thread that is waiting for it.
1736 while (entry
!= &aac
->fib_list
) {
1738 * Extract the fibctx
1740 struct aac_fib_context
*fibctx
= list_entry(entry
, struct aac_fib_context
, next
);
1741 struct hw_fib
* hw_fib
;
1744 * Check if the queue is getting
1747 if (fibctx
->count
> 20) {
1749 * It's *not* jiffies folks,
1750 * but jiffies / HZ, so do not
1753 u32 time_last
= fibctx
->jiffies
;
1755 * Has it been > 2 minutes
1756 * since the last read off
1759 if ((time_now
- time_last
) > aif_timeout
) {
1760 entry
= entry
->next
;
1761 aac_close_fib_context(aac
, fibctx
);
1766 * Warning: no sleep allowed while
1769 hw_fib
= kzalloc(sizeof(struct hw_fib
), GFP_ATOMIC
);
1770 fib
= kzalloc(sizeof(struct fib
), GFP_ATOMIC
);
1771 if (fib
&& hw_fib
) {
1772 struct aac_aifcmd
* aif
;
1774 fib
->hw_fib_va
= hw_fib
;
1777 fib
->type
= FSAFS_NTC_FIB_CONTEXT
;
1778 fib
->size
= sizeof (struct fib
);
1779 fib
->data
= hw_fib
->data
;
1780 aif
= (struct aac_aifcmd
*)hw_fib
->data
;
1781 aif
->command
= cpu_to_le32(AifCmdEventNotify
);
1782 aif
->seqnum
= cpu_to_le32(0xFFFFFFFF);
1783 ((__le32
*)aif
->data
)[0] = cpu_to_le32(AifEnExpEvent
);
1784 ((__le32
*)aif
->data
)[1] = cpu_to_le32(AifExeFirmwarePanic
);
1785 ((__le32
*)aif
->data
)[2] = cpu_to_le32(AifHighPriority
);
1786 ((__le32
*)aif
->data
)[3] = cpu_to_le32(BlinkLED
);
1789 * Put the FIB onto the
1792 list_add_tail(&fib
->fiblink
, &fibctx
->fib_list
);
1795 * Set the event to wake up the
1796 * thread that will waiting.
1798 complete(&fibctx
->completion
);
1800 printk(KERN_WARNING
"aifd: didn't allocate NewFib.\n");
1804 entry
= entry
->next
;
1807 spin_unlock_irqrestore(&aac
->fib_lock
, flagv
);
1810 printk(KERN_ERR
"%s: Host adapter is dead (or got a PCI error) %d\n",
1811 aac
->name
, BlinkLED
);
1815 printk(KERN_ERR
"%s: Host adapter BLINK LED 0x%x\n", aac
->name
, BlinkLED
);
1822 static inline int is_safw_raid_volume(struct aac_dev
*aac
, int bus
, int target
)
1824 return bus
== CONTAINER_CHANNEL
&& target
< aac
->maximum_num_containers
;
1827 static struct scsi_device
*aac_lookup_safw_scsi_device(struct aac_dev
*dev
,
1831 if (bus
!= CONTAINER_CHANNEL
)
1832 bus
= aac_phys_to_logical(bus
);
1834 return scsi_device_lookup(dev
->scsi_host_ptr
, bus
, target
, 0);
1837 static int aac_add_safw_device(struct aac_dev
*dev
, int bus
, int target
)
1839 if (bus
!= CONTAINER_CHANNEL
)
1840 bus
= aac_phys_to_logical(bus
);
1842 return scsi_add_device(dev
->scsi_host_ptr
, bus
, target
, 0);
1845 static void aac_put_safw_scsi_device(struct scsi_device
*sdev
)
1848 scsi_device_put(sdev
);
1851 static void aac_remove_safw_device(struct aac_dev
*dev
, int bus
, int target
)
1853 struct scsi_device
*sdev
;
1855 sdev
= aac_lookup_safw_scsi_device(dev
, bus
, target
);
1856 scsi_remove_device(sdev
);
1857 aac_put_safw_scsi_device(sdev
);
1860 static inline int aac_is_safw_scan_count_equal(struct aac_dev
*dev
,
1861 int bus
, int target
)
1863 return dev
->hba_map
[bus
][target
].scan_counter
== dev
->scan_counter
;
1866 static int aac_is_safw_target_valid(struct aac_dev
*dev
, int bus
, int target
)
1868 if (is_safw_raid_volume(dev
, bus
, target
))
1869 return dev
->fsa_dev
[target
].valid
;
1871 return aac_is_safw_scan_count_equal(dev
, bus
, target
);
1874 static int aac_is_safw_device_exposed(struct aac_dev
*dev
, int bus
, int target
)
1877 struct scsi_device
*sdev
;
1879 sdev
= aac_lookup_safw_scsi_device(dev
, bus
, target
);
1882 aac_put_safw_scsi_device(sdev
);
1887 static int aac_update_safw_host_devices(struct aac_dev
*dev
)
1895 rcode
= aac_setup_safw_adapter(dev
);
1896 if (unlikely(rcode
< 0)) {
1900 for (i
= 0; i
< AAC_BUS_TARGET_LOOP
; i
++) {
1902 bus
= get_bus_number(i
);
1903 target
= get_target_number(i
);
1905 is_exposed
= aac_is_safw_device_exposed(dev
, bus
, target
);
1907 if (aac_is_safw_target_valid(dev
, bus
, target
) && !is_exposed
)
1908 aac_add_safw_device(dev
, bus
, target
);
1909 else if (!aac_is_safw_target_valid(dev
, bus
, target
) &&
1911 aac_remove_safw_device(dev
, bus
, target
);
1917 static int aac_scan_safw_host(struct aac_dev
*dev
)
1921 rcode
= aac_update_safw_host_devices(dev
);
1923 aac_schedule_safw_scan_worker(dev
);
1928 int aac_scan_host(struct aac_dev
*dev
)
1932 mutex_lock(&dev
->scan_mutex
);
1933 if (dev
->sa_firmware
)
1934 rcode
= aac_scan_safw_host(dev
);
1936 scsi_scan_host(dev
->scsi_host_ptr
);
1937 mutex_unlock(&dev
->scan_mutex
);
1942 void aac_src_reinit_aif_worker(struct work_struct
*work
)
1944 struct aac_dev
*dev
= container_of(to_delayed_work(work
),
1945 struct aac_dev
, src_reinit_aif_worker
);
1947 wait_event(dev
->scsi_host_ptr
->host_wait
,
1948 !scsi_host_in_recovery(dev
->scsi_host_ptr
));
1949 aac_reinit_aif(dev
, dev
->cardtype
);
1953 * aac_handle_sa_aif Handle a message from the firmware
1954 * @dev: Which adapter this fib is from
1955 * @fibptr: Pointer to fibptr from adapter
1957 * This routine handles a driver notify fib from the adapter and
1958 * dispatches it to the appropriate routine for handling.
1960 static void aac_handle_sa_aif(struct aac_dev
*dev
, struct fib
*fibptr
)
1965 if (fibptr
->hbacmd_size
& SA_AIF_HOTPLUG
)
1966 events
= SA_AIF_HOTPLUG
;
1967 else if (fibptr
->hbacmd_size
& SA_AIF_HARDWARE
)
1968 events
= SA_AIF_HARDWARE
;
1969 else if (fibptr
->hbacmd_size
& SA_AIF_PDEV_CHANGE
)
1970 events
= SA_AIF_PDEV_CHANGE
;
1971 else if (fibptr
->hbacmd_size
& SA_AIF_LDEV_CHANGE
)
1972 events
= SA_AIF_LDEV_CHANGE
;
1973 else if (fibptr
->hbacmd_size
& SA_AIF_BPSTAT_CHANGE
)
1974 events
= SA_AIF_BPSTAT_CHANGE
;
1975 else if (fibptr
->hbacmd_size
& SA_AIF_BPCFG_CHANGE
)
1976 events
= SA_AIF_BPCFG_CHANGE
;
1979 case SA_AIF_HOTPLUG
:
1980 case SA_AIF_HARDWARE
:
1981 case SA_AIF_PDEV_CHANGE
:
1982 case SA_AIF_LDEV_CHANGE
:
1983 case SA_AIF_BPCFG_CHANGE
:
1989 case SA_AIF_BPSTAT_CHANGE
:
1990 /* currently do nothing */
1994 for (i
= 1; i
<= 10; ++i
) {
1995 events
= src_readl(dev
, MUnit
.IDR
);
1996 if (events
& (1<<23)) {
1997 pr_warn(" AIF not cleared by firmware - %d/%d)\n",
2004 static int get_fib_count(struct aac_dev
*dev
)
2006 unsigned int num
= 0;
2007 struct list_head
*entry
;
2008 unsigned long flagv
;
2011 * Warning: no sleep allowed while
2012 * holding spinlock. We take the estimate
2013 * and pre-allocate a set of fibs outside the
2016 num
= le32_to_cpu(dev
->init
->r7
.adapter_fibs_size
)
2017 / sizeof(struct hw_fib
); /* some extra */
2018 spin_lock_irqsave(&dev
->fib_lock
, flagv
);
2019 entry
= dev
->fib_list
.next
;
2020 while (entry
!= &dev
->fib_list
) {
2021 entry
= entry
->next
;
2024 spin_unlock_irqrestore(&dev
->fib_lock
, flagv
);
2029 static int fillup_pools(struct aac_dev
*dev
, struct hw_fib
**hw_fib_pool
,
2030 struct fib
**fib_pool
,
2033 struct hw_fib
**hw_fib_p
;
2036 hw_fib_p
= hw_fib_pool
;
2038 while (hw_fib_p
< &hw_fib_pool
[num
]) {
2039 *(hw_fib_p
) = kmalloc(sizeof(struct hw_fib
), GFP_KERNEL
);
2040 if (!(*(hw_fib_p
++))) {
2045 *(fib_p
) = kmalloc(sizeof(struct fib
), GFP_KERNEL
);
2046 if (!(*(fib_p
++))) {
2047 kfree(*(--hw_fib_p
));
2053 * Get the actual number of allocated fibs
2055 num
= hw_fib_p
- hw_fib_pool
;
2059 static void wakeup_fibctx_threads(struct aac_dev
*dev
,
2060 struct hw_fib
**hw_fib_pool
,
2061 struct fib
**fib_pool
,
2063 struct hw_fib
*hw_fib
,
2066 unsigned long flagv
;
2067 struct list_head
*entry
;
2068 struct hw_fib
**hw_fib_p
;
2070 u32 time_now
, time_last
;
2071 struct hw_fib
*hw_newfib
;
2073 struct aac_fib_context
*fibctx
;
2075 time_now
= jiffies
/HZ
;
2076 spin_lock_irqsave(&dev
->fib_lock
, flagv
);
2077 entry
= dev
->fib_list
.next
;
2079 * For each Context that is on the
2080 * fibctxList, make a copy of the
2081 * fib, and then set the event to wake up the
2082 * thread that is waiting for it.
2085 hw_fib_p
= hw_fib_pool
;
2087 while (entry
!= &dev
->fib_list
) {
2089 * Extract the fibctx
2091 fibctx
= list_entry(entry
, struct aac_fib_context
,
2094 * Check if the queue is getting
2097 if (fibctx
->count
> 20) {
2099 * It's *not* jiffies folks,
2100 * but jiffies / HZ so do not
2103 time_last
= fibctx
->jiffies
;
2105 * Has it been > 2 minutes
2106 * since the last read off
2109 if ((time_now
- time_last
) > aif_timeout
) {
2110 entry
= entry
->next
;
2111 aac_close_fib_context(dev
, fibctx
);
2116 * Warning: no sleep allowed while
2119 if (hw_fib_p
>= &hw_fib_pool
[num
]) {
2120 pr_warn("aifd: didn't allocate NewFib\n");
2121 entry
= entry
->next
;
2125 hw_newfib
= *hw_fib_p
;
2126 *(hw_fib_p
++) = NULL
;
2130 * Make the copy of the FIB
2132 memcpy(hw_newfib
, hw_fib
, sizeof(struct hw_fib
));
2133 memcpy(newfib
, fib
, sizeof(struct fib
));
2134 newfib
->hw_fib_va
= hw_newfib
;
2136 * Put the FIB onto the
2139 list_add_tail(&newfib
->fiblink
, &fibctx
->fib_list
);
2142 * Set the event to wake up the
2143 * thread that is waiting.
2145 complete(&fibctx
->completion
);
2147 entry
= entry
->next
;
2150 * Set the status of this FIB
2152 *(__le32
*)hw_fib
->data
= cpu_to_le32(ST_OK
);
2153 aac_fib_adapter_complete(fib
, sizeof(u32
));
2154 spin_unlock_irqrestore(&dev
->fib_lock
, flagv
);
static void aac_process_events(struct aac_dev *dev)
{
	struct hw_fib *hw_fib;
	struct fib *fib;
	unsigned long flags;
	spinlock_t *t_lock;

	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_lock_irqsave(t_lock, flags);

	while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
		struct list_head *entry;
		struct aac_aifcmd *aifcmd;
		unsigned int num;
		struct hw_fib **hw_fib_pool, **hw_fib_p;
		struct fib **fib_pool, **fib_p;

		set_current_state(TASK_RUNNING);

		entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
		list_del(entry);

		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_unlock_irqrestore(t_lock, flags);

		fib = list_entry(entry, struct fib, fiblink);
		hw_fib = fib->hw_fib_va;
		if (dev->sa_firmware) {
			/* Thor AIF */
			aac_handle_sa_aif(dev, fib);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			goto free_fib;
		}
		/*
		 * We will process the FIB here or pass it to a
		 * worker thread that is TBD. We really can't
		 * do anything at this point since we don't have
		 * anything defined for this thread to do.
		 */
		memset(fib, 0, sizeof(struct fib));
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;
		/*
		 * We only handle AifRequest fibs from the adapter.
		 */
		aifcmd = (struct aac_aifcmd *) hw_fib->data;
		if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
			/* Handle Driver Notify Events */
			aac_handle_aif(dev, fib);
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			goto free_fib;
		}
		/*
		 * The u32 here is important and intended. We are using
		 * 32bit wrapping time to fit the adapter field
		 */

		/* Sniff events */
		if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
		 || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
			aac_handle_aif(dev, fib);
		}

		/*
		 * get number of fibs to process
		 */
		num = get_fib_count(dev);
		if (!num)
			goto free_fib;

		hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
					    GFP_KERNEL);
		if (!hw_fib_pool)
			goto free_fib;

		fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
		if (!fib_pool)
			goto free_hw_fib_pool;

		/*
		 * Fill up fib pointer pools with actual fibs
		 * and hw_fibs
		 */
		num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
		if (!num)
			goto free_mem;

		/*
		 * wakeup the thread that is waiting for
		 * the response from fw (ioctl)
		 */
		wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
				      fib, hw_fib, num);

free_mem:
		/* Free up the remaining resources */
		hw_fib_p = hw_fib_pool;
		fib_p = fib_pool;
		while (hw_fib_p < &hw_fib_pool[num]) {
			kfree(*hw_fib_p);
			kfree(*fib_p);
			++fib_p;
			++hw_fib_p;
		}
		kfree(fib_pool);
free_hw_fib_pool:
		kfree(hw_fib_pool);
free_fib:
		kfree(fib);
		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_lock_irqsave(t_lock, flags);
	}
	/*
	 * There are no more AIF's
	 */
	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_unlock_irqrestore(t_lock, flags);
}
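
/*
 * aac_send_wellness_command() hand-builds a SCSI pass-through SRB: the
 * payload is copied into a single DMA-coherent buffer, described by one
 * 64-bit scatter-gather element (the high and low dwords of the bus
 * address split across sg[0].addr[1] and sg[0].addr[0]), and issued as
 * an SRBF_ExecuteScsi command with a BMIC_OUT / WRITE_HOST_WELLNESS CDB
 * against the virtual bus/target reported in supplement_adapter_info.
 */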
static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
				     u32 datasize)
{
	struct aac_srb *srbcmd;
	struct sgmap64 *sg64;
	dma_addr_t addr;
	char *dma_buf;
	struct fib *fibptr;
	int ret = -ENOMEM;
	u32 vbus, vid;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
				     GFP_KERNEL);
	if (!dma_buf)
		goto fib_free_out;

	aac_fib_init(fibptr);

	vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
	vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);

	srbcmd = (struct aac_srb *)fib_data(fibptr);

	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
	srbcmd->channel = cpu_to_le32(vbus);
	srbcmd->id = cpu_to_le32(vid);
	srbcmd->lun = 0;
	srbcmd->flags = cpu_to_le32(SRB_DataOut);
	srbcmd->timeout = cpu_to_le32(10);
	srbcmd->retry_limit = 0;
	srbcmd->cdb_size = cpu_to_le32(12);
	srbcmd->count = cpu_to_le32(datasize);

	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
	srbcmd->cdb[0] = BMIC_OUT;
	srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
	memcpy(dma_buf, (char *)wellness_str, datasize);

	sg64 = (struct sgmap64 *)&srbcmd->sg;
	sg64->count = cpu_to_le32(1);
	sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
	sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
	sg64->sg[0].count = cpu_to_le32(datasize);

	ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
			   FsaNormal, 1, 1, NULL, NULL);

	dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);

	/*
	 * Do not set XferState to zero unless
	 * receives a response from F/W
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * FIB should be freed only after
	 * getting the response from the F/W
	 */
	if (ret != -ERESTARTSYS)
		goto fib_free_out;

out:
	return ret;
fib_free_out:
	aac_fib_free(fibptr);
	goto out;
}
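
/*
 * Layout of the "<HW>TD...DW..ZZ" wellness payload filled in by
 * aac_send_safw_hostttime() below; every field is BCD-encoded with
 * bin2bcd():
 *
 *	byte  8: hour		byte  9: minute		byte 10: second
 *	byte 12: month		byte 13: day of month
 *	byte 14: year / 100	byte 15: year % 100
 */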
static int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
{
	struct tm cur_tm;
	char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
	u32 datasize = sizeof(wellness_str);
	time64_t local_time;
	int ret = -ENODEV;

	if (!dev->sa_firmware)
		goto out;

	local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60));
	time64_to_tm(local_time, 0, &cur_tm);
	cur_tm.tm_mon += 1;
	cur_tm.tm_year += 1900;
	wellness_str[8] = bin2bcd(cur_tm.tm_hour);
	wellness_str[9] = bin2bcd(cur_tm.tm_min);
	wellness_str[10] = bin2bcd(cur_tm.tm_sec);
	wellness_str[12] = bin2bcd(cur_tm.tm_mon);
	wellness_str[13] = bin2bcd(cur_tm.tm_mday);
	wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
	wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);

	ret = aac_send_wellness_command(dev, wellness_str, datasize);

out:
	return ret;
}
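
/*
 * Older (non-SA) firmware takes the host time as a plain 32-bit seconds
 * count in a SendHostTime FIB instead of the BCD wellness string above,
 * hence the y2106 overflow note below.
 */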
static int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
{
	struct fib *fibptr;
	__le32 *info;
	int ret = -ENOMEM;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	aac_fib_init(fibptr);
	info = (__le32 *)fib_data(fibptr);
	*info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */
	ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
			   1, 1, NULL, NULL);

	/*
	 * Do not set XferState to zero unless
	 * receives a response from F/W
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * FIB should be freed only after
	 * getting the response from the F/W
	 */
	if (ret != -ERESTARTSYS)
		aac_fib_free(fibptr);

out:
	return ret;
}
/**
 * aac_command_thread - command processing thread
 * @data: Adapter to monitor
 *
 * Waits on the commandready event in its queue. When the event gets set
 * it will pull FIBs off its queue. It will continue to pull FIBs off
 * until the queue is empty. When the queue is empty it will wait for
 * more FIBs.
 */

int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long next_jiffies = jiffies + HZ;
	unsigned long next_check_jiffies = next_jiffies;
	long difference = HZ;

	/*
	 * We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 * Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk ((KERN_INFO "aac_command_thread start\n"));
	while (1) {

		aac_process_events(dev);

		/*
		 * Background activity
		 */
		if ((time_before(next_check_jiffies, next_jiffies))
		 && ((difference = next_check_jiffies - jiffies) <= 0)) {
			next_check_jiffies = next_jiffies;
			if (aac_adapter_check_health(dev) == 0) {
				difference = ((long)(unsigned)check_interval)
					   * HZ;
				next_check_jiffies = jiffies + difference;
			} else if (!dev->queues)
				break;
		}
		if (!time_before(next_check_jiffies, next_jiffies)
		 && ((difference = next_jiffies - jiffies) <= 0)) {
			struct timespec64 now;
			int ret;

			/* Don't even try to talk to adapter if it's sick */
			ret = aac_adapter_check_health(dev);
			if (ret || !dev->queues)
				break;
			next_check_jiffies = jiffies
					   + ((long)(unsigned)check_interval)
					   * HZ;
			ktime_get_real_ts64(&now);

			/* Synchronize our watches */
			if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
			 && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
				difference = HZ + HZ / 2 -
					     now.tv_nsec / (NSEC_PER_SEC / HZ);
			else {
				if (now.tv_nsec > NSEC_PER_SEC / 2)
					++now.tv_sec;

				if (dev->sa_firmware)
					ret = aac_send_safw_hostttime(dev, &now);
				else
					ret = aac_send_hosttime(dev, &now);

				difference = (long)(unsigned)update_interval * HZ;
			}
			next_jiffies = jiffies + difference;
			if (time_before(next_check_jiffies, next_jiffies))
				difference = next_check_jiffies - jiffies;
		}
		if (difference <= 0)
			difference = 1;
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop())
			break;

		/*
		 * we probably want usleep_range() here instead of the
		 * jiffies computation
		 */
		schedule_timeout(difference);

		if (kthread_should_stop())
			break;
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready,
				  &wait);
	dev->aif_thread = 0;
	return 0;
}
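
/*
 * aac_command_thread() is written to run as a kthread, one per adapter
 * (enforced through dev->aif_thread). A minimal lifecycle sketch using
 * the generic kthread API; the actual driver wiring lives elsewhere:
 *
 *	struct task_struct *t;
 *
 *	t = kthread_run(aac_command_thread, dev, "aacraid");
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	...
 *	kthread_stop(t);	// honoured at the kthread_should_stop()
 *				// checks in the loop above
 */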
int aac_acquire_irq(struct aac_dev *dev)
{
	int i;
	int j;
	int ret = 0;

	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
		for (i = 0; i < dev->max_msix; i++) {
			dev->aac_msix[i].vector_no = i;
			dev->aac_msix[i].dev = dev;
			if (request_irq(pci_irq_vector(dev->pdev, i),
					dev->a_ops.adapter_intr,
					0, "aacraid", &(dev->aac_msix[i]))) {
				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
				       dev->name, dev->id, i);
				for (j = 0; j < i; j++)
					free_irq(pci_irq_vector(dev->pdev, j),
						 &(dev->aac_msix[j]));
				pci_disable_msix(dev->pdev);
				ret = -1;
			}
		}
	} else {
		dev->aac_msix[0].vector_no = 0;
		dev->aac_msix[0].dev = dev;

		if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
				IRQF_SHARED, "aacraid",
				&(dev->aac_msix[0])) < 0) {
			if (dev->msi)
				pci_disable_msi(dev->pdev);
			printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			       dev->name, dev->id);
			ret = -1;
		}
	}
	return ret;
}
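
/*
 * free_irq() matches registrations by dev_id, so the cookies here must
 * mirror the request_irq() calls: &dev->aac_msix[i] per MSI-X vector and
 * &dev->aac_msix[0] for the single-vector case above. The non-src branch
 * below passes plain `dev`, matching an interrupt registered in that
 * adapter family's own init path rather than in aac_acquire_irq().
 */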
void aac_free_irq(struct aac_dev *dev)
{
	int i;

	if (aac_is_src(dev)) {
		if (dev->max_msix > 1) {
			for (i = 0; i < dev->max_msix; i++)
				free_irq(pci_irq_vector(dev->pdev, i),
					 &(dev->aac_msix[i]));
		} else {
			free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
		}
	} else {
		free_irq(dev->pdev->irq, dev);
	}
	if (dev->msi)
		pci_disable_msi(dev->pdev);
	else if (dev->max_msix > 1)
		pci_disable_msix(dev->pdev);
}
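
/*
 * aac_acquire_irq() and aac_free_irq() are meant to be used as a matched
 * pair, e.g. around a controller reset. A minimal sketch (hypothetical
 * caller, not part of this file):
 *
 *	aac_free_irq(aac);
 *	... reset or reconfigure the adapter ...
 *	if (aac_acquire_irq(aac))
 *		return -EIO;	// aac_acquire_irq() returns -1 on failure
 */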