/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contain all routines that are required for FSA host/adapter
 *    communication.
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <asm/semaphore.h>

#include "aacraid.h"
/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */

static int fib_map_alloc(struct aac_dev *dev)
{
	dprintk((KERN_INFO
	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	if ((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  &dev->hw_fib_pa)) == NULL)
		return -ENOMEM;
	return 0;
}
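/*
 * Illustrative sketch (not part of the driver): the pool above is one
 * contiguous DMA-coherent region sized max_fib_size * (can_queue +
 * AAC_NUM_MGT_FIB). Assuming, purely for illustration, a 512-byte
 * max_fib_size, a can_queue of 256 and an AAC_NUM_MGT_FIB of 8:
 *
 *	512 * (256 + 8) = 135168 bytes (264 hardware FIB slots)
 *
 * hw_fib_va/hw_fib_pa are the kernel-virtual and bus addresses of slot 0;
 * slot i lives at hw_fib_va + i * max_fib_size.
 */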
/**
 *	aac_fib_map_free		-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */

void aac_fib_map_free(struct aac_dev *dev)
{
	pci_free_consistent(dev->pdev, dev->max_fib_size
	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
	  dev->hw_fib_va, dev->hw_fib_pa);
}
/**
 *	aac_fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */

int aac_fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib_va;
	dma_addr_t hw_fib_pa;
	int i;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
	}
	if (i < 0)
		return -ENOMEM;

	hw_fib_va = dev->hw_fib_va;
	hw_fib_pa = dev->hw_fib_pa;
	memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
	{
		fibptr->dev = dev;
		fibptr->hw_fib = hw_fib_va;
		fibptr->data = (void *) fibptr->hw_fib->data;
		fibptr->next = fibptr+1;	/* Forward chain the fibs */
		init_MUTEX_LOCKED(&fibptr->event_wait);
		spin_lock_init(&fibptr->event_lock);
		hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
		fibptr->hw_fib_pa = hw_fib_pa;
		hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
	}
	/*
	 *	Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 *	Enable this to debug out of queue space
	 */
	dev->free_fib = &dev->fibs[0];
	return 0;
}
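/*
 * Sketch of the resulting layout (illustrative only): dev->fibs[] is an
 * array of driver-side struct fib objects, each pointing at its slot in
 * the shared DMA region, and forward-chained into a singly linked free
 * list headed by dev->free_fib:
 *
 *	dev->free_fib -> fibs[0] -> fibs[1] -> ... -> fibs[N-1] -> NULL
 *	fibs[i].hw_fib    = hw_fib_va + i * max_fib_size
 *	fibs[i].hw_fib_pa = hw_fib_pa + i * max_fib_size
 *
 * where N = can_queue + AAC_NUM_MGT_FIB.
 */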
/**
 *	aac_fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;

	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	a transfer
	 */
	fibptr->hw_fib->header.XferState = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}
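/*
 * Usage sketch (illustrative only, not part of the driver): callers pair
 * these primitives roughly as follows; see the fuller send example after
 * aac_fib_send() below.
 *
 *	struct fib *fibptr = aac_fib_alloc(dev);
 *	if (!fibptr)
 *		return -ENOMEM;
 *	aac_fib_init(fibptr);
 *	// ... fill fib_data(fibptr), send, consume the response ...
 *	aac_fib_complete(fibptr);
 *	aac_fib_free(fibptr);
 */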
/**
 *	aac_fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 *	(either free or timed out)
 */

void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
		aac_config.fib_timeouts++;
		fibptr->next = fibptr->dev->timeout_fib;
		fibptr->dev->timeout_fib = fibptr;
	} else {
		if (fibptr->hw_fib->header.XferState != 0) {
			printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
				 (void *)fibptr,
				 le32_to_cpu(fibptr->hw_fib->header.XferState));
		}
		fibptr->next = fibptr->dev->free_fib;
		fibptr->dev->free_fib = fibptr;
	}
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}
/**
 *	aac_fib_init	-	initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;

	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
/**
 *	fib_dealloc		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib;

	if (hw_fib->header.StructType != FIB_MAGIC)
		BUG();
	hw_fib->header.XferState = 0;
}
/*
 *	Communication primitives define and support the queuing method we use
 *	to support host to adapter communication. All queue accesses happen
 *	through these routines and they are the only routines which have
 *	knowledge of how these queues are implemented.
 */
/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	With a priority the routine returns a queue entry if the queue has
 *	free entries. If the queue is full (no free entries) then no entry is
 *	returned and the function returns 0, otherwise 1 is returned.
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, q->numpending);
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}
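/*
 * Worked example (illustrative only): these are classic single-producer/
 * single-consumer ring indices kept in a header shared with the firmware.
 * With a 4-entry queue, producer = 1 and consumer = 2, advancing the
 * producer would collide with the consumer ((*index + 1) == consumer), so
 * the queue is reported full and aac_get_entry() returns 0; otherwise the
 * caller receives the slot at q->base + *index and 1 is returned.
 */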
/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}
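/*
 * Sketch (illustrative only): each aac_entry the adapter consumes is just
 * a {size, addr} pair. For a host-originated command, addr is the bus
 * address of the hardware FIB (fibptr->hw_fib_pa); for a response queued
 * back on behalf of the adapter, the adapter's own SenderFibAddress is
 * echoed back so the firmware can locate its data.
 */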
/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This level
 *	sends and receives FIBs. This level has no knowledge of how these FIBs
 *	get passed back and forth.
 */
/**
 *	aac_fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */

int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_queue * q;
	unsigned long flags = 0;
	unsigned long qflags;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it and notify
	 *	the adapter that a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}

	fibptr->done = 0;
	fibptr->flags = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));

	q = &dev->queues->queue[AdapNormCmdQueue];
	if (wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	spin_lock_irqsave(q->lock, qflags);
	if (dev->new_comm_interface) {
		unsigned long count = 10000000L; /* 50 seconds */
		list_add_tail(&fibptr->queue, &q->pendingq);
		q->numpending++;
		spin_unlock_irqrestore(q->lock, qflags);
		while (aac_adapter_send(fibptr) != 0) {
			if (--count == 0) {
				if (wait)
					spin_unlock_irqrestore(&fibptr->event_lock, flags);
				spin_lock_irqsave(q->lock, qflags);
				q->numpending--;
				list_del(&fibptr->queue);
				spin_unlock_irqrestore(q->lock, qflags);
				return -ETIMEDOUT;
			}
			udelay(5);
		}
	} else {
		u32 index;
		unsigned long nointr = 0;
		aac_queue_get(dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);

		q->numpending++;
		list_add_tail(&fibptr->queue, &q->pendingq);
		*(q->headers.producer) = cpu_to_le32(index + 1);
		spin_unlock_irqrestore(q->lock, qflags);
		dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index));
		if (!(nointr & aac_config.irq_mod))
			aac_adapter_notify(dev, AdapNormCmdQueue);
	}
	/*
	 *	If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 *	*VERY* Dangerous to time out a command, the
			 *	assumption is made that we have no hope of
			 *	functioning because an interrupt routing or other
			 *	hardware failure has occurred.
			 */
			unsigned long count = 36000000L; /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				if (--count == 0) {
					spin_lock_irqsave(q->lock, qflags);
					q->numpending--;
					list_del(&fibptr->queue);
					spin_unlock_irqrestore(q->lock, qflags);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update mother board BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}
				udelay(5);
			}
		} else
			down(&fibptr->event_wait);
		if (fibptr->done == 0)
			BUG();

		if ((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			return -ETIMEDOUT;
		} else {
			return 0;
		}
	}
	/*
	 *	If the user does not want a response then return success,
	 *	otherwise return pending
	 */
	return 0;
}
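/*
 * Usage sketch (illustrative only, not part of the driver): a synchronous
 * issue of a hypothetical command carrying a hypothetical struct
 * example_req payload would look roughly like:
 *
 *	struct fib *fibptr = aac_fib_alloc(dev);
 *	struct example_req *req;			// hypothetical type
 *	int status;
 *	if (!fibptr)
 *		return -ENOMEM;
 *	aac_fib_init(fibptr);
 *	req = (struct example_req *)fib_data(fibptr);
 *	// ... fill *req ...
 *	status = aac_fib_send(ExampleCommand,		// hypothetical command
 *			fibptr, sizeof(*req),
 *			FsaNormal, 1, 1, NULL, NULL);	// wait + reply
 *	if (status >= 0) {
 *		// ... read the response from fib_data(fibptr) ...
 *		aac_fib_complete(fibptr);
 *	}
 *	aac_fib_free(fibptr);
 */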
/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Will return a pointer to the entry on the top of the queue requested that
 *	we are a consumer of, and return the address of the queue entry. It does
 *	not change the state of the queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;

	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return status;
}
/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}
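/*
 * Sketch of the index protocol (illustrative only): the producer and
 * consumer indices live in little-endian headers shared with the firmware.
 * The consumer wraps to 1 (not 0) once it runs past q->entries, and when
 * freeing an entry takes the queue out of the full state, the producer
 * side is told via aac_adapter_notify() that there is room again.
 */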
/**
 *	aac_fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (hw_fib->header.XferState == 0) {
		if (dev->new_comm_interface)
			kfree(hw_fib);
		return 0;
	}
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC) {
		if (dev->new_comm_interface)
			kfree(hw_fib);
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->new_comm_interface) {
			kfree(hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	} else {
		printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}
/**
 *	aac_fib_complete	-	fib completion handler
 *	@fibptr: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;

	/*
	 *	Check for a fib which has already been completed
	 */

	if (hw_fib->header.XferState == 0)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC)
		return -EINVAL;
	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command is complete that we had sent to the adapter and this
	 *	cdb could be reused.
	 */
	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
	{
		fib_dealloc(fibptr);
	}
	else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost))
	{
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}
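/*
 * Sketch of the XferState lifecycle (illustrative summary, not normative):
 * a host command starts HostOwned|FibInitialized (aac_fib_init), gains
 * SentFromHost in aac_fib_send, and is expected to carry AdapterProcessed
 * once the response path has handled it. Both completion routines above
 * key off those bits and finish by zeroing XferState via fib_dealloc(),
 * which also makes a stale or double-completed fib detectable.
 */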
/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec array controller
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled)
	{
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}
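/*
 * Worked example (illustrative only): the firmware packs the message
 * length into the low 16 bits of val and the log level into the high 16
 * bits. A val of 0x00020010 therefore decodes as level 2 with a 16-byte
 * message sitting in dev->printfbuf.
 */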
/**
 *	aac_handle_aif		-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */

#define AIF_SNIFF_TIMEOUT	(30*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
	u32 container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed;

	/* Sniff for container changes */

	if (!dev || !dev->fsa_dev)
		return;
	container = (u32)-1;

	/*
	 *	We have set this up to try and minimize the number of
	 *	re-configures that take place. As a result of this when
	 *	certain AIF's come in we will set a flag waiting for another
	 *	type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 *	Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 *	Find the scsi_device associated with the SCSI
			 *	address. Make sure we have the right array, and if
			 *	so set the flag to initiate a new re-config once we
			 *	see an AifEnConfigChange AIF come through.
			 */

			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					dev->fsa_dev[container].config_waiting_stamp = jiffies;
					scsi_device_put(device);
				}
			}
		}

		/*
		 *	If we are waiting on something and this happens to be
		 *	that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;
	case AifCmdEventNotify:
		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
		/*
		 *	Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Container change detected. If we currently are not
		 *	waiting on something else, setup to wait on a Config Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		case AifEnConfigChange:
			break;

		}

		/*
		 *	If we are waiting on something and this happens to be
		 *	that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;
	case AifCmdJobProgress:
		/*
		 *	These are job progress AIF's. When a Clear is being
		 *	done on a container it is initially created then hidden from
		 *	the OS. When the clear completes we don't get a config
		 *	change so we monitor the job status complete on a clear then
		 *	wait for a container change.
		 */

		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
		  || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
		 && (((u32 *)aifcmd->data)[6] == 0)
		 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		break;
	}
	device_config_needed = NOTHING;
	for (container = 0; container < dev->maximum_num_containers;
	    ++container) {
		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			(dev->fsa_dev[container].config_needed != NOTHING) &&
			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
			device_config_needed =
				dev->fsa_dev[container].config_needed;
			dev->fsa_dev[container].config_needed = NOTHING;
			break;
		}
	}
	if (device_config_needed == NOTHING)
		return;
	/*
	 *	If we decided that a re-configuration needs to be done,
	 *	schedule it here on the way out the door, please close the door
	 *	behind you.
	 */

	/*
	 *	Find the scsi_device associated with the SCSI address,
	 *	and mark it as changed, invalidating the cache. This deals
	 *	with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via aac_probe_container
	 */
	if ((device_config_needed == CHANGE)
	 && (dev->fsa_dev[container].valid == 1))
		dev->fsa_dev[container].valid = 2;
	if ((device_config_needed == CHANGE) ||
			(device_config_needed == ADD))
		aac_probe_container(dev, container);
	device = scsi_device_lookup(dev->scsi_host_ptr,
		CONTAINER_TO_CHANNEL(container),
		CONTAINER_TO_ID(container),
		CONTAINER_TO_LUN(container));
	if (device) {
		switch (device_config_needed) {
		case DELETE:
			scsi_remove_device(device);
			break;
		case CHANGE:
			if (!dev->fsa_dev[container].valid) {
				scsi_remove_device(device);
				break;
			}
			scsi_rescan_device(&device->sdev_gendev);

		default:
			break;
		}
		scsi_device_put(device);
	}
	if (device_config_needed == ADD) {
		scsi_add_device(dev->scsi_host_ptr,
		  CONTAINER_TO_CHANNEL(container),
		  CONTAINER_TO_ID(container),
		  CONTAINER_TO_LUN(container));
	}
}
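/*
 * Sketch of the AIF payload layout used above (illustrative only):
 * aifcmd->command selects the class (AifCmdDriverNotify, AifCmdEventNotify,
 * AifCmdJobProgress) and the le32 array at aifcmd->data carries the event
 * code in [0] and, for container events, the container number in [1]; the
 * job progress case additionally inspects [4]..[6] for job status and
 * progress counters.
 */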
/**
 *	aac_command_thread	-	command processing thread
 *	@data: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets set
 *	it will pull FIBs off its queue. It will continue to pull FIBs off
 *	until the queue is empty. When the queue is empty it will wait for
 *	more FIBs.
 */
int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	struct hw_fib *hw_fib, *hw_newfib;
	struct fib *fib, *newfib;
	struct aac_fib_context *fibctx;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 *	We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 *	Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk((KERN_INFO "aac_command_thread start\n"));
	while (1)
	{
		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
			struct list_head *entry;
			struct aac_aifcmd * aifcmd;

			set_current_state(TASK_RUNNING);

			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
			list_del(entry);

			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
			fib = list_entry(entry, struct fib, fiblink);
			/*
			 *	We will process the FIB here or pass it to a
			 *	worker thread that is TBD. We really can't
			 *	do anything at this point since we don't have
			 *	anything defined for this thread to do.
			 */
			hw_fib = fib->hw_fib;
			memset(fib, 0, sizeof(struct fib));
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->hw_fib = hw_fib;
			fib->data = hw_fib->data;
			fib->dev = dev;
			/*
			 *	We only handle AifRequest fibs from the adapter.
			 */
			aifcmd = (struct aac_aifcmd *) hw_fib->data;
			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
				/* Handle Driver Notify Events */
				aac_handle_aif(dev, fib);
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			} else {
				struct list_head *entry;
				/* The u32 here is important and intended. We are using
				   32bit wrapping time to fit the adapter field */

				u32 time_now, time_last;
				unsigned long flagv;
				unsigned num;
				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
				struct fib ** fib_pool, ** fib_p;

				/* Sniff events */
				if ((aifcmd->command ==
				     cpu_to_le32(AifCmdEventNotify)) ||
				    (aifcmd->command ==
				     cpu_to_le32(AifCmdJobProgress))) {
					aac_handle_aif(dev, fib);
				}

				time_now = jiffies/HZ;

				/*
				 * Warning: no sleep allowed while
				 * holding spinlock. We take the estimate
				 * and pre-allocate a set of fibs outside the
				 * lock.
				 */
				num = le32_to_cpu(dev->init->AdapterFibsSize)
				    / sizeof(struct hw_fib); /* some extra */
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				while (entry != &dev->fib_list) {
					entry = entry->next;
					++num;
				}
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				hw_fib_pool = NULL;
				fib_pool = NULL;
				if (num
				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
					hw_fib_p = hw_fib_pool;
					fib_p = fib_pool;
					while (hw_fib_p < &hw_fib_pool[num]) {
						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
							--hw_fib_p;
							break;
						}
						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
							kfree(*(--hw_fib_p));
							break;
						}
					}
					if ((num = hw_fib_p - hw_fib_pool) == 0) {
						kfree(fib_pool);
						fib_pool = NULL;
						kfree(hw_fib_pool);
						hw_fib_pool = NULL;
					}
				} else if (hw_fib_pool) {
					kfree(hw_fib_pool);
					hw_fib_pool = NULL;
				}
				spin_lock_irqsave(&dev->fib_lock, flagv);
				entry = dev->fib_list.next;
				/*
				 * For each Context that is on the
				 * fibctxList, make a copy of the
				 * fib, and then set the event to wake up the
				 * thread that is waiting for it.
				 */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (entry != &dev->fib_list) {
					/*
					 * Extract the fibctx
					 */
					fibctx = list_entry(entry, struct aac_fib_context, next);
					/*
					 * Check if the queue is getting
					 * backlogged
					 */
					if (fibctx->count > 20)
					{
						/*
						 * It's *not* jiffies folks,
						 * but jiffies / HZ so do not
						 * panic ...
						 */
						time_last = fibctx->jiffies;
						/*
						 * Has it been > 2 minutes
						 * since the last read off
						 * the queue?
						 */
						if ((time_now - time_last) > 120) {
							entry = entry->next;
							aac_close_fib_context(dev, fibctx);
							continue;
						}
					}
					/*
					 * Warning: no sleep allowed while
					 * holding spinlock
					 */
					if (hw_fib_p < &hw_fib_pool[num]) {
						hw_newfib = *hw_fib_p;
						*(hw_fib_p++) = NULL;
						newfib = *fib_p;
						*(fib_p++) = NULL;
						/*
						 * Make the copy of the FIB
						 */
						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
						memcpy(newfib, fib, sizeof(struct fib));
						newfib->hw_fib = hw_newfib;
						/*
						 * Put the FIB onto the
						 * fibctx's fibs
						 */
						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
						fibctx->count++;
						/*
						 * Set the event to wake up the
						 * thread that is waiting.
						 */
						up(&fibctx->wait_sem);
					} else {
						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
					}
					entry = entry->next;
				}
				/*
				 *	Set the status of this FIB
				 */
				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
				aac_fib_adapter_complete(fib, sizeof(u32));
				spin_unlock_irqrestore(&dev->fib_lock, flagv);
				/* Free up the remaining resources */
				hw_fib_p = hw_fib_pool;
				fib_p = fib_pool;
				while (hw_fib_p < &hw_fib_pool[num]) {
					if (*hw_fib_p)
						kfree(*hw_fib_p);
					if (*fib_p)
						kfree(*fib_p);
					++fib_p;
					++hw_fib_p;
				}
				if (fib_pool)
					kfree(fib_pool);
				if (hw_fib_pool)
					kfree(hw_fib_pool);
			}
			kfree(fib);
			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
		}
		/*
		 *	There are no more AIF's
		 */
		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
		schedule();

		if (kthread_should_stop())
			break;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}
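/*
 * Usage sketch (illustrative only, not part of this file): an init path
 * would start and stop this thread roughly as follows, with the thread
 * name here purely hypothetical:
 *
 *	dev->thread = kthread_run(aac_command_thread, dev,
 *				  "aacraid_example");
 *	if (IS_ERR(dev->thread))
 *		return PTR_ERR(dev->thread);
 *	...
 *	kthread_stop(dev->thread);	// wakes the wait, loop sees
 *					// kthread_should_stop() and exits
 */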