/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <asm/semaphore.h>

#include "aacraid.h"
/**
 *	aac_response_normal	-	Handle command replies
 *	@q: Queue to read from
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all QE there are and wake up all the waiters before exiting. We will
 *	take a spinlock out on the queue before operating on it.
 */
unsigned int aac_response_normal(struct aac_queue * q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	struct hw_fib * hwfib;
	struct fib * fib;
	int consumed = 0;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);
	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system. If no response was requested we just
	 *	deallocate the Fib here and continue.
	 */
	while(aac_consumer_get(dev, q, &entry))
	{
		int fast;
		/*
		 *	Bit 0 of the queue entry address flags a "fast"
		 *	response; the remaining bits index the fib table.
		 */
		u32 index = le32_to_cpu(entry->addr);
		fast = index & 0x01;
		fib = &dev->fibs[index >> 1];
		hwfib = fib->hw_fib;

		aac_consumer_free(dev, q, HostNormRespQueue);
		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		if (!(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			list_del(&fib->queue);
			dev->queues->queue[AdapNormCmdQueue].numpending--;
		} else {
			printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags);
			printk(KERN_DEBUG "aacraid: hwfib=%p fib index=%i fib=%p\n", hwfib, hwfib->header.SenderData, fib);
			continue;
		}
		spin_unlock_irqrestore(q->lock, flags);
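		/*
		 *	Note: the queue lock stays dropped while the fib is
		 *	completed below; the callback and wake-up paths must
		 *	not run under the response queue spinlock. The lock
		 *	is re-taken at the bottom of the loop.
		 */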
		if (fast) {
			/*
			 *	Fast responses carry no status from the
			 *	adapter: doctor the fib with ST_OK and mark
			 *	it adapter-processed.
			 */
			*(u32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
		}
		FIB_COUNTER_INCREMENT(aac_config.FibRecved);
		if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
		{
			u32 *pstatus = (u32 *)hwfib->data;
			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
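		/*
		 *	Async or no-response fibs are finished via their
		 *	registered callback; everything else has a caller
		 *	sleeping on the fib's event semaphore.
		 */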
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
		{
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			else
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			/*
			 *	NOTE: we cannot touch the fib after this
			 *	call, because it may have been deallocated.
			 */
			fib->callback(fib->callback_data, fib);
		} else {
			unsigned long flagv;
			/*
			 *	A caller is sleeping on this fib's event
			 *	semaphore; mark the fib done and wake it.
			 */
			spin_lock_irqsave(&fib->event_lock, flagv);
			fib->done = 1;
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
		}
		consumed++;
		spin_lock_irqsave(q->lock, flags);
	}
	if (consumed > aac_config.peak_fibs)
		aac_config.peak_fibs = consumed;
	if (consumed == 0)
		aac_config.zero_fibs++;
	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}

/**
 *	aac_command_normal	-	handle commands
 *	@q: queue to process
 *
 *	This DPC routine will be queued when the adapter interrupts us to
 *	let us know there is a command on our normal priority queue. We will
 *	pull off all QE there are and wake up all the waiters before exiting.
 *	We will take a spinlock out on the queue before operating on it.
 */
unsigned int aac_command_normal(struct aac_queue *q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);
	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system.
	 */
	while(aac_consumer_get(dev, q, &entry))
	{
		struct fib fibctx;
		struct hw_fib * hw_fib;
		u32 index;
		struct fib *fib = &fibctx;

		index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
		hw_fib = &dev->aif_base_va[index];
		/*
		 *	Allocate a FIB at all costs. For non queued stuff
		 *	we can just use the stack so we are happy. We need
		 *	a fib object in order to manage the linked lists.
		 */
		if (dev->aif_thread)
			if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
				fib = &fibctx;	/* fall back to the on-stack fib */

		memset(fib, 0, sizeof(struct fib));
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;
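
		/*
		 *	If the AIF thread is running and we managed to
		 *	allocate a real fib, queue the command for it;
		 *	otherwise complete the FIB straight back to the
		 *	adapter from here.
		 */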
		if (dev->aif_thread && fib != &fibctx) {
			list_add_tail(&fib->fiblink, &q->cmdq);
			aac_consumer_free(dev, q, HostNormCmdQueue);
			wake_up_interruptible(&q->cmdready);
		} else {
			aac_consumer_free(dev, q, HostNormCmdQueue);
			spin_unlock_irqrestore(q->lock, flags);
			/*
			 *	Set the status of this FIB
			 */
			*(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
			fib_adapter_complete(fib, sizeof(u32));
			spin_lock_irqsave(q->lock, flags);
		}
	}

	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}