/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  dpcsup.c
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/semaphore.h>

#include "aacraid.h"

/**
 *	aac_response_normal	-	Handle command replies
 *	@q: Queue to read from
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all the QEs there are and wake up all the waiters before exiting. We
 *	will take a spinlock out on the queue before operating on it.
 */
unsigned int aac_response_normal(struct aac_queue * q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	struct hw_fib * hwfib;
	struct fib * fib;
	int consumed = 0;
	unsigned long flags, mflags;

	spin_lock_irqsave(q->lock, flags);
	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system. If no response was requested we just
	 *	deallocate the Fib here and continue.
	 */
	while(aac_consumer_get(dev, q, &entry))
	{
		int fast;
		u32 index = le32_to_cpu(entry->addr);
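
		/*
		 * The entry's addr field packs two things, as the decoding
		 * below assumes: bit 0 flags a fast response, and the upper
		 * bits, shifted down by two, index into dev->fibs.
		 */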
		fast = index & 0x01;
		fib = &dev->fibs[index >> 2];
		hwfib = fib->hw_fib_va;

		aac_consumer_free(dev, q, HostNormRespQueue);
		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			spin_unlock_irqrestore(q->lock, flags);
			aac_fib_complete(fib);
			aac_fib_free(fib);
			spin_lock_irqsave(q->lock, flags);
			continue;
		}
		spin_unlock_irqrestore(q->lock, flags);
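
		/*
		 * A fast response carries no status payload from the
		 * adapter, so the host doctors the fib in place: fabricate
		 * an ST_OK result and mark the fib as processed.
		 */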
		if (fast) {
			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
			fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
		}
		FIB_COUNTER_INCREMENT(aac_config.FibRecved);
		if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
		{
			__le32 *pstatus = (__le32 *)hwfib->data;

			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
		{
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			else
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			/*
			 *	NOTE: we cannot touch the fib after this
			 *	call, because it may have been deallocated.
			 */
			fib->callback(fib->callback_data, fib);
		} else {
			unsigned long flagv;

			spin_lock_irqsave(&fib->event_lock, flagv);
			if (!fib->done) {
				fib->done = 1;
				up(&fib->event_wait);
			}
			spin_unlock_irqrestore(&fib->event_lock, flagv);
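
			/*
			 * This was a synchronous (management) fib; release
			 * the slot it held against management_fib_count.
			 */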
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
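			/*
			 * done == 2 appears to mean the waiter has already
			 * given up on this fib, so nobody else will reap it;
			 * complete and free it here.
			 */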
			if (fib->done == 2) {
				spin_lock_irqsave(&fib->event_lock, flagv);
				fib->done = 0;
				spin_unlock_irqrestore(&fib->event_lock, flagv);
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		consumed++;
		spin_lock_irqsave(q->lock, flags);
	}

	if (consumed > aac_config.peak_fibs)
		aac_config.peak_fibs = consumed;
	if (consumed == 0)
		aac_config.zero_fibs++;
	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}

/**
 *	aac_command_normal	-	Handle commands
 *	@q: queue to process
 *
 *	This DPC routine will be queued when the adapter interrupts us to
 *	let us know there is a command on our normal priority queue. We will
 *	pull off all the QEs there are and wake up all the waiters before
 *	exiting. We will take a spinlock out on the queue before operating
 *	on it.
 */
unsigned int aac_command_normal(struct aac_queue *q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);
	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system.
	 */
	while(aac_consumer_get(dev, q, &entry))
	{
		struct fib fibctx;
		struct hw_fib * hw_fib;
		u32 index;
		struct fib *fib = &fibctx;

		index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
		hw_fib = &dev->aif_base_va[index];
		/*
		 *	Allocate a FIB at all costs. For non-queued stuff
		 *	we can just use the stack so we are happy. We need
		 *	a fib object in order to manage the linked lists.
		 */
		if (dev->aif_thread)
			if ((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
				fib = &fibctx;

		memset(fib, 0, sizeof(struct fib));
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;
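
		/*
		 * Two delivery paths from here: a heap-allocated fib is
		 * queued for the AIF thread, while the stack-fib fallback
		 * is completed straight back to the adapter.
		 */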
		if (dev->aif_thread && fib != &fibctx) {
			list_add_tail(&fib->fiblink, &q->cmdq);
			aac_consumer_free(dev, q, HostNormCmdQueue);
			wake_up_interruptible(&q->cmdready);
		} else {
			aac_consumer_free(dev, q, HostNormCmdQueue);
			spin_unlock_irqrestore(q->lock, flags);
			/*
			 *	Set the status of this FIB
			 */
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, sizeof(u32));
			spin_lock_irqsave(q->lock, flags);
		}
	}
	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}

/*
 * aac_aif_callback
 * @context: the context set in the fib - here it is the AIF fib itself
 * @fibptr: pointer to the completed fib
 *
 * Handles the AIFs - new method (SRC)
 */
static void aac_aif_callback(void *context, struct fib * fibptr)
{
	struct fib *fibctx;
	struct aac_dev *dev;
	struct aac_aifcmd *cmd;
	int status;

	fibctx = (struct fib *)context;
	BUG_ON(fibptr == NULL);
	dev = fibptr->dev;
	if ((fibptr->hw_fib_va->header.XferState &
	    cpu_to_le32(NoMoreAifDataAvailable)) ||
	    dev->sa_firmware) {
		aac_fib_complete(fibptr);
		aac_fib_free(fibptr);
		return;
	}
	aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);
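
	/*
	 * Hand the received AIF to the common AIF path (isAif == 1), then
	 * re-arm the original fib as a fresh AifReqEvent so the adapter
	 * always has an outstanding AIF request to complete.
	 */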
	aac_fib_init(fibctx);
	cmd = (struct aac_aifcmd *) fib_data(fibctx);
	cmd->command = cpu_to_le32(AifReqEvent);

	status = aac_fib_send(AifRequest,
		fibctx,
		sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
		FsaNormal,
		0, 1,
		(fib_callback)aac_aif_callback, fibctx);
}

/**
 *	aac_intr_normal	-	Handle command replies
 *	@dev: Device
 *	@index: completion reference
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all the QEs there are and wake up all the waiters before exiting.
 */
unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
	int isFastResponse, struct hw_fib *aif_fib)
{
	unsigned long mflags;

	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
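	/*
	 * isAif selects the path taken below: 1 = common AIF queued to the
	 * host normal command queue, 2 = new (SRC) AIF re-requested via a
	 * sent fib, anything else = completion of an outstanding fib at
	 * dev->fibs[index].
	 */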
	if (isAif == 1) {	/* AIF - common */
		struct hw_fib * hw_fib;
		struct fib * fib;
		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
		unsigned long flags;
		/*
		 *	Allocate a FIB. For non-queued stuff we can just use
		 *	the stack so we are happy. We need a fib object in
		 *	order to manage the linked lists.
		 */
		if ((!dev->aif_thread)
		 || (!(fib = kzalloc(sizeof(struct fib), GFP_ATOMIC))))
			return 1;
		if (!(hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC))) {
			kfree(fib);
			return 1;
		}
		if (dev->sa_firmware) {
			fib->hbacmd_size = index;	/* store event type */
		} else if (aif_fib != NULL) {
			memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
		} else {
			memcpy(hw_fib, (struct hw_fib *)
				(((uintptr_t)(dev->regs.sa)) + index),
				sizeof(struct hw_fib));
		}
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;
		spin_lock_irqsave(q->lock, flags);
		list_add_tail(&fib->fiblink, &q->cmdq);
		wake_up_interruptible(&q->cmdready);
		spin_unlock_irqrestore(q->lock, flags);
		return 1;
	} else if (isAif == 2) {	/* AIF - new (SRC) */
		struct fib *fibctx;
		struct aac_aifcmd *cmd;

		fibctx = aac_fib_alloc(dev);
		if (!fibctx)
			return 1;
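
		/*
		 * Build and send an AifReqEvent fib; aac_aif_callback
		 * re-arms it on every completion, keeping one AIF request
		 * outstanding at the adapter.
		 */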
		aac_fib_init(fibctx);
		cmd = (struct aac_aifcmd *) fib_data(fibctx);
		cmd->command = cpu_to_le32(AifReqEvent);

		return aac_fib_send(AifRequest,
			fibctx,
			sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
			FsaNormal,
			0, 1,
			(fib_callback)aac_aif_callback, fibctx);
	} else {
		struct fib *fib = &dev->fibs[index];
		int start_callback = 0;
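
		/*
		 * start_callback is set on the paths below; the completion
		 * callback itself runs at the tail of this function, after
		 * the response bookkeeping is done.
		 */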
		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			aac_fib_complete(fib);
			aac_fib_free(fib);
			return 0;
		}
		FIB_COUNTER_INCREMENT(aac_config.FibRecved);
		if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {

			if (isFastResponse)
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
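
			/*
			 * If a callback is registered, defer it; otherwise a
			 * thread is sleeping on event_wait. done == 2 means
			 * that waiter has already given up, so the fib is
			 * completed here instead of waking anyone.
			 */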
			if (fib->callback) {
				start_callback = 1;
			} else {
				unsigned long flagv;
				int complete = 0;

				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
				if (fib->done == 2) {
					fib->done = 1;
					complete = 1;
				} else {
					fib->done = 1;
					up(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);
				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
				if (complete)
					aac_fib_complete(fib);
			}
		} else {
			struct hw_fib *hwfib = fib->hw_fib_va;
			if (isFastResponse) {
				/* Doctor the fib */
				*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
				hwfib->header.XferState |=
					cpu_to_le32(AdapterProcessed);
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
			}
			if (hwfib->header.Command ==
				cpu_to_le16(NuFileSystem)) {
				__le32 *pstatus = (__le32 *)hwfib->data;

				if (*pstatus & cpu_to_le32(0xffff0000))
					*pstatus = cpu_to_le32(ST_OK);
			}
			if (hwfib->header.XferState &
				cpu_to_le32(NoResponseExpected | Async)) {
				if (hwfib->header.XferState & cpu_to_le32(
					NoResponseExpected))
					FIB_COUNTER_INCREMENT(
						aac_config.NoResponseRecved);
				else
					FIB_COUNTER_INCREMENT(
						aac_config.AsyncRecved);
				start_callback = 1;
			} else {
				unsigned long flagv;
				int complete = 0;
				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
				if (fib->done == 2) {
					fib->done = 1;
					complete = 1;
				} else {
					fib->done = 1;
					up(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);
				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
				if (complete)
					aac_fib_complete(fib);
			}
		}
		if (start_callback) {
			/*
			 * NOTE: we cannot touch the fib after this
			 *	 call, because it may have been deallocated.
			 */
			if (likely(fib->callback && fib->callback_data)) {
				fib->callback(fib->callback_data, fib);
			} else {
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		return 0;
	}
}