// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>

#include "aacraid.h"

/**
 * aac_response_normal - Handle command replies
 * @q: Queue to read from
 *
 * This DPC routine will be run when the adapter interrupts us to let us
 * know there is a response on our normal priority queue. We will pull off
 * all QEs there are and wake up all the waiters before exiting. We will
 * take a spinlock out on the queue before operating on it.
 */
unsigned int aac_response_normal(struct aac_queue *q)
{
        struct aac_dev *dev = q->dev;
        struct aac_entry *entry;
        struct hw_fib *hwfib;
        struct fib *fib;
        int consumed = 0;
        unsigned long flags, mflags;

        spin_lock_irqsave(q->lock, flags);
        /*
         * Keep pulling response QEs off the response queue and waking
         * up the waiters until there are no more QEs. We then return
         * back to the system. If no response was requested we just
         * deallocate the Fib here and continue.
         */
        while (aac_consumer_get(dev, q, &entry))
        {
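                /*
                 * The QE address field is an encoded fib reference, not
                 * a pointer: bit 0 flags a fast-path response (no status
                 * payload was DMAed back) and the upper bits, recovered
                 * by index >> 2, select the fib in dev->fibs.
                 */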
                int fast;
                u32 index = le32_to_cpu(entry->addr);

                fast = index & 0x01;
                fib = &dev->fibs[index >> 2];
                hwfib = fib->hw_fib_va;

                aac_consumer_free(dev, q, HostNormRespQueue);
                /*
                 * Remove this fib from the Outstanding I/O queue.
                 * But only if it has not already been timed out.
                 *
                 * If the fib has been timed out already, then just
                 * continue. The caller has already been notified that
                 * the fib timed out.
                 */
                atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

                if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
                        spin_unlock_irqrestore(q->lock, flags);
                        aac_fib_complete(fib);
                        aac_fib_free(fib);
                        spin_lock_irqsave(q->lock, flags);
                        continue;
                }
                spin_unlock_irqrestore(q->lock, flags);
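
                /*
                 * From here on the reply is processed without q->lock
                 * held: completion callbacks may queue new commands and
                 * must not run under the queue lock.
                 */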

                if (fast) {
                        /*
                         * Doctor the fib
                         */
                        *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
                        hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
                        fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
                }

                FIB_COUNTER_INCREMENT(aac_config.FibRecved);
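
                /*
                 * A NuFileSystem reply with anything set in the upper
                 * 16 bits of its status word is not treated as a real
                 * error; it is normalized to ST_OK below.
                 */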

                if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
                {
                        __le32 *pstatus = (__le32 *)hwfib->data;

                        if (*pstatus & cpu_to_le32(0xffff0000))
                                *pstatus = cpu_to_le32(ST_OK);
                }
                if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
                {
                        if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected)) {
                                FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
                        } else {
                                FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
                        }
                        /*
                         * NOTE: we cannot touch the fib after this
                         * call, because it may have been deallocated.
                         */
                        fib->callback(fib->callback_data, fib);
                } else {
                        unsigned long flagv;

                        spin_lock_irqsave(&fib->event_lock, flagv);
                        if (!fib->done) {
                                fib->done = 1;
                                complete(&fib->event_wait);
                        }
                        spin_unlock_irqrestore(&fib->event_lock, flagv);

                        spin_lock_irqsave(&dev->manage_lock, mflags);
                        dev->management_fib_count--;
                        spin_unlock_irqrestore(&dev->manage_lock, mflags);
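
                        /*
                         * done == 2 means the waiter already gave up on
                         * this fib, so responsibility for tearing it
                         * down falls to us here.
                         */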
                        FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
                        if (fib->done == 2) {
                                spin_lock_irqsave(&fib->event_lock, flagv);
                                fib->done = 0;
                                spin_unlock_irqrestore(&fib->event_lock, flagv);
                                aac_fib_complete(fib);
                                aac_fib_free(fib);
                        }
                }
                consumed++;
                spin_lock_irqsave(q->lock, flags);
        }

        if (consumed > aac_config.peak_fibs)
                aac_config.peak_fibs = consumed;
        if (consumed == 0)
                aac_config.zero_fibs++;

        spin_unlock_irqrestore(q->lock, flags);

        return 0;
}

/**
 * aac_command_normal - Handle commands
 * @q: queue to process
 *
 * This DPC routine will be queued when the adapter interrupts us to
 * let us know there is a command on our normal priority queue. We will
 * pull off all QEs there are and wake up all the waiters before exiting.
 * We will take a spinlock out on the queue before operating on it.
 */
unsigned int aac_command_normal(struct aac_queue *q)
{
        struct aac_dev *dev = q->dev;
        struct aac_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(q->lock, flags);

        /*
         * Keep pulling response QEs off the response queue and waking
         * up the waiters until there are no more QEs. We then return
         * back to the system.
         */
        while (aac_consumer_get(dev, q, &entry))
        {
                struct fib fibctx;
                struct hw_fib *hw_fib;
                u32 index;
                struct fib *fib = &fibctx;

                index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
                hw_fib = &dev->aif_base_va[index];

                /*
                 * Allocate a FIB at all costs. For non queued stuff
                 * we can just use the stack so we are happy. We need
                 * a fib object in order to manage the linked lists.
                 */
                if (dev->aif_thread)
                        if ((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
                                fib = &fibctx;
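
                /*
                 * Either way we now have a fib: kmalloc'ed when the aif
                 * thread can consume it from the command queue, or the
                 * on-stack fibctx when it must be handled inline.
                 */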
                memset(fib, 0, sizeof(struct fib));
                INIT_LIST_HEAD(&fib->fiblink);
                fib->type = FSAFS_NTC_FIB_CONTEXT;
                fib->size = sizeof(struct fib);
                fib->hw_fib_va = hw_fib;
                fib->data = hw_fib->data;
                fib->dev = dev;

                if (dev->aif_thread && fib != &fibctx) {
                        list_add_tail(&fib->fiblink, &q->cmdq);
                        aac_consumer_free(dev, q, HostNormCmdQueue);
                        wake_up_interruptible(&q->cmdready);
                } else {
                        aac_consumer_free(dev, q, HostNormCmdQueue);
                        spin_unlock_irqrestore(q->lock, flags);
                        /*
                         * Set the status of this FIB
                         */
                        *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
                        aac_fib_adapter_complete(fib, sizeof(u32));
                        spin_lock_irqsave(q->lock, flags);
                }
        }
        spin_unlock_irqrestore(q->lock, flags);
        return 0;
}

/*
 * aac_aif_callback
 * @context: the context set in the fib - here it is the fib itself
 * @fibptr: pointer to the fib
 *
 * Handles the AIFs - new method (SRC)
 */

static void aac_aif_callback(void *context, struct fib *fibptr)
{
        struct fib *fibctx;
        struct aac_dev *dev;
        struct aac_aifcmd *cmd;

        fibctx = (struct fib *)context;
        BUG_ON(fibptr == NULL);
        dev = fibptr->dev;

        if ((fibptr->hw_fib_va->header.XferState &
            cpu_to_le32(NoMoreAifDataAvailable)) ||
                dev->sa_firmware) {
                aac_fib_complete(fibptr);
                aac_fib_free(fibptr);
                return;
        }
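
        /*
         * Deliver the event we just received, then immediately re-arm
         * the same fib with a fresh AifReqEvent so the adapter always
         * has an AIF request outstanding.
         */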
        aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);

        aac_fib_init(fibctx);
        cmd = (struct aac_aifcmd *)fib_data(fibctx);
        cmd->command = cpu_to_le32(AifReqEvent);

        aac_fib_send(AifRequest,
                fibctx,
                sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
                FsaNormal,
                0, 1,
                (fib_callback)aac_aif_callback, fibctx);
}

/**
 * aac_intr_normal - Handle command replies
 * @dev: Device
 * @index: completion reference
 * @isAif: 1 for a legacy AIF, 2 for a new (SRC) AIF, 0 for a reply
 * @isFastResponse: non-zero if this is a fast response
 * @aif_fib: AIF data to copy, may be NULL
 *
 * This DPC routine will be run when the adapter interrupts us to let us
 * know there is a response on our normal priority queue. We will pull off
 * all QEs there are and wake up all the waiters before exiting.
 */
unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
                        int isFastResponse, struct hw_fib *aif_fib)
{
        unsigned long mflags;

        dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
        if (isAif == 1) {       /* AIF - common */
                struct hw_fib *hw_fib;
                struct fib *fib;
                struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
                unsigned long flags;

                /*
                 * Allocate a FIB. For non queued stuff we can just use
                 * the stack so we are happy. We need a fib object in order to
                 * manage the linked lists.
                 */
                if ((!dev->aif_thread)
                 || (!(fib = kzalloc(sizeof(struct fib), GFP_ATOMIC))))
                        return 1;
                if (!(hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC))) {
                        kfree(fib);
                        return 1;
                }
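
                /*
                 * The event payload can come from three places: with
                 * sa_firmware, index carries only the event type;
                 * otherwise the caller either passed the fib directly
                 * or index is an offset into the adapter's mapped
                 * register space.
                 */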
                if (dev->sa_firmware) {
                        fib->hbacmd_size = index;       /* store event type */
                } else if (aif_fib != NULL) {
                        memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
                } else {
                        memcpy(hw_fib, (struct hw_fib *)
                                (((uintptr_t)(dev->regs.sa)) + index),
                                sizeof(struct hw_fib));
                }
                INIT_LIST_HEAD(&fib->fiblink);
                fib->type = FSAFS_NTC_FIB_CONTEXT;
                fib->size = sizeof(struct fib);
                fib->hw_fib_va = hw_fib;
                fib->data = hw_fib->data;
                fib->dev = dev;

                spin_lock_irqsave(q->lock, flags);
                list_add_tail(&fib->fiblink, &q->cmdq);
                wake_up_interruptible(&q->cmdready);
                spin_unlock_irqrestore(q->lock, flags);
                return 1;
        } else if (isAif == 2) {        /* AIF - new (SRC) */
                struct fib *fibctx;
                struct aac_aifcmd *cmd;
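
                /*
                 * On SRC-class adapters the driver keeps one
                 * asynchronous AifReqEvent fib outstanding so the
                 * adapter has somewhere to post events;
                 * aac_aif_callback() re-arms it each time it completes.
                 */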
                fibctx = aac_fib_alloc(dev);
                if (!fibctx)
                        return 1;
                aac_fib_init(fibctx);

                cmd = (struct aac_aifcmd *)fib_data(fibctx);
                cmd->command = cpu_to_le32(AifReqEvent);

                return aac_fib_send(AifRequest,
                        fibctx,
                        sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
                        FsaNormal,
                        0, 1,
                        (fib_callback)aac_aif_callback, fibctx);
        } else {
                struct fib *fib = &dev->fibs[index];
                int start_callback = 0;

                /*
                 * Remove this fib from the Outstanding I/O queue.
                 * But only if it has not already been timed out.
                 *
                 * If the fib has been timed out already, then just
                 * continue. The caller has already been notified that
                 * the fib timed out.
                 */
                atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);

                if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
                        aac_fib_complete(fib);
                        aac_fib_free(fib);
                        return 0;
                }

                FIB_COUNTER_INCREMENT(aac_config.FibRecved);

                if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
                        if (isFastResponse)
                                fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;

                        if (fib->callback) {
                                start_callback = 1;
                        } else {
                                unsigned long flagv;
                                int completed = 0;

                                dprintk((KERN_INFO "event_wait up\n"));
                                spin_lock_irqsave(&fib->event_lock, flagv);
                                if (fib->done == 2) {
                                        fib->done = 1;
                                        completed = 1;
                                } else {
                                        fib->done = 1;
                                        complete(&fib->event_wait);
                                }
                                spin_unlock_irqrestore(&fib->event_lock, flagv);

                                spin_lock_irqsave(&dev->manage_lock, mflags);
                                dev->management_fib_count--;
                                spin_unlock_irqrestore(&dev->manage_lock,
                                        mflags);

                                FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
                                if (completed)
                                        aac_fib_complete(fib);
                        }
                } else {
                        struct hw_fib *hwfib = fib->hw_fib_va;
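
                        /*
                         * A fast response carries no status payload;
                         * success is implied, so fill in ST_OK and mark
                         * the fib as processed ourselves.
                         */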
                        if (isFastResponse) {
                                /* Doctor the fib */
                                *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
                                hwfib->header.XferState |=
                                        cpu_to_le32(AdapterProcessed);
                                fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
                        }

                        if (hwfib->header.Command ==
                                cpu_to_le16(NuFileSystem)) {
                                __le32 *pstatus = (__le32 *)hwfib->data;

                                if (*pstatus & cpu_to_le32(0xffff0000))
                                        *pstatus = cpu_to_le32(ST_OK);
                        }
                        if (hwfib->header.XferState &
                                cpu_to_le32(NoResponseExpected | Async)) {
                                if (hwfib->header.XferState & cpu_to_le32(
                                        NoResponseExpected)) {
                                        FIB_COUNTER_INCREMENT(
                                                aac_config.NoResponseRecved);
                                } else {
                                        FIB_COUNTER_INCREMENT(
                                                aac_config.AsyncRecved);
                                }
                                start_callback = 1;
                        } else {
                                unsigned long flagv;
                                int completed = 0;

                                dprintk((KERN_INFO "event_wait up\n"));
                                spin_lock_irqsave(&fib->event_lock, flagv);
                                if (fib->done == 2) {
                                        fib->done = 1;
                                        completed = 1;
                                } else {
                                        fib->done = 1;
                                        complete(&fib->event_wait);
                                }
                                spin_unlock_irqrestore(&fib->event_lock, flagv);

                                spin_lock_irqsave(&dev->manage_lock, mflags);
                                dev->management_fib_count--;
                                spin_unlock_irqrestore(&dev->manage_lock,
                                        mflags);

                                FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
                                if (completed)
                                        aac_fib_complete(fib);
                        }
                }

                if (start_callback) {
                        /*
                         * NOTE: we cannot touch the fib after this
                         *       call, because it may have been deallocated.
                         */
                        if (likely(fib->callback && fib->callback_data)) {
                                fib->callback(fib->callback_data, fib);
                        } else {
                                aac_fib_complete(fib);
                                aac_fib_free(fib);
                        }
                }

                return 0;
        }
}