// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>

#include "aacraid.h"
/**
 * aac_response_normal - Handle command replies
 * @q: Queue to read from
 *
 * This DPC routine will be run when the adapter interrupts us to let us
 * know there is a response on our normal priority queue. We will pull off
 * all QEs there are and wake up all the waiters before exiting. We will
 * take a spinlock out on the queue before operating on it.
 */
unsigned int aac_response_normal(struct aac_queue *q)
{
	struct aac_dev *dev = q->dev;
	struct aac_entry *entry;
	struct hw_fib *hwfib;
	struct fib *fib;
	int consumed = 0;
	unsigned long flags, mflags;

	spin_lock_irqsave(q->lock, flags);
	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system. If no response was requested we just
	 *	deallocate the Fib here and continue.
	 */
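	/*
	 * A note on the entry format (an assumption drawn from the decode
	 * below, not from firmware documentation): entry->addr appears to
	 * carry the fib table index in its upper bits (hence index >> 2)
	 * and a "fast response" flag in bit 0, telling the host to fill in
	 * the ST_OK status itself rather than read it from the adapter.
	 */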
	while (aac_consumer_get(dev, q, &entry)) {
		int fast;
		u32 index = le32_to_cpu(entry->addr);

		fast = index & 0x01;
		fib = &dev->fibs[index >> 2];
		hwfib = fib->hw_fib_va;

		aac_consumer_free(dev, q, HostNormRespQueue);
		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			spin_unlock_irqrestore(q->lock, flags);
			aac_fib_complete(fib);
			aac_fib_free(fib);
			spin_lock_irqsave(q->lock, flags);
			continue;
		}
		spin_unlock_irqrestore(q->lock, flags);
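		/*
		 * The queue lock is dropped across the completion work
		 * below; it is re-taken at the bottom of the loop before
		 * the next aac_consumer_get().
		 */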
		if (fast) {
			/*
			 *	Doctor the fib
			 */
			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
			fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);
		if (hwfib->header.Command == cpu_to_le16(NuFileSystem)) {
			__le32 *pstatus = (__le32 *)hwfib->data;

			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) {
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			else
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			/*
			 *	NOTE: we cannot touch the fib after this
			 *	call, because it may have been deallocated.
			 */
			fib->callback(fib->callback_data, fib);
		} else {
			unsigned long flagv;

			spin_lock_irqsave(&fib->event_lock, flagv);
			if (!fib->done) {
				fib->done = 1;
				complete(&fib->event_wait);
			}
			spin_unlock_irqrestore(&fib->event_lock, flagv);

			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);

			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
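			/*
			 * done == 2 appears to mark a request whose waiter
			 * gave up (e.g. an interrupted wait in
			 * aac_fib_send), so nobody will reap this fib;
			 * complete and free it here instead.
			 */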
			if (fib->done == 2) {
				spin_lock_irqsave(&fib->event_lock, flagv);
				fib->done = 0;
				spin_unlock_irqrestore(&fib->event_lock, flagv);
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		consumed++;
		spin_lock_irqsave(q->lock, flags);
	}

	if (consumed > aac_config.peak_fibs)
		aac_config.peak_fibs = consumed;
	if (consumed == 0)
		aac_config.zero_fibs++;

	spin_unlock_irqrestore(q->lock, flags);

	return 0;
}
/**
 * aac_command_normal - Handle commands
 * @q: queue to process
 *
 * This DPC routine will be queued when the adapter interrupts us to
 * let us know there is a command on our normal priority queue. We will
 * pull off all QEs there are and wake up all the waiters before exiting.
 * We will take a spinlock out on the queue before operating on it.
 */
unsigned int aac_command_normal(struct aac_queue *q)
{
	struct aac_dev *dev = q->dev;
	struct aac_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);
	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system.
	 */
	while (aac_consumer_get(dev, q, &entry)) {
		struct fib fibctx;
		struct hw_fib *hw_fib;
		u32 index;
		struct fib *fib = &fibctx;

		index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
		hw_fib = &dev->aif_base_va[index];
		/*
		 *	Allocate a FIB at all costs. For non queued stuff
		 *	we can just use the stack so we are happy. We need
		 *	a fib object in order to manage the linked lists
		 */
		if (dev->aif_thread)
			if ((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
				fib = &fibctx;

		memset(fib, 0, sizeof(struct fib));
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;
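		/*
		 * If the AIF thread is running and we managed to get a
		 * heap fib, queue the command to the thread; otherwise
		 * (stack-fib fallback) answer the adapter synchronously
		 * with ST_OK right here.
		 */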
		if (dev->aif_thread && fib != &fibctx) {
			list_add_tail(&fib->fiblink, &q->cmdq);
			aac_consumer_free(dev, q, HostNormCmdQueue);
			wake_up_interruptible(&q->cmdready);
		} else {
			aac_consumer_free(dev, q, HostNormCmdQueue);
			spin_unlock_irqrestore(q->lock, flags);
			/*
			 *	Set the status of this FIB
			 */
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, sizeof(u32));
			spin_lock_irqsave(q->lock, flags);
		}
	}
	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}
/*
 * aac_aif_callback
 * @context: the context set in the fib - here it is scsi cmd
 * @fibptr: pointer to the fib
 *
 * Handles the AIFs - new method (SRC)
 */
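/*
 * With the SRC-style interface the host keeps an AifRequest fib
 * outstanding with the adapter. Each completion delivers one event,
 * which is fed back through aac_intr_normal(), and the request is then
 * re-armed below, until the adapter signals NoMoreAifDataAvailable or
 * the sa_firmware path takes over.
 */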
static void aac_aif_callback(void *context, struct fib *fibptr)
{
	struct fib *fibctx;
	struct aac_dev *dev;
	struct aac_aifcmd *cmd;
	int status;

	fibctx = (struct fib *)context;
	BUG_ON(fibptr == NULL);
	dev = fibptr->dev;
	if ((fibptr->hw_fib_va->header.XferState &
	    cpu_to_le32(NoMoreAifDataAvailable)) ||
	    dev->sa_firmware) {
		aac_fib_complete(fibptr);
		aac_fib_free(fibptr);
		return;
	}

	aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);
	aac_fib_init(fibctx);
	cmd = (struct aac_aifcmd *)fib_data(fibctx);
	cmd->command = cpu_to_le32(AifReqEvent);

	status = aac_fib_send(AifRequest,
		fibctx,
		sizeof(struct hw_fib) - sizeof(struct aac_fibhdr),
		FsaNormal,
		0, 1,
		(fib_callback)aac_aif_callback, fibctx);
}
/*
 * aac_intr_normal - Handle command replies
 * @dev: Device
 * @index: completion reference
 * @isAif: AIF flavour (0 = normal reply, 1 = common AIF, 2 = new SRC AIF)
 * @isFastResponse: whether this is a fast response
 * @aif_fib: AIF fib supplied by the caller, if any
 *
 * This DPC routine will be run when the adapter interrupts us to let us
 * know there is a response on our normal priority queue. We will pull off
 * all QEs there are and wake up all the waiters before exiting.
 */
unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
	int isFastResponse, struct hw_fib *aif_fib)
{
	unsigned long mflags;

	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
	if (isAif == 1) {	/* AIF - common */
		struct hw_fib *hw_fib;
		struct fib *fib;
		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
		unsigned long flags;
		/*
		 *	Allocate a FIB. For non queued stuff we can just use
		 *	the stack so we are happy. We need a fib object in order to
		 *	manage the linked lists.
		 */
		if ((!dev->aif_thread)
		 || (!(fib = kzalloc(sizeof(struct fib), GFP_ATOMIC))))
			return 1;
		if (!(hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC))) {
			kfree(fib);
			return 1;
		}
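		/*
		 * Three delivery paths follow: sa_firmware adapters pass
		 * only an event type in index; otherwise the AIF is either
		 * handed to us directly in aif_fib or copied out of the
		 * adapter's mapped register space at offset index.
		 */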
		if (dev->sa_firmware) {
			fib->hbacmd_size = index;	/* store event type */
		} else if (aif_fib != NULL) {
			memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
		} else {
			memcpy(hw_fib, (struct hw_fib *)
				(((uintptr_t)(dev->regs.sa)) + index),
				sizeof(struct hw_fib));
		}
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;
		spin_lock_irqsave(q->lock, flags);
		list_add_tail(&fib->fiblink, &q->cmdq);
		wake_up_interruptible(&q->cmdready);
		spin_unlock_irqrestore(q->lock, flags);
		return 1;
	} else if (isAif == 2) {	/* AIF - new (SRC) */
		struct fib *fibctx;
		struct aac_aifcmd *cmd;

		fibctx = aac_fib_alloc(dev);
		if (!fibctx)
			return 1;
		aac_fib_init(fibctx);

		cmd = (struct aac_aifcmd *)fib_data(fibctx);
		cmd->command = cpu_to_le32(AifReqEvent);
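		/*
		 * Fire-and-forget send (wait = 0, reply = 1), with
		 * aac_aif_callback re-arming the request when the adapter
		 * completes it.
		 */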
		return aac_fib_send(AifRequest,
			fibctx,
			sizeof(struct hw_fib) - sizeof(struct aac_fibhdr),
			FsaNormal,
			0, 1,
			(fib_callback)aac_aif_callback, fibctx);
	} else {
		struct fib *fib = &dev->fibs[index];
		int start_callback = 0;
		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			aac_fib_complete(fib);
			aac_fib_free(fib);
			return 0;
		}
		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {

			if (isFastResponse)
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
			if (fib->callback) {
				start_callback = 1;
			} else {
				unsigned long flagv;
				int completed = 0;

				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
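				/*
				 * done == 2: the waiter has already given
				 * up, so mark the fib done and let the
				 * completed path below reap it.
				 */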
				if (fib->done == 2) {
					fib->done = 1;
					completed = 1;
				} else {
					fib->done = 1;
					complete(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);
				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
				if (completed)
					aac_fib_complete(fib);
			}
		} else {
			struct hw_fib *hwfib = fib->hw_fib_va;

			if (isFastResponse) {
				/* Doctor the fib */
				*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
				hwfib->header.XferState |=
					cpu_to_le32(AdapterProcessed);
				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
			}
			if (hwfib->header.Command ==
				cpu_to_le16(NuFileSystem)) {
				__le32 *pstatus = (__le32 *)hwfib->data;

				if (*pstatus & cpu_to_le32(0xffff0000))
					*pstatus = cpu_to_le32(ST_OK);
			}
			if (hwfib->header.XferState &
				cpu_to_le32(NoResponseExpected | Async)) {
				if (hwfib->header.XferState & cpu_to_le32(
					NoResponseExpected)) {
					FIB_COUNTER_INCREMENT(
						aac_config.NoResponseRecved);
				} else {
					FIB_COUNTER_INCREMENT(
						aac_config.AsyncRecved);
				}
				start_callback = 1;
			} else {
				unsigned long flagv;
				int completed = 0;

				dprintk((KERN_INFO "event_wait up\n"));
				spin_lock_irqsave(&fib->event_lock, flagv);
				if (fib->done == 2) {
					fib->done = 1;
					completed = 1;
				} else {
					fib->done = 1;
					complete(&fib->event_wait);
				}
				spin_unlock_irqrestore(&fib->event_lock, flagv);
				spin_lock_irqsave(&dev->manage_lock, mflags);
				dev->management_fib_count--;
				spin_unlock_irqrestore(&dev->manage_lock,
					mflags);

				FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
				if (completed)
					aac_fib_complete(fib);
			}
		}
		if (start_callback) {
			/*
			 * NOTE: we cannot touch the fib after this
			 *	call, because it may have been deallocated.
			 */
			if (likely(fib->callback && fib->callback_data)) {
				fib->callback(fib->callback_data, fib);
			} else {
				aac_fib_complete(fib);
			}
		}
		return 0;
	}
}