/*-
 * Copyright (C) 2001-2003 by NBMK Encryption Technologies.
 * All rights reserved.
 *
 * NBMK Encryption Technologies provides no support of any kind for
 * this software.  Questions or concerns about it may be addressed to
 * the members of the relevant open-source community at
 * <tech-crypto@netbsd.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

static char const n8_id[] = "$Id: QMQueue.c,v 1.2 2009/11/22 19:09:16 mbalmer Exp $";
/*****************************************************************************/
/** @file QMQueue.c
 *  @brief Public Key Encryption / Encryption Authentication
 *  Request Queue Handler.
 *
 *  This file handles the queue of command blocks sent by the API
 *  to the Device Driver for the Public Key Encryption hardware,
 *  or the Encryption Authentication hardware.
 *
 *  NOTE: I have made a general assumption in this module, that if a
 *  queue is not an Encryption/Authentication queue then it is a
 *  Public Key Encryption queue.  The Random Number Generation queue
 *  has its own separate routines.
 *
 *****************************************************************************/
/*****************************************************************************
 * Revision history:
 * 07/01/03 brr   Do not set command complete bit on EA errors in QMgrCmdError.
 * 05/08/03 brr   Support reworked user pools.
 * 04/21/03 brr   Added support for multiple memory banks.
 * 04/01/03 brr   Reverted N8_WaitOnRequest to accept timeout parameter.
 * 03/31/03 brr   Do not rely on atomic_inc_and_test.
 * 03/27/03 brr   Fix race condition when waking processes on wait queue.
 * 03/24/03 brr   Fix race condition when conditionally enabling the AMBA
 *                timer, also improve performance by performing fewer atomic
 *                operations in QMgrCheckQueue.
 * 03/21/03 brr   Track requests queued by unit type.
 * 03/19/03 brr   Enable the AMBA timer only when requests are queued to the
 *                NSP2000. Modified N8_WaitOnRequest to wait on a specific
 *                request and reuse the "synchronous" wait queue.
 * 03/10/03 brr   Added conditional support to perform callbacks in interrupt
 *                context.
 * 02/02/03 brr   Updated command completion determination to use the number
 *                of commands completed instead of queue position. This
 *                eliminated the need for forceCheck & relying on the request
 *                state to avoid prematurely marking a command complete.
 * 11/02/02 brr   Reinstate forceCheck.
 * 10/23/02 brr   Disable interrupts during queue operation and remove
 *                forceCheck since it is no longer needed.
 * 10/25/02 brr   Clean up function prototypes & include files.
 * 09/18/02 brr   Have QMgrCheckQueue return the number of requests completed.
 * 09/10/02 brr   OR command complete bit with last NOP in QMgrCmdError.
 * 07/08/02 brr   Added support for polling NSP2000 device.
 * 06/26/02 brr   Remove bank from the calls to N8_PhysToVirt.
 * 06/25/02 brr   Rework user pool allocation to only mmap portion used by
 *                the individual process.
 * 06/14/02 hml   Make sure we have physical addresses when we try to
 *                delete a request allocated from a request pool.
 * 06/12/02 hml   Passes the bank from kmem_p to calls to N8_PhysToVirt.
 * 06/10/02 hml   Handle deletion of buffers from deleted pools.
 * 06/11/02 brr   Convert N8_WaitEventInt to an OS dependent function.
 * 06/07/02 brr   Use unsigned arithmetic to compute queue depth.
 * 06/06/02 brr   Move wait queue initialization to QMgrInit.
 * 06/04/02 brr   Make sure QMgrCheckQueue retrieves an updated write pointer
 *                for each loop iteration to prevent a race condition where
 *                requests queued by the other processor are marked as
 *                completed as they are being queued.
 * 05/30/02 brr   Reworked error handling such that it is done by the ISR.
 *                Replaced QMgrReadNspStats with QMgrCmdError that the ISR
 *                calls if a command entry needs to be NOP'ed.
 * 05/07/02 msz   Pend on synchronous requests.
 * 05/01/02 brr   Updated with comments from code review including updated
 *                comments, add error code for API request queue full,
 *                eliminate modulo operations, and remove all chained request
 *                references. Also reworked to improve performance.
 * 03/27/02 hml   Changed all N8_QueueReturnCodes_t to N8_Status_t. Needed for
 *                Bug 634.
 * 03/22/02 msz   Don't process a request as done until it has been queued.
 * 03/26/02 brr   Ensure that buffers on the command queue are not returned to
 *                the buffer pool before the commands using them have executed.
 * 03/20/02 mmd   Incrementing requestsCompleted stat counter in QMgrCheckQueue.
 * 03/19/02 msz   Removed shared memory.
 * 03/18/02 msz   We no longer need the shadow queue nor its associated changes.
 * 03/06/02 brr   Do not include any SAPI includes & removed old semaphores.
 * 03/01/02 brr   Convert the queueControlSem to an ATOMICLOCK. Reworked
 *                dequeue operation not to require a semaphore.
 * 02/26/02 brr   Keep local copies of read/write pointers & have the ISR write
 *                them to hardware. Also have ISR complete API requests which
 *                are now KMALLOC'ed and no longer need to be copied to user
 *                space.
 * 02/20/02 brr   Streamlined copyInRequestsCommandBlocks &
 *                copyOutRequestsCommandBlocks to better support copying blocks
 *                to user space. Also eliminated QMgrSetupQueue.
 * 02/19/02 brr   Copy the command block from user space, if the request was
 *                submitted from user space.
 * 02/18/02 brr   Select the queue on entry to QMgrQueue.
 * 02/14/02 brr   Resolve 2.0 merge problems.
 * 02/04/02 msz   Clear shadow status on error, changes in QMgrReadNspStatus.
 * 01/31/02 brr   Substantial rework to improve performance.
 * 01/25/01 msz   Check for hardware error on Queue Full condition.
 *                Also added a debug routine.
 * 01/16/02 brr   Removed obsolete include file.
 * 01/14/02 hml   Replaced N8_VIRT_FREE with N8_FREE.
 * 01/13/02 brr   Removed obsolete include file.
 * 01/09/02 brr   Replaced N8_VIRT_MALLOC with N8_UMALLOC.
 * 12/15/01 msz   Adding shadow copy of command queue. Fix for BUG 382, 411.
 *                Rewrote QMgrReadNspStatus so it will clean up any hw error.
 * 12/07/01 bac   Fixed an annoying compiler warning.
 * 12/05/01 brr   Only perform queue initialization for the behavioral model.
 * 12/05/01 brr   Move initialization of Queue to the driver.
 * 12/03/01 mel   BUG 395: Added stdio.h include - to eliminate compilation
 *                errors.
 * 12/04/01 msz   Changes to allow chaining of requests.
 * 11/27/01 msz   BUG 373 (actually couldn't occur, but cleaned up code)
 *                Fixed copyOutRequestsCommandBlocks so it doesn't return
 *                status, as it always returned ok.
 *                Don't get process lock and setup queue if we don't have
 *                to - BUG 379.
 * 11/26/01 msz   Remove the setting of the timeFirstCommandSent field. BUG 335
 * 11/14/01 spm   Gated prom-to-cache copy so that it is only done for PK.
 * 11/14/01 msz   Moved callbacks out of locked area. BUG 282
 * 11/13/01 brr   Removed references to shared_resource, fixed warnings.
 * 11/12/01 msz   Some code review fixes. Fixes to items 19,20,21,24,30,32.
 * 11/11/01 mmd   Modified QMgrSetupQueue to attach to existing command queue
 *                if using real hardware.
 * 11/10/01 brr   Modified to support static allocations of persistent data
 *                by the driver.
 * 11/10/01 spm   Added SKS prom-to-cache copy to QMgr setup. Addressed
 *                bug #295 with a comment.
 * 11/08/01 bac   Fixed documentation that referred to the unused return code
 *                N8_QUEUE_REQUEST_ERROR_QUEUE_IN_ERROR.
 * 10/29/01 msz   Fixed NOPing of CCH (ea) commands.
 * 10/10/01 brr   Removed warning exposed when optimization turned up.
 * 10/12/01 msz   Protect Behavioral Model with hardwareAccessSem. Needed
 *                for running in a multithreaded environment.
 * 10/09/01 msz   Initialize hardware if it has not been initialized, not
 *                if we didn't attach to memory.
 * 10/08/01 bac   Correctly set the API Request field
 *                indexOfCommandThatCausedError.
 * 10/03/01 msz   Removed extra DBG, fixed when queue is initialized and when
 *                it is set to initialized.
 * 10/02/01 bac   Potential fix to QMgrCheckQueue bug when the hardware locks.
 * 09/24/01 msz   Shared memory changes & some extra DBG.
 * 09/21/01 msz   Fixed a bug in QMgrCheckQueue, where we were dereferencing
 *                a freed structure.
 * 09/14/01 bac   Set the bitfields in the command block before allocation to
 *                the default set for unshared kernel memory. This will need
 *                to be addressed for the multi-process case.
 * 09/06/01 bac   Added include of <string.h> to silence warning.
 * 08/29/01 bac   Removed an incorrect call to QMgrWaitForNotBusy in the setup
 *                routine.
 * 08/25/01 msz   Changed QMgrCheckQueue, QMgrReadNspStatus.
 * 08/16/01 msz   Code review changes.
 * 08/09/01 msz   Added locking.
 * 08/01/01 msz   Created from merging EA_EnQueue.h and PK_EnQueue.h
 *                To see older revision history please see those files.
 ****************************************************************************/
/** @defgroup QMgr QMQueue
 */

#include "n8_pub_types.h"
#include "n8_common.h"
#include "n8_OS_intf.h"
#include "n8_malloc_common.h"
#include "QMQueue.h"
#include "n8_enqueue_common.h"
#include "QMUtil.h"
#include "n8_semaphore.h"
#include "n8_time.h"
#include "RN_Queue.h"
#include "n8_manage_memory.h"
#include "userPool.h"
#include "n8_driver_api.h"
#include "irq.h"
#include "n8_memory.h"
#include <sys/proc.h>

n8_atomic_t requestsComplete;
n8_atomic_t requestsQueued[N8_NUM_COMPONENTS];

#if defined(__linux) && defined(SUPPORT_DEVICE_POLL)
extern wait_queue_head_t nsp2000_wait;
#endif
/*****************************************************************************
 * copyOutRequestsCommandBlocks
 *****************************************************************************/
/** @ingroup QMgr
 * @brief Copy command blocks back to the API after the hardware completes a
 *        request.
 *
 * @param queue_p   RO: Pointer to the Queue Control Structure for
 *                      this EA or PK execution unit.
 * @param request_p RO: Pointer to the request to be copied from
 *                      this EA or PK execution unit.
 *
 * @par Externals:
 *    None.
 *
 * @return
 *    None.
 *
 * @par Errors: none checked for
 *
 * @par Locks:
 *    This routine is called from interrupt context and assumes exclusive
 *    access to the command blocks pointed to by the API request.
 *
 * @par Assumptions:
 *    This function is called only after it is verified that the API requires
 *    the command blocks to be copied back.
 ******************************************************************************/
static void
copyOutRequestsCommandBlocks(QueueControl_t *queue_p, API_Request_t *request_p)
{
   void  *CommandBlock_p = NULL;
   int    firstCmd = request_p->index_of_first_command;
   int    numCmds = request_p->numNewCmds;
   int    remainCmds;
   void  *copy_p;

   CommandBlock_p = (void *)((int)request_p + sizeof(API_Request_t));

   /* Compute the address in the queue based on the index and the size of a */
   /* command for this queue.                                               */
   copy_p = (void *)(queue_p->cmdQueVirtPtr + (firstCmd * queue_p->cmdEntrySize));

   /* If the Queue did not wrap with this API request, copy the entire      */
   /* block back into the API Request in one operation.                     */
   if (firstCmd <= request_p->index_of_last_command)
   {
      memcpy(CommandBlock_p, copy_p, queue_p->cmdEntrySize * numCmds);
   }
   else
   {
      /* This request wrapped to the beginning of the command queue;        */
      /* perform the copy back in two operations.                           */
      remainCmds = queue_p->sizeOfQueue - firstCmd;

      memcpy(CommandBlock_p, copy_p, (queue_p->cmdEntrySize * remainCmds));
      memcpy((void *)((int)CommandBlock_p + (remainCmds * queue_p->cmdEntrySize)),
             (void *)queue_p->cmdQueVirtPtr,
             queue_p->cmdEntrySize * (numCmds - remainCmds));
   }

   return;
} /* copyOutRequestsCommandBlocks */
/*****************************************************************************
 * copyInRequestsCommandBlocks
 *****************************************************************************/
/** @ingroup QMgr
 * @brief Copy command blocks from the API request into the command queue.
 *
 * @param queue_p           RO: Pointer to the Queue Control Structure for
 *                              this EA or PK execution unit.
 * @param API_req_p         RO: Request from API, used to return status
 *                              back to the API in the request.
 * @param writeQueueIndex_p RW: The write index pointer from hardware.
 *
 * @par Externals:
 *    None.
 *
 * @return
 *    None.
 *
 * @par Errors:
 *    See return values.
 *
 * @par Locks:
 *    This routine requires that the queueControlSem has been previously
 *    acquired.  (It is called from a section that requires that this lock
 *    be held, so the routine requires the lock be held rather than acquiring
 *    the lock internally.)
 *
 * @par Assumptions:
 ******************************************************************************/
static void
copyInRequestsCommandBlocks( QueueControl_t *queue_p,
                             API_Request_t  *API_req_p,
                             uint32_t       *writeQueueIndex_p )
{
   void  *CommandBlock_p;
   int    remainCmds;
   void  *copy_p;

   /* Copy this request's command blocks to the queue. */
   CommandBlock_p = (void *)((int)API_req_p + sizeof(API_Request_t));

   /* Compute the address in the queue based on the index and the size of a */
   /* command for this queue.                                               */
   copy_p = (void *)
            (queue_p->cmdQueVirtPtr + (*writeQueueIndex_p * queue_p->cmdEntrySize));

   /* If the Queue does not wrap with this API request, copy the entire     */
   /* block into the command queue in one operation.                        */
   if (*writeQueueIndex_p + API_req_p->numNewCmds < queue_p->sizeOfQueue)
   {
      memcpy(copy_p, CommandBlock_p,
             queue_p->cmdEntrySize * API_req_p->numNewCmds);
      *writeQueueIndex_p += API_req_p->numNewCmds;
   }
   else
   {
      /* This request will wrap to the beginning of the command queue;      */
      /* perform the copy in two operations.                                */
      remainCmds = queue_p->sizeOfQueue - *writeQueueIndex_p;
      memcpy(copy_p, CommandBlock_p,
             queue_p->cmdEntrySize * remainCmds);
      *writeQueueIndex_p =
         (*writeQueueIndex_p + API_req_p->numNewCmds) & queue_p->sizeMask;
      memcpy((void *)queue_p->cmdQueVirtPtr,
             (void *)((int)CommandBlock_p + (remainCmds * queue_p->cmdEntrySize)),
             queue_p->cmdEntrySize * *writeQueueIndex_p);
   }
} /* copyInRequestsCommandBlocks */
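
/*
 * Illustrative example of the wrap-around arithmetic in
 * copyInRequestsCommandBlocks, assuming a hypothetical queue with
 * sizeOfQueue == 8 (sizeMask == 7): with *writeQueueIndex_p == 6 and
 * numNewCmds == 4, remainCmds is 2, so the first memcpy fills indices 6
 * and 7, the index advances to (6 + 4) & 7 == 2, and the second memcpy
 * places the remaining 2 commands at indices 0 and 1 at the queue base.
 */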
/*****************************************************************************
 * QMgrCheckQueue
 *****************************************************************************/
/** @ingroup QMgr
 * @brief Given the number of completed command blocks, traverse the list of
 *        API Requests and mark them finished until the number of completed
 *        command blocks has been exhausted.
 *
 * @param unit         RO: Indicates EA or PK execution unit.
 *        chip         RO: Indicates which chip.
 *        cmdsComplete RO: Number of command blocks completed since
 *                         the last call.
 *
 * @par Externals:
 *
 * @return
 *    int - number of completed requests on this queue.
 *
 * @par Errors:
 *    Error conditions are handled in the ISR.  This function only processes
 *    completed requests.
 *
 * @par Locks:
 *    This routine is called in interrupt context and assumes exclusive
 *    access to dequeue API requests.  IT MUST NOT TAKE A SEMAPHORE,
 *    NOR CAN ANY PROCESS UPDATE THE apiReadIdx.
 *
 *    If this routine is called from a non-interrupt context, it still
 *    assumes exclusive access to dequeue API requests; the caller MUST
 *    DISABLE INTERRUPTS BEFORE CALLING THIS FUNCTION.
 *
 * @par Assumptions:
 *    It is assumed that the user is not modifying requests that are on the
 *    queue.
 ******************************************************************************/
int
QMgrCheckQueue(N8_Component_t unit,
               int            chip,
               uint16_t       cmdsComplete)
{
   API_Request_t     *API_Request_p;
   N8_MemoryHandle_t *kmem_p;
   QueueControl_t    *queue_p;
   int                bankIndex;
   int                reqsComplete = 0;
   uint32_t           totalCmds;

   queue_p = &(queueTable_g.controlSets_p[chip][unit]);

   /* Get the first API request on the queue. */
   API_Request_p = (API_Request_t *)queue_p->requestQueue[queue_p->apiReadIdx];
   totalCmds = cmdsComplete + queue_p->remainingCmds;
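
   /*
    * Illustrative note on the arithmetic above: cmdsComplete counts command
    * blocks, not requests.  For example, if a request consists of 5 command
    * blocks but only 3 have completed so far, the loop below leaves it
    * unfinished and the 3 blocks are carried forward in
    * queue_p->remainingCmds, to be added to cmdsComplete on the next call.
    */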
   while ((API_Request_p != NULL) &&
          (totalCmds >= API_Request_p->numNewCmds))
   {
      totalCmds -= API_Request_p->numNewCmds;

      /* Request is done.                                             */
      /* Check to see if the process that queued this request has     */
      /* terminated.  If so, simply free the buffer and move on.      */
      kmem_p = (N8_MemoryHandle_t *)((int)API_Request_p - N8_BUFFER_HEADER_SIZE);

      if (kmem_p->bufferState == N8_BUFFER_SESS_DELETED)
      {
         /* Kernel buffers can be handled in the good old simple way. */
         bankIndex = kmem_p->bankIndex;
         N8_FreeBuffer(kmem_p);

         /* If this buffer was a member of a pool, userPoolFreePool will
            determine if the pool can now be freed. */
         userPoolFreePool(bankIndex);
      }
      else
      {
         kmem_p->bufferState = N8_BUFFER_NOT_QUEUED;

         /* If this command is to be copied back, and had no      */
         /* errors, then copy the contents of the command block   */
         /* that hardware has completed back into the command     */
         /* block sent by the API.                                 */
         if ( ( API_Request_p->copyBackCommandBlock == N8_TRUE ) &&
              ( API_Request_p->err_rpt_bfr.errorReturnedFromSimon == 0 ) )
         {
            copyOutRequestsCommandBlocks(queue_p, API_Request_p);
         }

         API_Request_p->qr.requestStatus = N8_QUEUE_REQUEST_FINISHED;

         /* If this is a synchronous request, wake up the       */
         /* process waiting on it.                               */
         if ( API_Request_p->qr.synchronous == N8_TRUE )
         {
            WakeUp( (&queue_p->waitQueue[queue_p->apiReadIdx] ) );
         }
#ifdef SUPPORT_INT_CONTEXT_CALLBACKS
         else
         {
            /* This request was submitted by a kernel operation; perform */
            /* the callback processing immediately.                      */
            if (API_Request_p->userSpace == N8_FALSE)
            {
               /* Perform the SDK callback if needed. */
               if (API_Request_p->qr.callback)
               {
                  API_Request_p->qr.callback ((void *)API_Request_p);
               }

               /* Perform the user's callback if needed. */
               if (API_Request_p->usrCallback)
               {
                  API_Request_p->usrCallback (API_Request_p->usrData,
                                              N8_STATUS_OK);
               }

               /* Free the API request. */
               N8_FreeBuffer(kmem_p);
            }
         }
#endif
      }

      /* Increment our local count of completed requests. */
      reqsComplete++;

      /* Remove the API request from the requestQueue since        */
      /* QMgr has completed the processing of this API request.    */
      queue_p->requestQueue[queue_p->apiReadIdx] = NULL;
      queue_p->apiReadIdx = (queue_p->apiReadIdx + 1) & (API_REQ_MASK);
      API_Request_p = (API_Request_t *)
                      queue_p->requestQueue[queue_p->apiReadIdx];

   }  /* end -- while (API_Request_p != NULL) */

   queue_p->remainingCmds = totalCmds;

   /* Increment our global counters for completed requests. */
   queue_p->stats.requestsCompleted += reqsComplete;
   n8_atomic_sub(requestsQueued[unit], reqsComplete);

#if defined(__linux) && defined(SUPPORT_DEVICE_POLL)
   n8_atomic_add(requestsComplete, reqsComplete);
   if (n8_atomic_read(requestsComplete))
   {
      WakeUp(&nsp2000_wait);
   }
#endif

   /* Increment our count of how many times the check queue was called. */
   queue_p->stats.requestCheckCalled++;

   /* Return the number of requests completed on this queue. */
   return (reqsComplete);

} /* QMgrCheckQueue */
/*****************************************************************************
 * N8_QMgrQueue
 *****************************************************************************/
/** @ingroup QMgr
 * @brief API interface to the EA/PK Queue: accepts new requests
 *        from the upper layers (i.e., the API) and writes the command
 *        blocks into the command queue.
 *
 *        Requests consist of (>=1) commands.  Requests are kept
 *        in a circular queue while the commands persist in the
 *        command queue.  Once the commands have been executed, the
 *        request is marked as complete and is removed from the circular
 *        queue.
 *
 * @param API_req_p RO: Pointer to the request from the API.
 *
 * @par Externals:
 *    None.
 *
 * @return
 *    typedef-enum-ed type, see N8_QueueReturnCodes_t:
 *    values from PK_ValidateRequest if it failed,
 *    N8_QUEUE_FULL if the new request contains more commands than the
 *    queue has capacity for.
 *
 * @par Errors:
 *    N8_QUEUE_FULL if the new request contains more commands than the
 *    queue has capacity for.
 *
 *    On an error API_req_p->requestStatus will also be set.
 *
 * @par Locks:
 *    This routine acquires the queueControlSem to assure the queue operation
 *    is atomic.
 *
 * @par Assumptions:
 ******************************************************************************/
extern int ambaTimerActive;

N8_Status_t
N8_QMgrQueue( API_Request_t *API_req_p )
{
   uint32_t            writeQueueIndex;
   uint32_t            readQueueIndex;
   uint32_t            firstCommand;
   uint32_t            lastCommand;
   uint32_t            cmd_blks_available;
   QueueControl_t     *queue_p;
   N8_Status_t         returnCode = N8_STATUS_OK;
   N8_MemoryHandle_t  *kmem_p;
   int                 waitReturn;
   int                 requestIndex;
   N8_Component_t      unit;

   /* Determine which queue to use, and set the queue_p in the API request. */
   unit = API_req_p->qr.unit;
   returnCode = QMgr_get_valid_unit_num(unit,
                                        API_req_p->qr.chip,
                                        &API_req_p->qr.chip);
   if (returnCode != N8_STATUS_OK)
   {
      /* The unit is invalid; we're out of here. */
      goto QMgrQueueReturn;
   }

   queue_p = &(queueTable_g.controlSets_p[API_req_p->qr.chip][unit]);

   /* Acquire the lock to ensure that this is the only process or        */
   /* thread that is adding to the queue at a time.  We want to hold     */
   /* the lock from the moment we get the write index until we are done  */
   /* copying in the data and have incremented the write index.          */
   N8_AtomicLock(queue_p->queueControlSem);

   /* Get the current read and write index. */
   readQueueIndex  = *queue_p->readIndex_p;
   writeQueueIndex = queue_p->writeIndex;

   /* Calculate the available space in the queue.  Note that the        */
   /* capacity of the queue is one less than the size of the queue      */
   /* so we can distinguish between the full and empty conditions.      */
   /* NOTE: the unsigned arithmetic handles negative numbers.           */
   cmd_blks_available = (readQueueIndex - writeQueueIndex - 1) &
                        queue_p->sizeMask;
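
   /*
    * Illustrative example of the computation above, assuming a hypothetical
    * queue with sizeOfQueue == 8 (so sizeMask == 7): with readQueueIndex == 2
    * and writeQueueIndex == 6, (2 - 6 - 1) underflows to 0xFFFFFFFB and the
    * mask yields 3 free command blocks (indices 6, 7 and 0); one slot is
    * always left unused so a full queue is distinguishable from an empty one.
    */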

   /* If there are more new commands than room, */
   if ( API_req_p->numNewCmds > cmd_blks_available )
   {
      /* this request cannot be filled. */
      returnCode = N8_QUEUE_FULL;
      goto QMgrQueueReleaseReturn;
   }

   requestIndex = queue_p->apiWriteIdx;
   if (queue_p->requestQueue[requestIndex] == NULL)
   {
      queue_p->requestQueue[requestIndex] = &API_req_p->qr;
      queue_p->apiWriteIdx = (requestIndex + 1) & (API_REQ_MASK);
   }
   else
   {
      /* Then this request cannot be filled. */
      returnCode = N8_API_QUEUE_FULL;
      goto QMgrQueueReleaseReturn;
   }

   /* Denote where this request's commands begin. */
   firstCommand = writeQueueIndex;
   API_req_p->index_of_first_command = firstCommand;

   /* Copy this request's command blocks to the queue.  */
   /* On a failure, do not expect writeQueueIndex to    */
   /* be valid.                                         */
   copyInRequestsCommandBlocks(queue_p, API_req_p, &writeQueueIndex );

   /* Denote where this request's commands end. */
   lastCommand =
      (writeQueueIndex - 1 + queue_p->sizeOfQueue) & queue_p->sizeMask;
   API_req_p->index_of_last_command = lastCommand;
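
   /*
    * Example of the wrap handling above, assuming a hypothetical
    * sizeOfQueue of 8: if the copy wrapped and writeQueueIndex is now 0,
    * lastCommand is (0 - 1 + 8) & 7 == 7, the final slot of the queue.
    */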

   /* Mark this buffer as QUEUED so a close of the driver will not free   */
   /* this buffer until the command has been processed.                   */
   kmem_p = (N8_MemoryHandle_t *)((int)API_req_p - N8_BUFFER_HEADER_SIZE);
   kmem_p->bufferState = N8_BUFFER_QUEUED;

   /* The queue operation is complete.  Set the status     */
   /* flag so the API knows the message was received.      */
   API_req_p->qr.requestStatus = N8_QUEUE_REQUEST_QUEUED;

   /* Add this to the current number of requests queued. */
   n8_atomic_add(requestsQueued[unit], 1);

   /* Notify the hardware that new commands are pending     */
   /* by setting the write pointer to the next empty slot.  */
   queue_p->writeIndex = writeQueueIndex;
   queue_p->n8reg_p->q_ptr = writeQueueIndex;

   /* If this is the only outstanding EA request, the  */
   /* AMBA timer must be reloaded.                     */
   if ((unit == N8_EA) && (ambaTimerActive == FALSE))
   {
      reload_AMBA_timer();
   }

   /* Increment our count of requests queued. */
   queue_p->stats.requestsQueued++;

   /* As we have notified the hardware, we can now release  */
   /* the queue semaphore.                                   */
   N8_AtomicUnlock(queue_p->queueControlSem);

   /* If the request is a synchronous request, then wait for it   */
   /* to be done before we return.  We can look at the request    */
   /* status field to see that it is finished, and this is safe   */
   /* because the SAPI code is waiting/sleeping on this request   */
   /* and therefore can't be manipulating it.                     */
   if ( API_req_p->qr.synchronous == N8_TRUE )
   {
      waitReturn = N8_WaitEventInt(&queue_p->waitQueue[requestIndex], API_req_p);
      if (waitReturn != 0)
      {
         /* The wait was preempted by a signal or timeout; increment the  */
         /* stat and return so the caller can requeue.                    */
         queue_p->stats.requestsPreempted++;
         returnCode = N8_EVENT_INCOMPLETE;
      }
   }

QMgrQueueReturn:
   return returnCode;

   /* This is the error return case. */
QMgrQueueReleaseReturn:
   API_req_p->qr.requestError = N8_QUEUE_REQUEST_ERROR;
   API_req_p->qr.requestStatus = N8_QUEUE_REQUEST_FINISHED;
   N8_AtomicUnlock(queue_p->queueControlSem);
   return returnCode;

} /* N8_QMgrQueue */
/*****************************************************************************
 * QMgrCmdError
 *****************************************************************************/
/** @ingroup QMgr
 * @brief A problem has been detected with the current command block; write
 *        NOPs to all command blocks for this request so the hardware can be
 *        re-enabled.
 *
 *       -----------------
 *    0 |               |    base  (0)     first < last
 *       -----------------                 1, 2, 3 have commands in an errored
 *    1 | Error Request |    first (1)     request; the read pointer points to
 *       -----------------                 (2), which is the command that
 *    2 | Error Request |    read  (2)     halted the hardware.
 *       -----------------
 *    3 | Error Request |    last  (3)     read >= first && read <= last
 *       -----------------                 then it is the halting command
 *    4 |               |    write (4)
 *       -----------------                 read < first || read > last
 *    5 |               |                  then it is not the halting command
 *       -----------------                 (but if it has halted, clean up
 *    6 |               |                  anyhow)
 *       -----------------
 *    7 |               |
 *       -----------------
 *
 *
 *       -----------------
 *    0 | Error Request |    base  (0)     last < first
 *       -----------------                 7, 0, 1 have commands in an errored
 *    1 | Error Request |    last  (1)     request; the read pointer points to
 *       -----------------                 (7), which is the command that
 *    2 |               |                  halted the hardware.
 *       -----------------
 *    3 |               |                  read >= first || read <= last
 *       -----------------                 then it is the halting command
 *    4 |               |
 *       -----------------                 read < first && read > last
 *    5 |               |                  then it is not the halting command
 *       -----------------                 (but if it has halted, clean up
 *    6 |               |                  anyhow)
 *       -----------------
 *    7 | Error Request |    first (7)     read (7)
 *       -----------------
 *
 * @param unit           RO: This is an EA or PK execution unit.
 *        chip           RO: The chip number for the failed command.
 *        readQueueIndex RO: The index of the command that has failed.
 *        configStatus   RO: The value of the status word for the
 *                           failed command.
 *
 * @par Externals:
 *
 * @return
 *    None.
 *
 * @par Errors:
 *
 * @par Locks:
 *    This routine is called from interrupt context and assumes exclusive
 *    access to the command blocks pointed to by the error condition.
 *
 * @par Assumptions:
 *    This function assumes that QMgrCheckQueue has been called prior to
 *    this call to ensure apiReadIdx is pointing to the failed command.
 *    This command has halted hardware and we need to write NOPs to this
 *    command and to all subsequent commands in this request.
 ******************************************************************************/
void
QMgrCmdError(N8_Component_t unit,
             int            chip,
             int            readQueueIndex,
             int            configStatus)
{
   uint32_t        requestCommandIndex;
   uint32_t        requestLastCommand;
   uint32_t        requestNextCommand;
   API_Request_t  *API_Request_p;
   QueueControl_t *queue_p;

   queue_p = &(queueTable_g.controlSets_p[chip][unit]);

   /* An error has occurred; the failing command and any subsequent    */
   /* commands in the request must be changed to NOPs and restarted.   */

   /* Increment our count of hardware errors encountered. */
   queue_p->stats.hardwareErrorCount++;

   /* The apiReadIdx pointing into the requestQueue of the queue   */
   /* will be the last request that had a command that was         */
   /* executing; therefore it is the request with the errored      */
   /* command.                                                     */
   API_Request_p =
      (API_Request_t *)queue_p->requestQueue[queue_p->apiReadIdx];

   /* Mark the request as errored. */
   API_Request_p->qr.requestError = N8_QUEUE_REQUEST_ERROR;
   API_Request_p->err_rpt_bfr.errorReturnedFromSimon = configStatus;
   API_Request_p->err_rpt_bfr.indexOfCommandThatCausedError =
      ((readQueueIndex - API_Request_p->index_of_first_command) %
       queue_p->sizeOfQueue) + 1;
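
   /*
    * Illustrative example, using the first diagram above with a
    * hypothetical sizeOfQueue of 8: the errored request occupies indices
    * 1..3 and the read pointer halted at index 2, so the stored index is
    * ((2 - 1) % 8) + 1 == 2, i.e. the request's second command is reported
    * as the one that caused the error.
    */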

   /* NOP out the request starting at the errored command to the   */
   /* end of the request that caused the hardware to halt.         */
   requestCommandIndex = readQueueIndex;
   requestLastCommand  = API_Request_p->index_of_last_command;
   requestNextCommand  = (requestLastCommand + 1) & queue_p->sizeMask;

   do
   {
      if ( QMGR_IS_EA(queue_p) )
      {
         (queue_p->EAq_head_p + requestCommandIndex)->opcode_iter_length =
            EA_Cmd_NOP;
      }
      else
      {
         (queue_p->PKq_head_p + requestCommandIndex)->opcode_si = PK_Cmd_NOP;
         if (requestCommandIndex == requestLastCommand)
         {
            (queue_p->PKq_head_p + requestCommandIndex)->opcode_si |=
               PK_Cmd_SI_Mask;
         }
      }

      requestCommandIndex = (requestCommandIndex + 1) & queue_p->sizeMask;

   } while (requestCommandIndex != requestNextCommand);
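
   /*
    * Illustrative example of the loop above, using the second diagram with
    * a hypothetical sizeOfQueue of 8: with readQueueIndex == 7 and
    * index_of_last_command == 1, requestNextCommand is 2 and the loop NOPs
    * indices 7, 0 and 1, wrapping through the base of the command queue
    * before it terminates.
    */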

#ifdef SUPPORT_INT_CONTEXT_CALLBACKS
   /* This request was submitted by a kernel operation; perform   */
   /* the callback processing immediately.                        */
   if (API_Request_p->userSpace == N8_FALSE)
   {
      N8_MemoryHandle_t *kmem_p;

      /* Perform the user's callback if needed. */
      if (API_Request_p->usrCallback)
      {
         API_Request_p->usrCallback (API_Request_p->usrData,
                                     N8_HARDWARE_ERROR);
      }

      /* Free the API request. */
      kmem_p = (N8_MemoryHandle_t *)((int)API_Request_p - N8_BUFFER_HEADER_SIZE);
      N8_FreeBuffer(kmem_p);
   }
#endif

} /* QMgrCmdError */
/*****************************************************************************
 * N8_QMgrDequeue
 *****************************************************************************/
/** @ingroup QMgr
 * @brief API read interface to the EA/PK Queue:
 *        This call is optional and is used to support device polling.  It
 *        simply decrements the count of completed requests available.
 *
 * @param
 *
 * @par Externals:
 *    requestsComplete.
 *
 * @return
 *
 * @par Errors:
 *
 * @par Locks:
 *    This routine uses the atomic library to ensure the operation is atomic.
 *
 * @par Assumptions:
 ******************************************************************************/
N8_Status_t N8_QMgrDequeue(void)
{
   n8_atomic_sub(requestsComplete, 1);
   return N8_STATUS_OK;
}

/*****************************************************************************
 * QMgrPoll
 *****************************************************************************/
/** @ingroup QMgr
 * @brief API poll interface to the EA/PK Queue:
 *        This call simply returns the count of completed requests
 *        available.
 *
 * @param
 *
 * @par Externals:
 *    requestsComplete.
 *
 * @return
 *    Number of complete events.
 *
 * @par Errors:
 *
 * @par Locks:
 *    This routine uses the atomic library to ensure the operation is atomic.
 *
 * @par Assumptions:
 *    This call assumes the user is calling N8_QMgrDequeue to maintain the
 *    requestsComplete count correctly.
 ******************************************************************************/
int QMgrPoll(void)
{
   return(n8_atomic_read(requestsComplete));
}

/*****************************************************************************
 * QMgrCount
 *****************************************************************************/
/** @ingroup QMgr
 * @brief This call simply returns the count of requests currently queued to
 *        the NSP2000.
 *
 * @param
 *
 * @par Externals:
 *    requestsQueued.
 *
 * @return
 *    Number of queued requests.
 *
 * @par Errors:
 *
 * @par Locks:
 *    This routine uses the atomic library to ensure the operation is atomic.
 *
 * @par Assumptions:
 ******************************************************************************/
int QMgrCount(N8_Component_t unit)
{
   return(n8_atomic_read(requestsQueued[unit]));
}