/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 1999-2002 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * hci1394_ixl_misc.c
 *    Isochronous IXL miscellaneous routines.
 *    Contains common routines used by the ixl compiler, interrupt handler and
 *    dynamic update.
 */

#include <sys/kmem.h>
#include <sys/types.h>
#include <sys/conf.h>

#include <sys/tnf_probe.h>

#include <sys/1394/h1394.h>
#include <sys/1394/ixl1394.h>
#include <sys/1394/adapters/hci1394.h>

/* local routines */
static void hci1394_delete_dma_desc_mem(hci1394_state_t *soft_statep,
    hci1394_idma_desc_mem_t *);
static void hci1394_delete_xfer_ctl(hci1394_xfer_ctl_t *);

/*
 * hci1394_ixl_set_start()
 *    Set up the context structure with the first ixl command to process
 *    and the first hci descriptor to execute.
 *
 *    This function assumes the current context is stopped!
 *
 *    If ixlstp IS NOT null AND is not the first compiled ixl command and
 *    is not an ixl label command, returns an error.
 *    If ixlstp IS null, uses the first compiled ixl command (ixl_firstp)
 *    in place of ixlstp.
 *
 *    If no executable xfer is found along the exec path from ixlstp,
 *    returns an error.
 */
int
hci1394_ixl_set_start(hci1394_iso_ctxt_t *ctxtp, ixl1394_command_t *ixlstp)
{
	ixl1394_command_t *ixl_exec_startp;

	/* if ixl start command is null, use first compiled ixl command */
	if (ixlstp == NULL) {
		ixlstp = ctxtp->ixl_firstp;
	}

	/*
	 * if ixl start command is not first ixl compiled and is not a label,
	 * error
	 */
	if ((ixlstp != ctxtp->ixl_firstp) && (ixlstp->ixl_opcode !=
	    IXL1394_OP_LABEL)) {
		return (-1);
	}

	/* follow exec path to find first ixl command that's an xfer command */
	(void) hci1394_ixl_find_next_exec_xfer(ixlstp, NULL, &ixl_exec_startp);

	/*
	 * if there was one, then in its compiler private, its
	 * hci1394_xfer_ctl structure has the appropriate bound address
	 */
	if (ixl_exec_startp != NULL) {

		/* set up for start of context and return done */
		ctxtp->dma_mem_execp = (uint32_t)((hci1394_xfer_ctl_t *)
		    ixl_exec_startp->compiler_privatep)->dma[0].dma_bound;

		ctxtp->dma_last_time = 0;
		ctxtp->ixl_exec_depth = 0;
		ctxtp->ixl_execp = ixlstp;
		ctxtp->rem_noadv_intrs = ctxtp->max_noadv_intrs;

		return (0);
	}

	/* else no executable xfer command found, return error */
	return (1);
}

#ifdef _KERNEL
/*
 * hci1394_ixl_reset_status()
 *    Reset all statuses in all hci descriptor blocks associated with the
 *    current linked list of compiled ixl commands.
 *
 *    This function assumes the current context is stopped!
 */
void
hci1394_ixl_reset_status(hci1394_iso_ctxt_t *ctxtp)
{
	ixl1394_command_t *ixlcur;
	ixl1394_command_t *ixlnext;
	hci1394_xfer_ctl_t *xferctlp;
	uint_t ixldepth;
	uint16_t timestamp;

	ixlnext = ctxtp->ixl_firstp;

	/*
	 * Scan for the next ixl xfer start command along the ixl link path.
	 * Once an xfer command is found, clear its hci descriptor block's
	 * status. If it is a composite ixl xfer command, clear the status
	 * in each of its hci descriptor blocks.
	 */
	while (ixlnext != NULL) {

		/* set current and next ixl command */
		ixlcur = ixlnext;
		ixlnext = ixlcur->next_ixlp;

		/* skip to examine next if this is not xfer start ixl command */
		if (((ixlcur->ixl_opcode & IXL1394_OPF_ISXFER) == 0) ||
		    ((ixlcur->ixl_opcode & IXL1394_OPTY_MASK) == 0)) {
			continue;
		}

		/* get control struct for this xfer start ixl command */
		xferctlp = (hci1394_xfer_ctl_t *)ixlcur->compiler_privatep;

		/* clear status in each hci descriptor block for this ixl cmd */
		ixldepth = 0;
		while (ixldepth < xferctlp->cnt) {
			(void) hci1394_ixl_check_status(
			    &xferctlp->dma[ixldepth], ixlcur->ixl_opcode,
			    &timestamp, B_TRUE);
			ixldepth++;
		}
	}
}
#endif

/*
 * hci1394_ixl_find_next_exec_xfer()
 *    Follows the execution path of the ixl linked list until it finds the
 *    next xfer start IXL command, including the current IXL command, or
 *    reaches the end of the IXL linked list. Counts callback commands found
 *    along the way. (Previously, store timestamp commands were counted as
 *    well.)
 *
 *    To detect an infinite loop of label<->jump without an intervening xfer,
 *    a tolerance level of HCI1394_IXL_MAX_SEQ_JUMPS is used. Once this
 *    number of jumps is traversed, the IXL prog is assumed to have a loop.
 *
 *    Returns DDI_SUCCESS or DDI_FAILURE. DDI_FAILURE indicates that an
 *    infinite loop of labels & jumps was detected without any intervening
 *    xfers. DDI_SUCCESS indicates that next_exec_ixlpp contains the next
 *    xfer ixlp address, or NULL indicating the end of the list was reached.
 *    Note that DDI_FAILURE can only be returned during the IXL compilation
 *    phase, and not during ixl_update processing.
 */
int
hci1394_ixl_find_next_exec_xfer(ixl1394_command_t *ixl_start,
    uint_t *callback_cnt, ixl1394_command_t **next_exec_ixlpp)
{
	uint16_t ixlopcode;
	boolean_t xferfound;
	ixl1394_command_t *ixlp;
	int ii;

	ixlp = ixl_start;
	xferfound = B_FALSE;
	ii = HCI1394_IXL_MAX_SEQ_JUMPS;
	if (callback_cnt != NULL) {
		*callback_cnt = 0;
	}

	/* continue until xfer start ixl cmd or end of ixl list found */
	while ((xferfound == B_FALSE) && (ixlp != NULL) && (ii > 0)) {

		/* get current ixl cmd opcode without update flag */
		ixlopcode = ixlp->ixl_opcode & ~IXL1394_OPF_UPDATE;

		/* if found an xfer start ixl command, we are done */
		if (((ixlopcode & IXL1394_OPF_ISXFER) != 0) &&
		    ((ixlopcode & IXL1394_OPTY_MASK) != 0)) {
			xferfound = B_TRUE;
			continue;
		}

		/* if found a jump command, adjust to follow its path */
		if (ixlopcode == IXL1394_OP_JUMP) {
			ixlp = (ixl1394_command_t *)
			    ((ixl1394_jump_t *)ixlp)->label;
			ii--;

			/* if exceeded tolerance, give up */
			if (ii == 0) {
				return (DDI_FAILURE);
			}
			continue;
		}

		/* if current ixl command is a callback, count it */
		if ((ixlopcode == IXL1394_OP_CALLBACK) &&
		    (callback_cnt != NULL)) {
			(*callback_cnt)++;
		}

		/* advance to next linked ixl command */
		ixlp = ixlp->next_ixlp;
	}

	/* return the ixl xfer start command found, if any */
	*next_exec_ixlpp = ixlp;

	return (DDI_SUCCESS);
}

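/*
 * Illustrative sketch (compiled out, not exercised by this file): how a
 * hypothetical caller might locate the first executable xfer command of a
 * compiled IXL program and count the callback commands that precede it.
 */
#if 0
static int
example_first_xfer(ixl1394_command_t *ixl_firstp)
{
	ixl1394_command_t *xferp;
	uint_t cb_cnt;

	if (hci1394_ixl_find_next_exec_xfer(ixl_firstp, &cb_cnt,
	    &xferp) != DDI_SUCCESS) {
		/* label<->jump loop with no intervening xfer */
		return (DDI_FAILURE);
	}

	/* xferp is the first xfer start command, or NULL at end of list */
	/* cb_cnt holds the number of callback commands seen on the way */
	return (DDI_SUCCESS);
}
#endif
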
#ifdef _KERNEL
/*
 * hci1394_ixl_check_status()
 *    Read the descriptor status and hdrs, clear as appropriate.
 */
int32_t
hci1394_ixl_check_status(hci1394_xfer_ctl_dma_t *dma, uint16_t ixlopcode,
    uint16_t *timestamp, boolean_t do_status_reset)
{
	uint16_t bufsiz;
	uint16_t hcicnt;
	uint16_t hcirecvcnt;
	hci1394_desc_t *hcidescp;
	off_t hcidesc_off;
	ddi_acc_handle_t acc_hdl;
	ddi_dma_handle_t dma_hdl;
	uint32_t desc_status;
	uint32_t desc_hdr;

	/* last dma descriptor in descriptor block, from dma structure */
	hcidescp = (hci1394_desc_t *)(dma->dma_descp);
	hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;
	acc_hdl = dma->dma_buf->bi_handle;
	dma_hdl = dma->dma_buf->bi_dma_handle;

	/* if current ixl command opcode is xmit */
	if ((ixlopcode & IXL1394_OPF_ONXMIT) != 0) {

		/* Sync the descriptor before we get the status */
		(void) ddi_dma_sync(dma_hdl, hcidesc_off,
		    sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORCPU);
		desc_status = ddi_get32(acc_hdl, &hcidescp->status);

		/* check if status is set in last dma descriptor in block */
		if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {

			/*
			 * dma descriptor status set - I/O done.
			 * if not resetting the status, just return; else
			 * extract the timestamp, reset the desc status and
			 * return "dma descriptor block status set"
			 */
			if (do_status_reset == B_FALSE) {
				return (1);
			}

			*timestamp = (uint16_t)
			    ((desc_status & DESC_ST_TIMESTAMP_MASK) >>
			    DESC_ST_TIMESTAMP_SHIFT);
			ddi_put32(acc_hdl, &hcidescp->status, 0);

			/* Sync descriptor for device (status was cleared) */
			(void) ddi_dma_sync(dma_hdl, hcidesc_off,
			    sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORDEV);

			return (1);
		}

		/* else, return "dma descriptor block status not set" */
		return (0);
	}

	/* else current ixl opcode is recv */
	hcirecvcnt = 0;

	/* get count of descriptors in current dma descriptor block */
	hcicnt = dma->dma_bound & DESC_Z_MASK;
	hcidescp -= (hcicnt - 1);
	hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;

	/* iterate fwd through hci descriptors until end or find status set */
	while (hcicnt-- != 0) {

		/* Sync the descriptor before we get the status */
		(void) ddi_dma_sync(dma_hdl, hcidesc_off,
		    hcicnt * sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORCPU);

		desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);

		/* get cur buffer size & accumulate potential buffer usage */
		bufsiz = (desc_hdr & DESC_HDR_REQCOUNT_MASK) >>
		    DESC_HDR_REQCOUNT_SHIFT;
		hcirecvcnt += bufsiz;

		desc_status = ddi_get32(acc_hdl, &hcidescp->status);

		/* check if status set on this descriptor block descriptor */
		if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {

			/*
			 * dma descriptor status set - I/O done.
			 * if not resetting the status, just return; else
			 * extract the buffer space used, reset the desc
			 * status and return "dma descriptor block status set"
			 */
			if (do_status_reset == B_FALSE) {
				return (1);
			}

			hcirecvcnt -= (desc_status & DESC_ST_RESCOUNT_MASK) >>
			    DESC_ST_RESCOUNT_SHIFT;
			*timestamp = hcirecvcnt;
			desc_status = (bufsiz << DESC_ST_RESCOUNT_SHIFT) &
			    DESC_ST_RESCOUNT_MASK;
			ddi_put32(acc_hdl, &hcidescp->status, desc_status);

			/* Sync descriptor for device (status was cleared) */
			(void) ddi_dma_sync(dma_hdl, hcidesc_off,
			    sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORDEV);

			return (1);
		} else {
			/* else, set to evaluate next descriptor. */
			hcidescp++;
			hcidesc_off = (off_t)hcidescp -
			    (off_t)dma->dma_buf->bi_kaddr;
		}
	}

	/* return "input not complete" status */
	return (0);
}
#endif

/*
 * hci1394_ixl_cleanup()
 *    Delete all memory that was earlier allocated for a context's IXL prog.
 */
void
hci1394_ixl_cleanup(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp)
{
	hci1394_delete_xfer_ctl((hci1394_xfer_ctl_t *)ctxtp->xcs_firstp);
	hci1394_delete_dma_desc_mem(soft_statep, ctxtp->dma_firstp);
}

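/*
 * Illustrative sketch (compiled out, not exercised by this file): how a
 * hypothetical caller might tear down a context's compiled IXL program once
 * the context is stopped and no longer in use. Both pointers are assumed to
 * come from the driver's usual attach/context-allocation paths.
 */
#if 0
static void
example_teardown(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp)
{
	/* frees the xfer_ctl list and all dma descriptor memory blocks */
	hci1394_ixl_cleanup(soft_statep, ctxtp);
}
#endif
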
/*
 * hci1394_delete_dma_desc_mem()
 *    Iterate through the linked list of dma memory descriptors, deleting the
 *    allocated dma memory blocks, then deleting the dma memory descriptor
 *    after advancing to the next one.
 */
static void
/* ARGSUSED */
hci1394_delete_dma_desc_mem(hci1394_state_t *soft_statep,
    hci1394_idma_desc_mem_t *dma_firstp)
{
	hci1394_idma_desc_mem_t *dma_next;

	while (dma_firstp != NULL) {
		dma_next = dma_firstp->dma_nextp;
#ifdef _KERNEL
		/*
		 * if this dma descriptor memory block has the handles, then
		 * free the memory. (Note that valid handles are kept only
		 * with the most recently acquired cookie, and that each
		 * cookie is in its own idma_desc_mem_t struct.)
		 */
		if (dma_firstp->mem_handle != NULL) {
			hci1394_buf_free(&dma_firstp->mem_handle);
		}

		/* free current dma memory descriptor */
		kmem_free(dma_firstp, sizeof (hci1394_idma_desc_mem_t));
#else
		/* user mode free */
		/* free dma memory block and current dma mem descriptor */
		free(dma_firstp->mem.bi_kaddr);
		free(dma_firstp);
#endif
		/* advance to next dma memory descriptor */
		dma_firstp = dma_next;
	}
}

/*
 * hci1394_delete_xfer_ctl()
 *    Iterate through the linked list of xfer_ctl structs, deleting the
 *    allocated memory.
 */
void
hci1394_delete_xfer_ctl(hci1394_xfer_ctl_t *xcsp)
{
	hci1394_xfer_ctl_t *delp;

	while ((delp = xcsp) != NULL) {
		/* advance ptr to next xfer_ctl struct */
		xcsp = xcsp->ctl_nextp;

		/*
		 * delete current xfer_ctl struct and the included
		 * xfer_ctl_dma structs
		 */
#ifdef _KERNEL
		kmem_free(delp,
		    sizeof (hci1394_xfer_ctl_t) +
		    sizeof (hci1394_xfer_ctl_dma_t) * (delp->cnt - 1));
#else
		free(delp);
#endif
	}
}