Merge remote-tracking branch 'origin/master'
[unleashed/lotheac.git] / usr / src / uts / common / io / 1394 / adapters / hci1394_ixl_isr.c
blobcdb4128095c8935c64478b7efb6a98d3a3d56bb7
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
7 * with the License.
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
20 * CDDL HEADER END
23 * Copyright 1999-2002 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 #pragma ident "%Z%%M% %I% %E% SMI"
30 * hci1394_ixl_isr.c
31 * Isochronous IXL Interrupt Service Routines.
32 * The interrupt handler determines which OpenHCI DMA descriptors
33 * have been executed by the hardware, tracks the path in the
34 * corresponding IXL program, issues callbacks as needed, and resets
35 * the OpenHCI DMA descriptors.
38 #include <sys/types.h>
39 #include <sys/conf.h>
41 #include <sys/tnf_probe.h>
43 #include <sys/1394/h1394.h>
44 #include <sys/1394/ixl1394.h>
45 #include <sys/1394/adapters/hci1394.h>
/*
 * Local return codes for hci1394_ixl_intr_check_done() and forward
 * declarations of the static helpers defined below.
 *
 * NOTE(review): this file was extracted from a web code viewer; each line
 * carries its original source line number and some punctuation-only lines
 * (braces, comment delimiters, blanks) were dropped by the extraction.
 */
48 /* Return values for local hci1394_ixl_intr_check_done() */
49 #define IXL_CHECK_LOST (-1) /* ixl cmd intr processing lost */
50 #define IXL_CHECK_DONE 0 /* ixl cmd intr processing done */
51 #define IXL_CHECK_SKIP 1 /* ixl cmd intr processing context skipped */
52 #define IXL_CHECK_STOP 2 /* ixl cmd intr processing context stopped */
54 static boolean_t hci1394_ixl_intr_check_xfer(hci1394_state_t *soft_statep,
55 hci1394_iso_ctxt_t *ctxtp, ixl1394_command_t *ixlp,
56 ixl1394_command_t **ixlnextpp, uint16_t *timestampp, int *donecodep);
57 static int hci1394_ixl_intr_check_done(hci1394_state_t *soft_statep,
58 hci1394_iso_ctxt_t *ctxtp);
61 * hci1394_ixl_interrupt
62 * main entry point (front-end) into interrupt processing.
63 * acquires mutex, checks if update in progress, sets flags accordingly,
64 * and calls to do real interrupt processing.
/*
 * hci1394_ixl_interrupt()
 *    Front end for isochronous interrupt processing on one context.
 *    Serializes against concurrent update/interrupt/callback/stop activity
 *    via ctxtp->intr_flags (under intrprocmutex), then runs the real work
 *    in hci1394_ixl_dma_sync() with the mutex dropped.  On DMASTOP/DMALOST
 *    it stops the context and invokes the client callback.
 *
 *    soft_statep - driver soft state
 *    ctxtp       - isochronous context to service
 *    in_stop     - B_TRUE when called from the stop path itself, so the
 *                  STOP flag is not treated as a reason to bail out and
 *                  the stop thread is not cv_signal()ed.
 */
66 void
67 hci1394_ixl_interrupt(hci1394_state_t *soft_statep,
68 hci1394_iso_ctxt_t *ctxtp, boolean_t in_stop)
70 uint_t status;
71 int retcode;
/* status == 1 means "context is free; we may claim it and process" */
73 status = 1;
75 /* acquire the interrupt processing context mutex */
76 mutex_enter(&ctxtp->intrprocmutex);
78 /* set flag to indicate that interrupt processing is required */
79 ctxtp->intr_flags |= HCI1394_ISO_CTXT_INTRSET;
/*
 * NOTE(review): in the reject branches below retcode is assigned but not
 * visibly consumed in this extract -- presumably it fed trace probes that
 * were dropped by the extraction; confirm against the original file.
 */
81 /* if update proc already in progress, let it handle intr processing */
82 if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {
83 retcode = HCI1394_IXL_INTR_INUPDATE;
84 status = 0;
86 } else if (ctxtp->intr_flags & HCI1394_ISO_CTXT_ININTR) {
87 /* else fatal error if intr processing already in progress */
88 retcode = HCI1394_IXL_INTR_ININTR;
89 status = 0;
91 } else if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INCALL) {
92 /* else fatal error if callback in progress flag is set */
93 retcode = HCI1394_IXL_INTR_INCALL;
94 status = 0;
95 } else if (!in_stop && (ctxtp->intr_flags & HCI1394_ISO_CTXT_STOP)) {
96 /* context is being stopped */
97 retcode = HCI1394_IXL_INTR_STOP;
98 status = 0;
102 * if context is available, reserve it, do interrupt processing
103 * and free it
105 if (status) {
106 ctxtp->intr_flags |= HCI1394_ISO_CTXT_ININTR;
107 ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;
/* drop the mutex across the long-running sync; ININTR guards reentry */
108 mutex_exit(&ctxtp->intrprocmutex);
110 retcode = hci1394_ixl_dma_sync(soft_statep, ctxtp);
112 mutex_enter(&ctxtp->intrprocmutex);
113 ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_ININTR;
115 /* notify stop thread that the interrupt is finished */
116 if ((ctxtp->intr_flags & HCI1394_ISO_CTXT_STOP) && !in_stop) {
117 cv_signal(&ctxtp->intr_cv);
121 /* free the intr processing context mutex before error checks */
122 mutex_exit(&ctxtp->intrprocmutex);
124 /* if context stopped, invoke callback */
125 if (retcode == HCI1394_IXL_INTR_DMASTOP) {
126 hci1394_do_stop(soft_statep, ctxtp, B_TRUE, ID1394_DONE);
128 /* if error, stop and invoke callback */
129 if (retcode == HCI1394_IXL_INTR_DMALOST) {
130 hci1394_do_stop(soft_statep, ctxtp, B_TRUE, ID1394_FAIL);
135 * hci1394_ixl_dma_sync()
136 * the heart of interrupt processing, this routine correlates where the
137 * hardware is for the specified context with the IXL program. Invokes
138 * callbacks as needed. Also called by "update" to make sure ixl is
139 * sync'ed up with where the hardware is.
140 * Returns one of the ixl_intr defined return codes - HCI1394_IXL_INTR...
141 * {..._DMALOST, ..._DMASTOP, ..._NOADV,... _NOERROR}
/*
 * Walks the IXL program from where execution last left off, correlating
 * it with hardware DMA completion state.  Xfer-start commands are handed
 * to hci1394_ixl_intr_check_xfer(); JUMP / STORE_TIMESTAMP / CALLBACK
 * commands are interpreted inline; all other opcodes are skipped.
 * Returns HCI1394_IXL_INTR_{NOERROR,NOADV,DMASTOP,DMALOST}.
 *
 * NOTE(review): the 'int' return type (original line 143) is not visible
 * in this extract -- it appears to have been dropped by the extraction.
 */
144 hci1394_ixl_dma_sync(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp)
146 ixl1394_command_t *ixlp = NULL; /* current ixl command */
147 ixl1394_command_t *ixlnextp; /* next ixl command */
148 uint16_t ixlopcode;
149 uint16_t timestamp;
150 int donecode;
151 boolean_t isdone;
153 void (*callback)(opaque_t, struct ixl1394_callback *);
/* caller (hci1394_ixl_interrupt) must have dropped the mutex already */
155 ASSERT(MUTEX_NOT_HELD(&ctxtp->intrprocmutex));
157 /* xfer start ixl cmd where last left off */
158 ixlnextp = ctxtp->ixl_execp;
160 /* last completed descriptor block's timestamp */
161 timestamp = ctxtp->dma_last_time;
164 * follow execution path in IXL, until find dma descriptor in IXL
165 * xfer command whose status isn't set or until run out of IXL cmds
167 while (ixlnextp != NULL) {
168 ixlp = ixlnextp;
169 ixlnextp = ixlp->next_ixlp;
/* mask off the update flag so updateable variants match base opcodes */
170 ixlopcode = ixlp->ixl_opcode & ~IXL1394_OPF_UPDATE;
173 * process IXL commands: xfer start, callback, store timestamp
174 * and jump and ignore the others
177 /* determine if this is an xfer start IXL command */
178 if (((ixlopcode & IXL1394_OPF_ISXFER) != 0) &&
179 ((ixlopcode & IXL1394_OPTY_MASK) != 0)) {
181 /* process xfer cmd to see if HW has been here */
182 isdone = hci1394_ixl_intr_check_xfer(soft_statep, ctxtp,
183 ixlp, &ixlnextp, &timestamp, &donecode);
185 if (isdone == B_TRUE) {
186 return (donecode);
189 /* continue to process next IXL command */
190 continue;
193 /* else check if IXL cmd - jump, callback or store timestamp */
194 switch (ixlopcode) {
195 case IXL1394_OP_JUMP:
197 * set next IXL cmd to label ptr in current IXL jump cmd
199 ixlnextp = ((ixl1394_jump_t *)ixlp)->label;
200 break;
202 case IXL1394_OP_STORE_TIMESTAMP:
204 * set last timestamp value recorded into current IXL
205 * cmd
207 ((ixl1394_store_timestamp_t *)ixlp)->timestamp =
208 timestamp;
209 break;
211 case IXL1394_OP_CALLBACK:
213 * if callback function is specified, call it with IXL
214 * cmd addr. Make sure to grab the lock before setting
215 * the "in callback" flag in intr_flags.
217 mutex_enter(&ctxtp->intrprocmutex);
218 ctxtp->intr_flags |= HCI1394_ISO_CTXT_INCALL;
219 mutex_exit(&ctxtp->intrprocmutex);
221 callback = ((ixl1394_callback_t *)ixlp)->callback;
222 if (callback != NULL) {
/* client callback runs with no driver locks held */
223 callback(ctxtp->global_callback_arg,
224 (ixl1394_callback_t *)ixlp);
228 * And grab the lock again before clearing
229 * the "in callback" flag.
231 mutex_enter(&ctxtp->intrprocmutex);
232 ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INCALL;
233 mutex_exit(&ctxtp->intrprocmutex);
234 break;
239 * If we jumped to NULL because of an updateable JUMP, set ixl_execp
240 * back to ixlp. The destination label might get updated to a
241 * non-NULL value.
243 if ((ixlp != NULL) && (ixlp->ixl_opcode == IXL1394_OP_JUMP_U)) {
244 ctxtp->ixl_execp = ixlp;
245 return (HCI1394_IXL_INTR_NOERROR);
248 /* save null IXL cmd and depth and last timestamp */
249 ctxtp->ixl_execp = NULL;
250 ctxtp->ixl_exec_depth = 0;
251 ctxtp->dma_last_time = timestamp;
253 ctxtp->rem_noadv_intrs = 0;
256 /* return stopped status if at end of IXL cmds & context stopped */
257 if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
258 return (HCI1394_IXL_INTR_DMASTOP);
261 /* else interrupt processing is lost */
262 return (HCI1394_IXL_INTR_DMALOST);
266 * hci1394_ixl_intr_check_xfer()
267 * Process given IXL xfer cmd, checking status of each dma descriptor block
268 * for the command until find one whose status isn't set or until full depth
269 * reached at current IXL command or until find hardware skip has occurred.
271 * Returns B_TRUE if processing should terminate (either have stopped
272 * or encountered an error), and B_FALSE if it should continue looking.
273 * If B_TRUE, donecodep contains the reason: HCI1394_IXL_INTR_DMALOST,
274 * HCI1394_IXL_INTR_DMASTOP, HCI1394_IXL_INTR_NOADV, or
275 * HCI1394_IXL_INTR_NOERROR. NOERROR means that the current location
276 * has been determined and do not need to look further.
/*
 * Examines one xfer-start IXL command: walks its DMA descriptor blocks
 * (depth 0..cnt-1) counting completed ones, and when it hits one whose
 * status is unset, consults hci1394_ixl_intr_check_done() to classify
 * the situation as lost / stopped / done / skipped.  Returns B_TRUE with
 * *donecodep set when overall processing should terminate; B_FALSE when
 * the caller should continue with the next IXL command (possibly
 * redirected through *ixlnextpp after a hardware skip).
 */
278 static boolean_t
279 hci1394_ixl_intr_check_xfer(hci1394_state_t *soft_statep,
280 hci1394_iso_ctxt_t *ctxtp, ixl1394_command_t *ixlp,
281 ixl1394_command_t **ixlnextpp, uint16_t *timestampp, int *donecodep)
283 uint_t dma_advances;
284 int intrstatus;
285 uint_t skipped;
286 hci1394_xfer_ctl_t *xferctlp;
287 uint16_t ixldepth;
288 uint16_t ixlopcode;
290 *donecodep = 0;
291 dma_advances = 0;
292 ixldepth = ctxtp->ixl_exec_depth;
293 ixlopcode = ixlp->ixl_opcode & ~IXL1394_OPF_UPDATE;
295 /* get control struct for this xfer start IXL command */
296 xferctlp = (hci1394_xfer_ctl_t *)ixlp->compiler_privatep;
298 skipped = 0;
299 while ((skipped == 0) && (ixldepth < xferctlp->cnt)) {
301 * check if status is set in dma descriptor
302 * block at cur depth in cur xfer start IXL cmd
304 if (hci1394_ixl_check_status(&xferctlp->dma[ixldepth],
305 ixlopcode, timestampp, B_TRUE) != 0) {
307 /* advance depth to next desc block in cur IXL cmd */
308 ixldepth++;
311 * count dma desc blks whose status was set
312 * (i.e. advanced to next dma desc)
314 dma_advances++;
315 continue;
318 /* if get to here, status is not set */
321 * cur IXL cmd dma desc status not set. save IXL cur cmd
322 * and depth and last timestamp for next time.
324 ctxtp->ixl_execp = ixlp;
325 ctxtp->ixl_exec_depth = ixldepth;
326 ctxtp->dma_last_time = *timestampp;
329 * check if dma descriptor processing location is indeterminate
330 * (lost), context has either stopped, is done, or has skipped
332 intrstatus = hci1394_ixl_intr_check_done(soft_statep, ctxtp);
333 if (intrstatus == IXL_CHECK_LOST) {
335 * location indeterminate, try once more to determine
336 * current state. First, recheck if status has become
337 * set in cur dma descriptor block. (don't reset status
338 * here if is set)
/*
 * NOTE(review): this recheck passes the 4th argument as literal 1 and
 * compares against B_TRUE, while the first check above passes B_TRUE
 * and compares against 0; the comment says status must NOT be reset
 * here.  Verify the 4th argument's meaning (reset-status flag?) against
 * hci1394_ixl_check_status() before relying on this path.
 */
340 if (hci1394_ixl_check_status(&xferctlp->dma[ixldepth],
341 ixlopcode, timestampp, 1) != B_TRUE) {
342 /* resume from where we left off */
343 skipped = 0;
344 continue;
348 * status not set, check intr processing
349 * completion status again
351 if ((intrstatus = hci1394_ixl_intr_check_done(
352 soft_statep, ctxtp)) == IXL_CHECK_LOST) {
354 * location still indeterminate,
355 * processing is lost
357 *donecodep = HCI1394_IXL_INTR_DMALOST;
359 return (B_TRUE);
364 * if dma processing stopped. current location has been
365 * determined.
367 if (intrstatus == IXL_CHECK_STOP) {
369 * save timestamp, clear currently executing IXL
370 * command and depth. return stopped.
372 ctxtp->ixl_execp = NULL;
373 ctxtp->ixl_exec_depth = 0;
374 ctxtp->dma_last_time = *timestampp;
375 ctxtp->rem_noadv_intrs = 0;
377 *donecodep = HCI1394_IXL_INTR_DMASTOP;
379 return (B_TRUE);
383 * dma processing done for now. current location has
384 * has been determined
386 if (intrstatus == IXL_CHECK_DONE) {
388 * if in update processing call:
389 * clear update processing flag & return ok.
390 * if dma advances happened, reset to max allowed.
391 * however, if none have, don't reduce remaining
392 * amount - that's for real interrupt call to adjust.
394 if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {
396 if (dma_advances > 0) {
397 ctxtp->rem_noadv_intrs =
398 ctxtp->max_noadv_intrs;
401 *donecodep = HCI1394_IXL_INTR_NOERROR;
403 return (B_TRUE);
407 * else, not in update call processing, are in normal
408 * intr call. if no dma statuses were found set
409 * (i.e. no dma advances), reduce remaining count of
410 * interrupts allowed with no I/O completions
412 if (dma_advances == 0) {
413 ctxtp->rem_noadv_intrs--;
414 } else {
416 * else some dma statuses were found set.
417 * reinit remaining count of interrupts allowed
418 * with no I/O completions
420 ctxtp->rem_noadv_intrs = ctxtp->max_noadv_intrs;
424 * if no remaining count of interrupts allowed with no
425 * I/O completions, return failure (no dma advance after
426 * max retries), else return ok
428 if (ctxtp->rem_noadv_intrs == 0) {
429 *donecodep = HCI1394_IXL_INTR_NOADV;
431 return (B_TRUE);
434 *donecodep = HCI1394_IXL_INTR_NOERROR;
436 return (B_TRUE);
440 * else (intrstatus == IXL_CHECK_SKIP) indicating skip has
441 * occured, retrieve current IXL cmd, depth, and timestamp and
442 * continue interrupt processing
/* check_done() stored the skip destination into the context fields */
444 skipped = 1;
445 *ixlnextpp = ctxtp->ixl_execp;
446 ixldepth = ctxtp->ixl_exec_depth;
447 *timestampp = ctxtp->dma_last_time;
450 * also count as 1, intervening skips to next posted
451 * dma descriptor.
453 dma_advances++;
457 * if full depth reached at current IXL cmd, set back to start for next
458 * IXL xfer command that will be processed
460 if ((skipped == 0) && (ixldepth >= xferctlp->cnt)) {
461 ctxtp->ixl_exec_depth = 0;
465 * make sure rem_noadv_intrs is reset to max if we advanced.
467 if (dma_advances > 0) {
468 ctxtp->rem_noadv_intrs = ctxtp->max_noadv_intrs;
471 /* continue to process next IXL command */
472 return (B_FALSE);
476 * hci1394_ixl_intr_check_done()
477 * checks if context has stopped, or if able to match hardware location
478 * with an expected IXL program location.
/*
 * Classifies the hardware's position relative to the current IXL command
 * (ctxtp->ixl_execp at ctxtp->ixl_exec_depth).  Uses the context's DMA
 * command-pointer register (when HCI1394_ISO_CTXT_CMDREG is set) and the
 * descriptor status words to decide among IXL_CHECK_{STOP,DONE,SKIP,LOST}.
 * On IXL_CHECK_SKIP it updates ctxtp->ixl_execp/ixl_exec_depth to the
 * skip destination so the caller can resume there.
 */
480 static int
481 hci1394_ixl_intr_check_done(hci1394_state_t *soft_statep,
482 hci1394_iso_ctxt_t *ctxtp)
484 ixl1394_command_t *ixlp;
485 hci1394_xfer_ctl_t *xferctlp;
486 uint_t ixldepth;
487 hci1394_xfer_ctl_dma_t *dma;
488 ddi_acc_handle_t acc_hdl;
489 ddi_dma_handle_t dma_hdl;
490 uint32_t desc_status;
491 hci1394_desc_t *hcidescp;
492 off_t hcidesc_off;
493 int err;
494 uint32_t dma_cmd_cur_loc;
495 uint32_t dma_cmd_last_loc;
496 uint32_t dma_loc_check_enabled;
497 uint32_t dmastartp;
498 uint32_t dmaendp;
500 uint_t rem_dma_skips;
501 uint16_t skipmode;
502 uint16_t skipdepth;
503 ixl1394_command_t *skipdestp;
504 ixl1394_command_t *skipxferp;
507 * start looking through the IXL list from the xfer start command where
508 * we last left off (for composite opcodes, need to start from the
509 * appropriate depth).
512 ixlp = ctxtp->ixl_execp;
513 ixldepth = ctxtp->ixl_exec_depth;
515 /* control struct for xfer start IXL command */
516 xferctlp = (hci1394_xfer_ctl_t *)ixlp->compiler_privatep;
517 dma = &xferctlp->dma[ixldepth];
519 /* determine if dma location checking is enabled */
520 if ((dma_loc_check_enabled =
521 (ctxtp->ctxt_flags & HCI1394_ISO_CTXT_CMDREG)) != 0) {
523 /* if so, get current dma command location */
524 dma_cmd_last_loc = 0xFFFFFFFF;
526 while ((dma_cmd_cur_loc = HCI1394_ISOCH_CTXT_CMD_PTR(
527 soft_statep, ctxtp)) != dma_cmd_last_loc) {
529 /* retry get until location register stabilizes */
530 dma_cmd_last_loc = dma_cmd_cur_loc;
535 * compare the (bound) address of the DMA descriptor corresponding to
536 * the current xfer IXL command against the current value in the
537 * DMA location register. If exists and if matches, then
538 * if context stopped, return stopped, else return done.
540 * The dma start address is the first address of the descriptor block.
541 * Since "Z" is a count of 16-byte descriptors in the block, calculate
542 * the end address by adding Z*16 to the start addr.
544 dmastartp = dma->dma_bound & ~DESC_Z_MASK;
545 dmaendp = dmastartp + ((dma->dma_bound & DESC_Z_MASK) << 4);
/* dma_cmd_cur_loc is only read when dma_loc_check_enabled is non-zero */
547 if (dma_loc_check_enabled &&
548 ((dma_cmd_cur_loc >= dmastartp) && (dma_cmd_cur_loc < dmaendp))) {
550 if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
551 return (IXL_CHECK_STOP);
554 return (IXL_CHECK_DONE);
558 * if receive mode:
560 if ((ixlp->ixl_opcode & IXL1394_OPF_ONXMIT) == 0) {
562 * if context stopped, return stopped, else,
563 * if there is no current dma location reg, return done
564 * else return location indeterminate
566 if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
567 return (IXL_CHECK_STOP);
569 if (!dma_loc_check_enabled) {
570 return (IXL_CHECK_DONE);
573 return (IXL_CHECK_LOST);
577 * else is xmit mode:
578 * check status of current xfer IXL command's dma descriptor
580 acc_hdl = dma->dma_buf->bi_handle;
581 dma_hdl = dma->dma_buf->bi_dma_handle;
582 hcidescp = (hci1394_desc_t *)dma->dma_descp;
583 hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;
585 /* Sync the descriptor before we get the status */
586 err = ddi_dma_sync(dma_hdl, hcidesc_off, sizeof (hci1394_desc_t),
587 DDI_DMA_SYNC_FORCPU);
/*
 * NOTE(review): the body of this error branch is not visible in this
 * extract (lines dropped) -- confirm what the original does on a
 * ddi_dma_sync() failure before modifying this path.
 */
588 if (err != DDI_SUCCESS) {
590 desc_status = ddi_get32(acc_hdl, &hcidescp->status);
592 if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {
595 * if status is now set here, return skipped, to cause calling
596 * function to continue, even though location hasn't changed
598 return (IXL_CHECK_SKIP);
602 * At this point, we have gotten to a DMA descriptor with an empty
603 * status. This is not enough information however to determine that
604 * we've found all processed DMA descriptors because during cycle-lost
605 * conditions, the HW will skip over some descriptors without writing
606 * status. So we have to look ahead until we're convinced that the HW
607 * hasn't jumped ahead.
609 * Follow the IXL skip-to links until find one whose status is set
610 * or until dma location register (if any) matches an xfer IXL
611 * command's dma location or until have examined max_dma_skips
612 * IXL commands.
614 rem_dma_skips = ctxtp->max_dma_skips;
616 while (rem_dma_skips-- > 0) {
619 * get either IXL command specific or
620 * system default skipmode info
622 skipdepth = 0;
623 if (xferctlp->skipmodep != NULL) {
624 skipmode = xferctlp->skipmodep->skipmode;
625 skipdestp = xferctlp->skipmodep->label;
626 skipxferp = (ixl1394_command_t *)
627 xferctlp->skipmodep->compiler_privatep;
628 } else {
629 skipmode = ctxtp->default_skipmode;
630 skipdestp = ctxtp->default_skiplabelp;
631 skipxferp = ctxtp->default_skipxferp;
634 switch (skipmode) {
636 case IXL1394_SKIP_TO_SELF:
638 * mode is skip to self:
639 * if context is stopped, return stopped, else
640 * if dma location reg not enabled, return done
641 * else, return location indeterminate
643 if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) ==
644 0) {
645 return (IXL_CHECK_STOP);
648 if (!dma_loc_check_enabled) {
649 return (IXL_CHECK_DONE);
652 return (IXL_CHECK_LOST);
654 case IXL1394_SKIP_TO_NEXT:
656 * mode is skip to next:
657 * set potential skip target to current command at
658 * next depth
660 skipdestp = ixlp;
661 skipxferp = ixlp;
662 skipdepth = ixldepth + 1;
665 * else if at max depth at current cmd adjust to next
666 * IXL command.
668 * (NOTE: next means next IXL command along execution
669 * path, whatever IXL command it might be. e.g. store
670 * timestamp or callback or label or jump or send... )
672 if (skipdepth >= xferctlp->cnt) {
673 skipdepth = 0;
674 skipdestp = ixlp->next_ixlp;
675 skipxferp = xferctlp->execp;
678 /* evaluate skip to status further, below */
679 break;
682 case IXL1394_SKIP_TO_LABEL:
684 * mode is skip to label:
685 * set skip destination depth to 0 (should be
686 * redundant)
688 skipdepth = 0;
690 /* evaluate skip to status further, below */
691 break;
693 case IXL1394_SKIP_TO_STOP:
695 * mode is skip to stop:
696 * set all xfer and destination skip to locations to
697 * null
699 skipxferp = NULL;
700 skipdestp = NULL;
701 skipdepth = 0;
703 /* evaluate skip to status further, below */
704 break;
706 } /* end switch */
709 * if no xfer IXL command follows at or after current skip-to
710 * location
712 if (skipxferp == NULL) {
714 * if context is stopped, return stopped, else
715 * if dma location reg not enabled, return done
716 * else, return location indeterminate
718 if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) ==
719 0) {
720 return (IXL_CHECK_STOP);
723 if (!dma_loc_check_enabled) {
724 return (IXL_CHECK_DONE);
726 return (IXL_CHECK_LOST);
730 * if the skip to xfer IXL dma descriptor's status is set,
731 * then execution did skip
733 xferctlp = (hci1394_xfer_ctl_t *)skipxferp->compiler_privatep;
734 dma = &xferctlp->dma[skipdepth];
735 acc_hdl = dma->dma_buf->bi_handle;
736 dma_hdl = dma->dma_buf->bi_dma_handle;
737 hcidescp = (hci1394_desc_t *)dma->dma_descp;
738 hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;
740 /* Sync the descriptor before we get the status */
741 err = ddi_dma_sync(dma_hdl, hcidesc_off,
742 sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORCPU);
/* NOTE(review): error-branch body not visible in this extract (see above) */
743 if (err != DDI_SUCCESS) {
745 desc_status = ddi_get32(acc_hdl, &hcidescp->status);
747 if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {
750 * adjust to continue from skip to IXL command and
751 * return skipped, to have calling func continue.
752 * (Note: next IXL command may be any allowed IXL
753 * command)
755 ctxtp->ixl_execp = skipdestp;
756 ctxtp->ixl_exec_depth = skipdepth;
758 return (IXL_CHECK_SKIP);
762 * if dma location command register checking is enabled,
763 * and the skip to xfer IXL dma location matches current
764 * dma location register value, execution did skip
766 dmastartp = dma->dma_bound & ~DESC_Z_MASK;
767 dmaendp = dmastartp + ((dma->dma_bound & DESC_Z_MASK) << 4);
769 if (dma_loc_check_enabled && ((dma_cmd_cur_loc >= dmastartp) &&
770 (dma_cmd_cur_loc < dmaendp))) {
772 /* if the context is stopped, return stopped */
773 if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) ==
774 0) {
775 return (IXL_CHECK_STOP);
778 * adjust to continue from skip to IXL command and
779 * return skipped, to have calling func continue
780 * (Note: next IXL command may be any allowed IXL cmd)
782 ctxtp->ixl_execp = skipdestp;
783 ctxtp->ixl_exec_depth = skipdepth;
785 return (IXL_CHECK_SKIP);
789 * else, advance working current locn to skipxferp and
790 * skipdepth and continue skip evaluation loop processing
792 ixlp = skipxferp;
793 ixldepth = skipdepth;
795 } /* end while */
798 * didn't find dma status set, nor location reg match, along skip path
800 * if context is stopped, return stopped,
802 * else if no current location reg active don't change context values,
803 * just return done (no skip)
805 * else, return location indeterminate
808 if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
809 return (IXL_CHECK_STOP);
811 if (!dma_loc_check_enabled) {
812 return (IXL_CHECK_DONE);
815 return (IXL_CHECK_LOST);
819 * hci1394_isoch_cycle_inconsistent()
820 * Called during interrupt notification to indicate that the cycle time
821 * has changed unexpectedly. We need to take this opportunity to
822 * update our tracking of each running transmit context's execution.
823 * cycle_inconsistent only affects transmit, so recv contexts are left alone.
/*
 * hci1394_isoch_cycle_inconsistent()
 *    Handles the OHCI cycle-inconsistent interrupt: re-syncs every in-use,
 *    running transmit context via hci1394_ixl_interrupt(), then rate-limits
 *    the interrupt -- if it fires more than counter_thresh times with less
 *    than delta_t_thresh ns between occurrences, the interrupt source is
 *    disabled (until the next bus reset) and a console note is logged.
 */
825 void
826 hci1394_isoch_cycle_inconsistent(hci1394_state_t *soft_statep)
828 int i, cnt_thresh;
829 boolean_t note;
830 hrtime_t current_time, last_time, delta, delta_thresh;
831 hci1394_iso_ctxt_t *ctxtp; /* current context */
833 ASSERT(soft_statep);
/* ack the interrupt before servicing the contexts */
835 hci1394_ohci_intr_clear(soft_statep->ohci, OHCI_INTR_CYC_INCONSISTENT);
837 /* grab the mutex before checking each context's INUSE and RUNNING */
838 mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
840 /* check for transmit contexts which are inuse and running */
841 for (i = 0; i < soft_statep->isoch->ctxt_xmit_count; i++) {
842 ctxtp = &soft_statep->isoch->ctxt_xmit[i];
844 if ((ctxtp->ctxt_flags &
845 (HCI1394_ISO_CTXT_INUSE | HCI1394_ISO_CTXT_RUNNING)) != 0) {
/*
 * the list mutex is dropped around the per-context service call;
 * NOTE(review): the xmit context array can therefore be mutated by
 * others mid-loop -- presumably safe because the array itself is
 * static in the soft state; confirm.
 */
847 mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
848 hci1394_ixl_interrupt(soft_statep, ctxtp, B_FALSE);
849 mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
854 * get the current time and calculate the delta between now and
855 * when the last interrupt was processed. (NOTE: if the time
856 * returned by gethrtime() rolls-over while we are counting these
857 * interrupts, we will incorrectly restart the counting process.
858 * However, because the probability of this happening is small and
859 * not catching the roll-over will AT MOST double the time it takes
860 * us to discover and correct from this condition, we can safely
861 * ignore it.)
863 current_time = gethrtime();
864 last_time = soft_statep->isoch->cycle_incon_thresh.last_intr_time;
865 delta = current_time - last_time;
868 * compare the calculated delta to the delta T threshold. If it
869 * is less than the threshold, then increment the counter. If it
870 * is not then reset the counter.
872 delta_thresh = soft_statep->isoch->cycle_incon_thresh.delta_t_thresh;
873 if (delta < delta_thresh)
874 soft_statep->isoch->cycle_incon_thresh.delta_t_counter++;
875 else
876 soft_statep->isoch->cycle_incon_thresh.delta_t_counter = 0;
879 * compare the counter to the counter threshold. If it is greater,
880 * then disable the cycle inconsistent interrupt.
882 cnt_thresh = soft_statep->isoch->cycle_incon_thresh.counter_thresh;
883 note = B_FALSE;
884 if (soft_statep->isoch->cycle_incon_thresh.delta_t_counter >
885 cnt_thresh) {
886 hci1394_ohci_intr_disable(soft_statep->ohci,
887 OHCI_INTR_CYC_INCONSISTENT);
888 note = B_TRUE;
891 /* save away the current time into the last_intr_time field */
892 soft_statep->isoch->cycle_incon_thresh.last_intr_time = current_time;
894 mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
/* log outside the mutex */
896 if (note == B_TRUE) {
897 cmn_err(CE_NOTE, "!hci1394(%d): cycle_inconsistent interrupt "
898 "disabled until next bus reset",
899 soft_statep->drvinfo.di_instance);
905 * hci1394_isoch_cycle_lost()
906 * Interrupt indicates an expected cycle_start packet (and therefore our
907 * opportunity to transmit) did not show up. Update our tracking of each
908 * running transmit context.
/*
 * hci1394_isoch_cycle_lost()
 *    Handles the OHCI cycle-lost interrupt (an expected cycle-start packet
 *    did not arrive): re-syncs every in-use, running transmit context, then
 *    applies the same rate-limiting scheme as the cycle-inconsistent
 *    handler, using the cycle_lost_thresh state -- storms of this interrupt
 *    disable it until the next bus reset, with a console note.
 */
910 void
911 hci1394_isoch_cycle_lost(hci1394_state_t *soft_statep)
913 int i, cnt_thresh;
914 boolean_t note;
915 hrtime_t current_time, last_time, delta, delta_thresh;
916 hci1394_iso_ctxt_t *ctxtp; /* current context */
918 ASSERT(soft_statep);
/* ack the interrupt before servicing the contexts */
920 hci1394_ohci_intr_clear(soft_statep->ohci, OHCI_INTR_CYC_LOST);
922 /* grab the mutex before checking each context's INUSE and RUNNING */
923 mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
925 /* check for transmit contexts which are inuse and running */
926 for (i = 0; i < soft_statep->isoch->ctxt_xmit_count; i++) {
927 ctxtp = &soft_statep->isoch->ctxt_xmit[i];
929 if ((ctxtp->ctxt_flags &
930 (HCI1394_ISO_CTXT_INUSE | HCI1394_ISO_CTXT_RUNNING)) != 0) {
/* list mutex dropped around the per-context service call */
932 mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
933 hci1394_ixl_interrupt(soft_statep, ctxtp, B_FALSE);
934 mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
939 * get the current time and calculate the delta between now and
940 * when the last interrupt was processed. (NOTE: if the time
941 * returned by gethrtime() rolls-over while we are counting these
942 * interrupts, we will incorrectly restart the counting process.
943 * However, because the probability of this happening is small and
944 * not catching the roll-over will AT MOST double the time it takes
945 * us to discover and correct from this condition, we can safely
946 * ignore it.)
948 current_time = gethrtime();
949 last_time = soft_statep->isoch->cycle_lost_thresh.last_intr_time;
950 delta = current_time - last_time;
953 * compare the calculated delta to the delta T threshold. If it
954 * is less than the threshold, then increment the counter. If it
955 * is not then reset the counter.
957 delta_thresh = soft_statep->isoch->cycle_lost_thresh.delta_t_thresh;
958 if (delta < delta_thresh)
959 soft_statep->isoch->cycle_lost_thresh.delta_t_counter++;
960 else
961 soft_statep->isoch->cycle_lost_thresh.delta_t_counter = 0;
964 * compare the counter to the counter threshold. If it is greater,
965 * then disable the cycle lost interrupt.
967 cnt_thresh = soft_statep->isoch->cycle_lost_thresh.counter_thresh;
968 note = B_FALSE;
969 if (soft_statep->isoch->cycle_lost_thresh.delta_t_counter >
970 cnt_thresh) {
971 hci1394_ohci_intr_disable(soft_statep->ohci,
972 OHCI_INTR_CYC_LOST);
973 note = B_TRUE;
976 /* save away the current time into the last_intr_time field */
977 soft_statep->isoch->cycle_lost_thresh.last_intr_time = current_time;
979 mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
/* log outside the mutex */
981 if (note == B_TRUE) {
982 cmn_err(CE_NOTE, "!hci1394(%d): cycle_lost interrupt "
983 "disabled until next bus reset",
984 soft_statep->drvinfo.di_instance);