/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * hci1394_ixl_update.c
 *    Isochronous IXL update routines.
 *    Routines used to dynamically update a compiled and presumably running
 *    IXL program.
 */
#include <sys/types.h>
#include <sys/tnf_probe.h>

#include <sys/1394/h1394.h>
#include <sys/1394/ixl1394.h>	/* IXL opcodes & data structs */
#include <sys/1394/adapters/hci1394.h>
/* local defines for hci1394_ixl_update_prepare return codes */
#define	IXL_PREP_READY		1
#define	IXL_PREP_SUCCESS	0
#define	IXL_PREP_FAILURE	(-1)
/*
 * variable used to indicate the number of times update will wait for
 * interrupt routine to complete.
 */
int hci1394_upd_retries_before_fail = 50;
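/*
 * Each retry in hci1394_ixl_update_enable() briefly drops the context
 * mutex and busy-waits before trying again, so this value bounds how long
 * an update will wait for in-progress interrupt processing to finish
 * before failing with IXL1394_EUPDATE_DISALLOWED.
 */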
/* IXL runtime update static functions */
static int hci1394_ixl_update_prepare(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_prep_jump(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_prep_set_skipmode(hci1394_ixl_update_vars_t
    *uvp);
static int hci1394_ixl_update_prep_set_tagsync(hci1394_ixl_update_vars_t
    *uvp);
static int hci1394_ixl_update_prep_recv_pkt(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_prep_recv_buf(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_prep_send_pkt(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_prep_send_buf(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_perform(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_evaluate(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_analysis(hci1394_ixl_update_vars_t *uvp);
static void hci1394_ixl_update_set_locn_info(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_enable(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_endup(hci1394_ixl_update_vars_t *uvp);
/*
 * IXL commands and included fields which can be updated:
 * IXL1394_OP_CALLBACK:		callback(), callback_data
 * IXL1394_OP_JUMP:		label
 * IXL1394_OP_RECV_PKT:		ixl_buf, size, mem_bufp
 * IXL1394_OP_RECV_PKT_ST:	ixl_buf, size, mem_bufp
 * IXL1394_OP_RECV_BUF(ppb):	ixl_buf, size, pkt_size, mem_bufp, buf_offset
 * IXL1394_OP_RECV_BUF(fill):	ixl_buf, size, pkt_size, mem_bufp, buf_offset
 * IXL1394_OP_SEND_PKT:		ixl_buf, size, mem_bufp
 * IXL1394_OP_SEND_PKT_ST:	ixl_buf, size, mem_bufp
 * IXL1394_OP_SEND_PKT_WHDR_ST:	ixl_buf, size, mem_bufp
 * IXL1394_OP_SEND_BUF:		ixl_buf, size, pkt_size, mem_bufp, buf_offset
 * IXL1394_OP_SET_TAGSYNC:	tag, sync
 * IXL1394_OP_SET_SKIPMODE:	skipmode, label
 *
 * IXL commands which can not be updated:
 * IXL1394_OP_SEND_HDR_ONLY
 * IXL1394_OP_SEND_NO_PKT
 * IXL1394_OP_STORE_VALUE
 * IXL1394_OP_STORE_TIMESTAMP
 * IXL1394_OP_SET_SYNCWAIT
 */
/*
 * hci1394_ixl_update()
 *    main entrypoint into dynamic update code: initializes temporary
 *    update variables, evaluates request, coordinates with potentially
 *    simultaneous run of interrupt stack, evaluates likelihood of success,
 *    performs the update, checks if completed, performs cleanup
 *    resulting from coordination with interrupt stack.
 */
int
hci1394_ixl_update(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
    ixl1394_command_t *ixlnewp, ixl1394_command_t *ixloldp,
    uint_t riskoverride, int *resultp)
{
	hci1394_ixl_update_vars_t uv;	/* update work variables structure */
	int	prepstatus;
	int	ret;
	/* save caller specified values in update work variables structure */
	uv.soft_statep = soft_statep;
	uv.ctxtp = ctxtp;
	uv.ixlnewp = ixlnewp;
	uv.ixloldp = ixloldp;
	uv.risklevel = riskoverride;

	/* initialize remainder of update work variables */
	uv.ixlxferp = NULL;
	uv.skipxferp = NULL;
	uv.skipmode = 0;
	uv.skipaddr = 0;
	uv.jumpaddr = 0;
	uv.pkthdr1 = 0;
	uv.pkthdr2 = 0;
	uv.bufaddr = 0;
	uv.bufsize = 0;
	uv.ixl_opcode = uv.ixlnewp->ixl_opcode;
	uv.hcihdr = 0;
	uv.hcistatus = 0;
	uv.hci_offset = 0;
	uv.hdr_offset = 0;

	/* set done ok return status */
	uv.upd_status = 0;
	/* evaluate request and prepare to perform update */
	prepstatus = hci1394_ixl_update_prepare(&uv);
	if (prepstatus != IXL_PREP_READY) {
		/*
		 * if either done or nothing to do or an evaluation error,
		 * return update status
		 */
		*resultp = uv.upd_status;

		/* if prep evaluation error, return failure */
		if (prepstatus != IXL_PREP_SUCCESS) {
			return (DDI_FAILURE);
		}

		/* if no action or update done, return update successful */
		return (DDI_SUCCESS);
	}
	/* perform update processing reservation of interrupt context */
	ret = hci1394_ixl_update_enable(&uv);
	if (ret != DDI_SUCCESS) {

		/* error acquiring control of context - return */
		*resultp = uv.upd_status;

		return (DDI_FAILURE);
	}
	/* perform update risk analysis */
	if (hci1394_ixl_update_analysis(&uv) != DDI_SUCCESS) {
		/*
		 * return, if excessive risk or dma execution processing lost
		 * (note: caller risk override not yet implemented)
		 */

		/* attempt intr processing cleanup, unless err is dmalost */
		if (uv.upd_status != IXL1394_EPRE_UPD_DMALOST) {
			(void) hci1394_ixl_update_endup(&uv);
		} else {
			/*
			 * error is dmalost, just release interrupt context.
			 * take the lock here to ensure an atomic read, modify,
			 * write of the "intr_flags" field while we try to
			 * clear the "in update" flag.  protects from the
			 * interrupt routine.
			 */
			mutex_enter(&ctxtp->intrprocmutex);
			ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
			mutex_exit(&ctxtp->intrprocmutex);
		}

		*resultp = uv.upd_status;

		return (DDI_FAILURE);
	}
	/* perform requested update */
	if (hci1394_ixl_update_perform(&uv) != DDI_SUCCESS) {
		/*
		 * if non-completion condition, return update status.
		 * attempt interrupt processing cleanup first
		 */
		(void) hci1394_ixl_update_endup(&uv);

		*resultp = uv.upd_status;

		return (DDI_FAILURE);
	}
	/* evaluate update completion, setting completion status */
	if (hci1394_ixl_update_evaluate(&uv) != DDI_SUCCESS) {
		/*
		 * update failed - bad, just release interrupt context.
		 * take the lock here too (just like above) to ensure an
		 * atomic read, modify, write of the "intr_flags" field
		 * while we try to clear the "in update" flag.  protects
		 * from the interrupt routine.
		 */
		mutex_enter(&ctxtp->intrprocmutex);
		ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
		mutex_exit(&ctxtp->intrprocmutex);

		/* if DMA stopped or lost, formally stop context */
		if (uv.upd_status == HCI1394_IXL_INTR_DMASTOP) {
			hci1394_do_stop(soft_statep, ctxtp, B_TRUE,
			    ID1394_DONE);
		} else if (uv.upd_status == HCI1394_IXL_INTR_DMALOST) {
			hci1394_do_stop(soft_statep, ctxtp, B_TRUE,
			    ID1394_FAIL);
		}

		*resultp = uv.upd_status;

		return (DDI_FAILURE);
	}
	/* perform interrupt processing cleanup */
	uv.upd_status = hci1394_ixl_update_endup(&uv);

	/* return update completion status */
	*resultp = uv.upd_status;

	return (DDI_SUCCESS);
}
/*
 * hci1394_ixl_update_enable()
 *    Used to coordinate dynamic update activities with simultaneous
 *    interrupt handler processing, while holding the context mutex
 *    for as short a time as possible.
 */
static int
hci1394_ixl_update_enable(hci1394_ixl_update_vars_t *uvp)
{
	int		status;
	boolean_t	retry;
	uint_t		remretries;

	retry = B_TRUE;

	/* set arbitrary number of retries before giving up */
	remretries = hci1394_upd_retries_before_fail;
	status = DDI_SUCCESS;

	/*
	 * if waited for completion of interrupt processing generated callback,
	 * retry here
	 */
	ASSERT(MUTEX_NOT_HELD(&uvp->ctxtp->intrprocmutex));
	mutex_enter(&uvp->ctxtp->intrprocmutex);
	while (retry == B_TRUE) {
		retry = B_FALSE;

		/* failure if update processing is already in progress */
		if (uvp->ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {
			uvp->upd_status = IXL1394_EUPDATE_DISALLOWED;
			status = DDI_FAILURE;
		} else if (uvp->ctxtp->intr_flags & HCI1394_ISO_CTXT_ININTR) {
			/*
			 * if have retried max number of times or if this update
			 * request is on the interrupt stack, which means that
			 * the callback function of the target driver initiated
			 * the update, set update failure.
			 */
			if ((remretries <= 0) ||
			    (servicing_interrupt())) {
				uvp->upd_status = IXL1394_EUPDATE_DISALLOWED;
				status = DDI_FAILURE;
			} else {
				/*
				 * if not on interrupt stack and retries not
				 * exhausted, free mutex, wait a short time
				 * and retry
				 */
				remretries--;
				retry = B_TRUE;
				mutex_exit(&uvp->ctxtp->intrprocmutex);
				drv_usecwait(1);
				mutex_enter(&uvp->ctxtp->intrprocmutex);
			}
		} else if (uvp->ctxtp->intr_flags & HCI1394_ISO_CTXT_INCALL) {
			uvp->upd_status = IXL1394_EINTERNAL_ERROR;
			status = DDI_FAILURE;
		}
	}
	/* if context is available, reserve it for this update request */
	if (status == DDI_SUCCESS) {
		uvp->ctxtp->intr_flags |= HCI1394_ISO_CTXT_INUPDATE;
	}

	ASSERT(MUTEX_HELD(&uvp->ctxtp->intrprocmutex));
	mutex_exit(&uvp->ctxtp->intrprocmutex);

	return (status);
}
/*
 * hci1394_ixl_update_endup()
 *    The ending stage of coordinating with simultaneously running interrupts.
 *    Perform interrupt processing sync tasks if we (update) had blocked the
 *    interrupt out when it wanted a turn.
 */
static int
hci1394_ixl_update_endup(hci1394_ixl_update_vars_t *uvp)
{
	int	status;
	hci1394_iso_ctxt_t *ctxtp;

	status = HCI1394_IXL_INTR_NOERROR;
	ctxtp = uvp->ctxtp;
	while (ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {

		if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INTRSET) {
			/*
			 * We don't need to grab the lock here because
			 * the "intr_flags" field is only modified in two
			 * ways - one in UPDATE and one in INTR routine.  Since
			 * we know that it can't be modified simultaneously
			 * in another UPDATE thread - that is assured by the
			 * checks in "update_enable" - we would only be trying
			 * to protect against the INTR thread.  And since we
			 * are going to clear a bit here (and check it again
			 * at the top of the loop) we are not really concerned
			 * about missing its being set by the INTR routine.
			 */
			ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;

			status = hci1394_ixl_dma_sync(uvp->soft_statep, ctxtp);
			if (status == HCI1394_IXL_INTR_DMALOST) {
				/*
				 * Unlike above, we do care here as we are
				 * trying to clear the "in update" flag, and
				 * we don't want that lost because the INTR
				 * routine is trying to set its flag.
				 */
				mutex_enter(&uvp->ctxtp->intrprocmutex);
				ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
				mutex_exit(&uvp->ctxtp->intrprocmutex);
			}
		}

		ASSERT(MUTEX_NOT_HELD(&uvp->ctxtp->intrprocmutex));
		mutex_enter(&uvp->ctxtp->intrprocmutex);
		if (!(ctxtp->intr_flags & HCI1394_ISO_CTXT_INTRSET)) {
			ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
		}
		mutex_exit(&uvp->ctxtp->intrprocmutex);
	}
	/* if DMA stopped or lost, formally stop context */
	if (status == HCI1394_IXL_INTR_DMASTOP) {
		hci1394_do_stop(uvp->soft_statep, ctxtp, B_TRUE, ID1394_DONE);
	} else if (status == HCI1394_IXL_INTR_DMALOST) {
		hci1394_do_stop(uvp->soft_statep, ctxtp, B_TRUE, ID1394_FAIL);
	}

	return (status);
}
/*
 * hci1394_ixl_update_prepare()
 *    Preparation for the actual update (using temp uvp struct)
 */
static int
hci1394_ixl_update_prepare(hci1394_ixl_update_vars_t *uvp)
{
	int	ret;

	/* both new and old ixl commands must be the same */
	if (uvp->ixlnewp->ixl_opcode != uvp->ixloldp->ixl_opcode) {

		uvp->upd_status = IXL1394_EOPCODE_MISMATCH;

		return (IXL_PREP_FAILURE);
	}
	/*
	 * perform evaluation and prepare update based on specific
	 * IXL command type
	 */
	switch (uvp->ixl_opcode) {

	case IXL1394_OP_CALLBACK_U: {
		ixl1394_callback_t *old_callback_ixlp;
		ixl1394_callback_t *new_callback_ixlp;

		old_callback_ixlp = (ixl1394_callback_t *)uvp->ixloldp;
		new_callback_ixlp = (ixl1394_callback_t *)uvp->ixlnewp;

		/* perform update now without further evaluation */
		old_callback_ixlp->callback_arg =
		    new_callback_ixlp->callback_arg;
		old_callback_ixlp->callback = new_callback_ixlp->callback;

		/* nothing else to do, return with done ok status */
		return (IXL_PREP_SUCCESS);
	}

	case IXL1394_OP_JUMP_U:
		ret = hci1394_ixl_update_prep_jump(uvp);
		break;

	case IXL1394_OP_SET_SKIPMODE_U:
		ret = hci1394_ixl_update_prep_set_skipmode(uvp);
		break;

	case IXL1394_OP_SET_TAGSYNC_U:
		ret = hci1394_ixl_update_prep_set_tagsync(uvp);
		break;

	case IXL1394_OP_RECV_PKT_U:
	case IXL1394_OP_RECV_PKT_ST_U:
		ret = hci1394_ixl_update_prep_recv_pkt(uvp);
		break;

	case IXL1394_OP_RECV_BUF_U:
		ret = hci1394_ixl_update_prep_recv_buf(uvp);
		break;

	case IXL1394_OP_SEND_PKT_U:
	case IXL1394_OP_SEND_PKT_ST_U:
	case IXL1394_OP_SEND_PKT_WHDR_ST_U:
		ret = hci1394_ixl_update_prep_send_pkt(uvp);
		break;

	case IXL1394_OP_SEND_BUF_U:
		ret = hci1394_ixl_update_prep_send_buf(uvp);
		break;

	default:
		/* ixl command being updated must be one of above, else error */
		uvp->upd_status = IXL1394_EOPCODE_DISALLOWED;

		return (IXL_PREP_FAILURE);
	}

	return (ret);
}
/*
 * hci1394_ixl_update_prep_jump()
 *    Preparation for update of an IXL1394_OP_JUMP_U command.
 */
static int
hci1394_ixl_update_prep_jump(hci1394_ixl_update_vars_t *uvp)
{
	ixl1394_jump_t	*old_jump_ixlp;
	ixl1394_jump_t	*new_jump_ixlp;
	ixl1394_command_t *ixlp;
	hci1394_xfer_ctl_t *xferctlp;
	hci1394_desc_t	*hcidescp;
	uint_t		cbcnt;
	ddi_acc_handle_t acc_hdl;
	ddi_dma_handle_t dma_hdl;
	uint32_t	desc_hdr;
	int		err;

	old_jump_ixlp = (ixl1394_jump_t *)uvp->ixloldp;
	new_jump_ixlp = (ixl1394_jump_t *)uvp->ixlnewp;
	/* check if any change between new and old ixl jump command */
	if (new_jump_ixlp->label == old_jump_ixlp->label) {

		/* if none, return with done ok status */
		return (IXL_PREP_SUCCESS);
	}

	/* new ixl jump command label must be ptr to valid ixl label or NULL */
	if ((new_jump_ixlp->label != NULL) &&
	    (new_jump_ixlp->label->ixl_opcode != IXL1394_OP_LABEL)) {

		/* if not jumping to label, return an error */
		uvp->upd_status = IXL1394_EJUMP_NOT_TO_LABEL;

		return (IXL_PREP_FAILURE);
	}
	/*
	 * follow exec path from new ixl jump command label to determine new
	 * jump destination ixl xfer command
	 */
	(void) hci1394_ixl_find_next_exec_xfer(new_jump_ixlp->label, &cbcnt,
	    &ixlp);

	/*
	 * get the bound address of the first descriptor block reached
	 * by the jump destination.  (This descriptor is the first
	 * transfer command following the jumped-to label.)  Set the
	 * descriptor's address (with Z bits) into jumpaddr.
	 */
	uvp->jumpaddr = ((hci1394_xfer_ctl_t *)
	    ixlp->compiler_privatep)->dma[0].dma_bound;
	/*
	 * get associated xfer IXL command from compiler_privatep of old
	 * jump command
	 */
	if ((uvp->ixlxferp = (ixl1394_command_t *)
	    old_jump_ixlp->compiler_privatep) == NULL) {

		/* if none, return an error */
		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		return (IXL_PREP_FAILURE);
	}
	/*
	 * get the associated IXL xfer command's last dma descriptor block's
	 * last descriptor, then get hcihdr from its hdr field,
	 * removing interrupt enabled bits
	 */
	xferctlp = (hci1394_xfer_ctl_t *)uvp->ixlxferp->compiler_privatep;
	hcidescp = (hci1394_desc_t *)xferctlp->dma[xferctlp->cnt - 1].dma_descp;
	acc_hdl = xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_handle;
	dma_hdl = xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_dma_handle;

	/* Sync the descriptor before we grab the header(s) */
	err = ddi_dma_sync(dma_hdl, (off_t)hcidescp, sizeof (hci1394_desc_t),
	    DDI_DMA_SYNC_FORCPU);
	if (err != DDI_SUCCESS) {
		uvp->upd_status = IXL1394_EINTERNAL_ERROR;

		return (IXL_PREP_FAILURE);
	}

	desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
	uvp->hcihdr = desc_hdr & ~DESC_INTR_ENBL;

	/* set depth to last dma descriptor block & update count to 1 */
	uvp->ixldepth = xferctlp->cnt - 1;
	uvp->ixlcount = 1;
	/*
	 * if there is only one dma descriptor block and the IXL xfer command
	 * is initiated by a label, or if callbacks were found along the exec
	 * path to the new destination IXL xfer command, enable interrupt
	 * in the hcihdr value
	 */
	if (((xferctlp->cnt == 1) &&
	    ((xferctlp->ctl_flags & XCTL_LABELLED) != 0)) || (cbcnt != 0)) {

		uvp->hcihdr |= DESC_INTR_ENBL;
	}

	/* If either old or new destination was/is NULL, enable interrupt */
	if ((new_jump_ixlp->label == NULL) || (old_jump_ixlp->label == NULL)) {
		uvp->hcihdr |= DESC_INTR_ENBL;
	}
	/*
	 * if xfer type is xmit and skip mode for this xfer command is
	 * IXL1394_SKIP_TO_NEXT then set uvp->skipmode to IXL1394_SKIP_TO_NEXT
	 * and set uvp->skipaddr to uvp->jumpaddr and set uvp->hci_offset to
	 * offset from last dma descriptor to first dma descriptor
	 * (where skipaddr goes).
	 *
	 * update perform processing will have to set skip branch address to
	 * same location as jump destination in this case.
	 */
	uvp->skipmode = IXL1394_SKIP_TO_STOP;
	if ((uvp->ixlxferp->ixl_opcode & IXL1394_OPF_ONXMIT) != 0) {

		if ((xferctlp->skipmodep && (((ixl1394_set_skipmode_t *)
		    xferctlp->skipmodep)->skipmode == IXL1394_SKIP_TO_NEXT)) ||
		    (uvp->ctxtp->default_skipmode == IXL1394_SKIP_TO_NEXT)) {

			uvp->skipmode = IXL1394_SKIP_TO_NEXT;
			uvp->skipaddr = uvp->jumpaddr;

			/*
			 * calc hci_offset to first descriptor (where skipaddr
			 * goes) of dma descriptor block from current (last)
			 * descriptor of the descriptor block (accessed in
			 * xfer_ctl dma_descp of IXL xfer command)
			 */
			if (uvp->ixlxferp->ixl_opcode ==
			    IXL1394_OP_SEND_HDR_ONLY) {
				/*
				 * send header only is (Z bits - 2)
				 * descriptor components back from last one
				 */
				uvp->hci_offset -= 2;
			} else {
				/*
				 * all others are (Z bits - 1) descriptor
				 * components back from last component
				 */
				uvp->hci_offset -= 1;
			}
		}
	}

	return (IXL_PREP_READY);
}
/*
 * hci1394_ixl_update_prep_set_skipmode()
 *    Preparation for update of an IXL1394_OP_SET_SKIPMODE_U command.
 */
static int
hci1394_ixl_update_prep_set_skipmode(hci1394_ixl_update_vars_t *uvp)
{
	ixl1394_set_skipmode_t *old_set_skipmode_ixlp;
	ixl1394_set_skipmode_t *new_set_skipmode_ixlp;
	ixl1394_command_t *ixlp;
	hci1394_xfer_ctl_t *xferctlp;

	old_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixloldp;
	new_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixlnewp;
	/* check if new set skipmode is change from old set skipmode */
	if (new_set_skipmode_ixlp->skipmode ==
	    old_set_skipmode_ixlp->skipmode) {

		if ((new_set_skipmode_ixlp->skipmode !=
		    IXL1394_SKIP_TO_LABEL) ||
		    (old_set_skipmode_ixlp->label ==
		    new_set_skipmode_ixlp->label)) {

			/* No change, return with done ok status */
			return (IXL_PREP_SUCCESS);
		}
	}
	/* find associated ixl xfer command by following old ixl links */
	uvp->ixlxferp = uvp->ixloldp->next_ixlp;
	while ((uvp->ixlxferp != NULL) && (((uvp->ixlxferp->ixl_opcode &
	    IXL1394_OPF_ISXFER) == 0) ||
	    ((uvp->ixlxferp->ixl_opcode & IXL1394_OPTY_MASK) != 0))) {

		uvp->ixlxferp = uvp->ixlxferp->next_ixlp;
	}

	/* return an error if no ixl xfer command found */
	if (uvp->ixlxferp == NULL) {

		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		return (IXL_PREP_FAILURE);
	}
	/*
	 * get Z bits (number of descriptor components in descriptor block)
	 * from a dma bound addr in the xfer_ctl struct of the IXL xfer command
	 */
	if ((xferctlp = (hci1394_xfer_ctl_t *)
	    uvp->ixlxferp->compiler_privatep) == NULL) {

		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		return (IXL_PREP_FAILURE);
	}

	uvp->hci_offset = xferctlp->dma[0].dma_bound & DESC_Z_MASK;

	/*
	 * determine hci_offset to first component (where skipaddr goes) of
	 * dma descriptor block from current (last) descriptor component of
	 * descriptor block (accessed in xfer_ctl dma_descp of IXL xfer
	 * command)
	 */
	if (uvp->ixlxferp->ixl_opcode == IXL1394_OP_SEND_HDR_ONLY) {
		/*
		 * "send header only" is (Z bits - 2) descriptors back
		 * from the last one
		 */
		uvp->hci_offset -= 2;
	} else {
		/*
		 * all others are (Z bits - 1) descriptors back from
		 * the last one
		 */
		uvp->hci_offset -= 1;
	}
	/* set depth to zero and count to update all dma descriptors */
	uvp->ixldepth = 0;
	uvp->ixlcount = xferctlp->cnt;

	/* set new skipmode and validate */
	uvp->skipmode = new_set_skipmode_ixlp->skipmode;

	if ((uvp->skipmode != IXL1394_SKIP_TO_NEXT) &&
	    (uvp->skipmode != IXL1394_SKIP_TO_SELF) &&
	    (uvp->skipmode != IXL1394_SKIP_TO_STOP) &&
	    (uvp->skipmode != IXL1394_SKIP_TO_LABEL)) {

		/* return an error if invalid mode */
		uvp->upd_status = IXL1394_EBAD_SKIPMODE;

		return (IXL_PREP_FAILURE);
	}
	/* if mode is skip to label */
	if (uvp->skipmode == IXL1394_SKIP_TO_LABEL) {

		/* verify label field is valid ixl label cmd */
		if ((new_set_skipmode_ixlp->label == NULL) ||
		    (new_set_skipmode_ixlp->label->ixl_opcode !=
		    IXL1394_OP_LABEL)) {

			/* Error - not skipping to valid label */
			uvp->upd_status = IXL1394_EBAD_SKIP_LABEL;

			return (IXL_PREP_FAILURE);
		}

		/*
		 * follow new skip exec path after label to next xfer
		 * IXL command
		 */
		(void) hci1394_ixl_find_next_exec_xfer(
		    new_set_skipmode_ixlp->label, NULL, &ixlp);

		/*
		 * set skip destination IXL xfer command.
		 * after update, set into old set skip mode IXL
		 * compiler_privatep
		 */
		if ((uvp->skipxferp = ixlp) != NULL) {
			/*
			 * set skipaddr to be the first dma descriptor block's
			 * dma bound address w/Z bits
			 */
			xferctlp = (hci1394_xfer_ctl_t *)
			    ixlp->compiler_privatep;
			uvp->skipaddr = xferctlp->dma[0].dma_bound;
		}
	}
	/*
	 * if mode is skip to next, get skipaddr for last dma descriptor block
	 */
	if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
		/* follow normal exec path to next xfer ixl command */
		(void) hci1394_ixl_find_next_exec_xfer(uvp->ixlxferp->next_ixlp,
		    NULL, &ixlp);

		/*
		 * get skip_next destination IXL xfer command
		 * (for last iteration)
		 */
		if (ixlp != NULL) {
			/*
			 * set skipaddr to first dma descriptor block's
			 * dma bound address w/Z bits
			 */
			xferctlp = (hci1394_xfer_ctl_t *)
			    ixlp->compiler_privatep;
			uvp->skipaddr = xferctlp->dma[0].dma_bound;
		}
	}

	return (IXL_PREP_READY);
}
/*
 * hci1394_ixl_update_prep_set_tagsync()
 *    Preparation for update of an IXL1394_OP_SET_TAGSYNC_U command.
 */
static int
hci1394_ixl_update_prep_set_tagsync(hci1394_ixl_update_vars_t *uvp)
{
	ixl1394_set_tagsync_t *old_set_tagsync_ixlp;
	ixl1394_set_tagsync_t *new_set_tagsync_ixlp;
	hci1394_xfer_ctl_t *xferctlp;

	old_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixloldp;
	new_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixlnewp;
	/* check if new set tagsync is change from old set tagsync. */
	if ((new_set_tagsync_ixlp->tag == old_set_tagsync_ixlp->tag) &&
	    (new_set_tagsync_ixlp->sync == old_set_tagsync_ixlp->sync)) {

		/* no change, return with done ok status */
		return (IXL_PREP_SUCCESS);
	}
	/* find associated IXL xfer command by following old ixl links */
	uvp->ixlxferp = uvp->ixloldp->next_ixlp;
	while ((uvp->ixlxferp != NULL) && (((uvp->ixlxferp->ixl_opcode &
	    IXL1394_OPF_ISXFER) == 0) ||
	    ((uvp->ixlxferp->ixl_opcode & IXL1394_OPTY_MASK) != 0))) {

		uvp->ixlxferp = uvp->ixlxferp->next_ixlp;
	}

	/* return an error if no IXL xfer command found */
	if (uvp->ixlxferp == NULL) {

		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		return (IXL_PREP_FAILURE);
	}
	/* is IXL xfer command an IXL1394_OP_SEND_NO_PKT? */
	if (uvp->ixlxferp->ixl_opcode == IXL1394_OP_SEND_NO_PKT) {
		/* no update needed, return done ok status */
		return (IXL_PREP_SUCCESS);
	}
	/* build new pkthdr1 from new IXL tag/sync bits */
	uvp->pkthdr1 = (uvp->ctxtp->isospd << DESC_PKT_SPD_SHIFT) |
	    (new_set_tagsync_ixlp->tag << DESC_PKT_TAG_SHIFT) |
	    (uvp->ctxtp->isochan << DESC_PKT_CHAN_SHIFT) |
	    (new_set_tagsync_ixlp->sync << DESC_PKT_SY_SHIFT);
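	/*
	 * pkthdr1 mirrors the first quadlet of the isochronous packet
	 * header carried by the OUTPUT_MORE/LAST-Immediate descriptor:
	 * the speed, tag, channel and sy fields packed into one 32-bit
	 * word, per the OpenHCI isochronous transmit descriptor layout.
	 */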
	/*
	 * get Z bits (# of descriptor components in descriptor block) from
	 * any dma bound address in the xfer_ctl struct of the IXL xfer cmd
	 */
	if ((xferctlp = (hci1394_xfer_ctl_t *)
	    uvp->ixlxferp->compiler_privatep) == NULL) {

		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		return (IXL_PREP_FAILURE);
	}

	uvp->hdr_offset = xferctlp->dma[0].dma_bound & DESC_Z_MASK;
	/*
	 * determine hdr_offset from the current (last) descriptor of the
	 * DMA descriptor block to the descriptor where pkthdr1 goes
	 * by examining IXL xfer command
	 */
	if (uvp->ixlxferp->ixl_opcode == IXL1394_OP_SEND_HDR_ONLY) {
		/*
		 * if IXL send header only, the current (last)
		 * descriptor is the one
		 */
		uvp->hdr_offset = 0;
	} else {
		/*
		 * all others are the first descriptor, (Z bits - 1)
		 * back from the last
		 */
		uvp->hdr_offset -= 1;
	}

	/* set depth to zero and count to update all dma descriptors */
	uvp->ixldepth = 0;
	uvp->ixlcount = xferctlp->cnt;

	return (IXL_PREP_READY);
}
/*
 * hci1394_ixl_update_prep_recv_pkt()
 *    Preparation for update of an IXL1394_OP_RECV_PKT_U or
 *    IXL1394_OP_RECV_PKT_ST_U command.
 */
static int
hci1394_ixl_update_prep_recv_pkt(hci1394_ixl_update_vars_t *uvp)
{
	ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
	ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
	hci1394_xfer_ctl_t *xferctlp;
	hci1394_desc_t	*hcidescp;
	ddi_acc_handle_t acc_hdl;
	ddi_dma_handle_t dma_hdl;
	uint32_t	desc_hdr;
	int		err;

	old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
	new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
	/* check if any change between new and old IXL xfer commands */
	if ((new_xfer_pkt_ixlp->size == old_xfer_pkt_ixlp->size) &&
	    (new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr ==
	    old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr) &&
	    (new_xfer_pkt_ixlp->mem_bufp == old_xfer_pkt_ixlp->mem_bufp)) {

		/* no change. return with done ok status */
		return (IXL_PREP_SUCCESS);
	}
	/* if new IXL buffer addrs are null, return error */
	if ((new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr == (uintptr_t)NULL) ||
	    (new_xfer_pkt_ixlp->mem_bufp == NULL)) {

		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;

		return (IXL_PREP_FAILURE);
	}
	/* if IXL xfer command is not the xfer start command */
	if (uvp->ixl_opcode == IXL1394_OP_RECV_PKT_U) {
		/*
		 * find IXL xfer start command in the compiler_privatep of the
		 * old IXL xfer command
		 */
		uvp->ixlxferp = (ixl1394_command_t *)
		    uvp->ixloldp->compiler_privatep;

		if (uvp->ixlxferp == NULL) {

			/* Error - no IXL xfer start command found */
			uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

			return (IXL_PREP_FAILURE);
		}
	} else {
		/* IXL xfer command is the IXL xfer start command */
		uvp->ixlxferp = uvp->ixloldp;
	}
	/* check that xfer_ctl is present in the IXL xfer start command */
	if ((xferctlp = (hci1394_xfer_ctl_t *)
	    uvp->ixlxferp->compiler_privatep) == NULL) {

		/* Error - no xfer_ctl struct found */
		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		return (IXL_PREP_FAILURE);
	}
	/* set depth to zero and count to 1 to update dma descriptor */
	uvp->ixldepth = 0;
	uvp->ixlcount = 1;

	/*
	 * get Z bits (number of descriptors in descriptor block) from the DMA
	 * bound address in the xfer_ctl struct of the IXL xfer start command.
	 */
	uvp->hci_offset = xferctlp->dma[0].dma_bound & DESC_Z_MASK;

	/*
	 * set offset from the current (last) descriptor to the descriptor for
	 * this packet command
	 */
	uvp->hci_offset -= (1 + uvp->ixloldp->compiler_resv);
	/*
	 * set bufsize to the new IXL xfer size, and bufaddr to the new
	 * IXL xfer bufp
	 */
	uvp->bufsize = ((ixl1394_xfer_pkt_t *)uvp->ixlnewp)->size;
	uvp->bufaddr = ((ixl1394_xfer_pkt_t *)
	    uvp->ixlnewp)->ixl_buf.ixldmac_addr;
	/*
	 * update old hcihdr w/new bufsize, set hcistatus rescnt to
	 * new bufsize
	 */
	hcidescp = (hci1394_desc_t *)xferctlp->dma[0].dma_descp -
	    uvp->hci_offset;
	acc_hdl = xferctlp->dma[0].dma_buf->bi_handle;
	dma_hdl = xferctlp->dma[0].dma_buf->bi_dma_handle;

	/* Sync the descriptor before we grab the header(s) */
	err = ddi_dma_sync(dma_hdl, (off_t)hcidescp, sizeof (hci1394_desc_t),
	    DDI_DMA_SYNC_FORCPU);
	if (err != DDI_SUCCESS) {
		uvp->upd_status = IXL1394_EINTERNAL_ERROR;

		return (IXL_PREP_FAILURE);
	}

	desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
	uvp->hcihdr = desc_hdr;
	uvp->hcihdr &= ~DESC_HDR_REQCOUNT_MASK;
	uvp->hcihdr |= (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
	    DESC_HDR_REQCOUNT_MASK;
	uvp->hcistatus = (uvp->bufsize << DESC_ST_RESCOUNT_SHIFT) &
	    DESC_ST_RESCOUNT_MASK;
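	/*
	 * reqCount (in the descriptor header) is the number of bytes the
	 * controller may deposit, and resCount (in the status word) is the
	 * count of bytes still unused; presetting resCount equal to the new
	 * reqCount marks the descriptor as empty again, per the OpenHCI
	 * receive descriptor semantics.
	 */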
	return (IXL_PREP_READY);
}
/*
 * hci1394_ixl_update_prep_recv_buf()
 *    Preparation for update of an IXL1394_OP_RECV_BUF_U command.
 */
static int
hci1394_ixl_update_prep_recv_buf(hci1394_ixl_update_vars_t *uvp)
{
	ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
	ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
	hci1394_xfer_ctl_t *xferctlp;

	old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
	new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
	/* check if any change between new and old IXL xfer commands */
	if ((new_xfer_buf_ixlp->size == old_xfer_buf_ixlp->size) &&
	    (new_xfer_buf_ixlp->ixl_buf.ixldmac_addr ==
	    old_xfer_buf_ixlp->ixl_buf.ixldmac_addr) &&
	    (new_xfer_buf_ixlp->mem_bufp == old_xfer_buf_ixlp->mem_bufp)) {

		if (((uvp->ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) !=
		    0) || (new_xfer_buf_ixlp->pkt_size ==
		    old_xfer_buf_ixlp->pkt_size)) {

			/* no change. return with done ok status */
			return (IXL_PREP_SUCCESS);
		}
	}
	/* if new IXL buffer addrs are null, return error */
	if ((new_xfer_buf_ixlp->ixl_buf.ixldmac_addr == (uintptr_t)NULL) ||
	    (new_xfer_buf_ixlp->mem_bufp == NULL)) {

		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;

		return (IXL_PREP_FAILURE);
	}
	/*
	 * if not buffer fill mode, check that the new pkt_size > 0 and the
	 * new size/pkt_size doesn't change the count of dma descriptor
	 * blocks required
	 */
	if ((uvp->ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) == 0) {
		if ((new_xfer_buf_ixlp->pkt_size == 0) ||
		    ((new_xfer_buf_ixlp->size / new_xfer_buf_ixlp->pkt_size) !=
		    (old_xfer_buf_ixlp->size / old_xfer_buf_ixlp->pkt_size))) {

			/* count changes. return an error */
			uvp->upd_status = IXL1394_EXFER_BUF_CNT_DIFF;

			return (IXL_PREP_FAILURE);
		}
	}
	/* set old IXL xfer command as the current IXL xfer command */
	uvp->ixlxferp = uvp->ixloldp;

	/* check that the xfer_ctl struct is present in IXL xfer command */
	if ((xferctlp = (hci1394_xfer_ctl_t *)uvp->ixlxferp->compiler_privatep)
	    == NULL) {

		/* return an error if no xfer_ctl struct is found for command */
		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		return (IXL_PREP_FAILURE);
	}
	/* set depth to zero and count to update all dma descriptors */
	uvp->ixldepth = 0;
	uvp->ixlcount = xferctlp->cnt;

	/* set bufsize to new pkt_size (or to new size if buffer fill mode) */
	if ((uvp->ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) == 0) {
		uvp->bufsize = new_xfer_buf_ixlp->pkt_size;
	} else {
		uvp->bufsize = new_xfer_buf_ixlp->size;
	}

	/* set bufaddr to new ixl_buf */
	uvp->bufaddr = new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
	/* set hcihdr reqcnt and hcistatus rescnt to new bufsize */
	uvp->hci_offset = 0;
	uvp->hcihdr = (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
	    DESC_HDR_REQCOUNT_MASK;
	uvp->hcistatus = (uvp->bufsize << DESC_ST_RESCOUNT_SHIFT) &
	    DESC_ST_RESCOUNT_MASK;

	return (IXL_PREP_READY);
}
/*
 * hci1394_ixl_update_prep_send_pkt()
 *    Preparation for update of an IXL1394_OP_SEND_PKT_U command,
 *    IXL1394_OP_SEND_PKT_ST_U command and IXL1394_OP_SEND_PKT_WHDR_ST_U
 *    command.
 */
static int
hci1394_ixl_update_prep_send_pkt(hci1394_ixl_update_vars_t *uvp)
{
	ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
	ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
	hci1394_xfer_ctl_t *xferctlp;
	hci1394_desc_imm_t *hcidescp;
	ddi_acc_handle_t acc_hdl;
	ddi_dma_handle_t dma_hdl;
	uint32_t	desc_hdr, desc_hdr2;
	int		err;

	old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
	new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
	/* check if any change between new and old IXL xfer commands */
	if ((new_xfer_pkt_ixlp->size == old_xfer_pkt_ixlp->size) &&
	    (new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr ==
	    old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr) &&
	    (new_xfer_pkt_ixlp->mem_bufp == old_xfer_pkt_ixlp->mem_bufp)) {

		/* if none, return with done ok status */
		return (IXL_PREP_SUCCESS);
	}
	/* if new ixl buffer addrs are null, return error */
	if ((new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr == (uintptr_t)NULL) ||
	    (new_xfer_pkt_ixlp->mem_bufp == NULL)) {

		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;

		return (IXL_PREP_FAILURE);
	}
	/* error if IXL1394_OP_SEND_PKT_WHDR_ST_U opcode and size < 4 */
	if ((uvp->ixl_opcode == IXL1394_OP_SEND_PKT_WHDR_ST_U) &&
	    (new_xfer_pkt_ixlp->size < 4)) {

		uvp->upd_status = IXL1394_EPKT_HDR_MISSING;

		return (IXL_PREP_FAILURE);
	}
	/* if IXL xfer command is not an IXL xfer start command */
	if (uvp->ixl_opcode == IXL1394_OP_SEND_PKT_U) {
		/*
		 * find IXL xfer start command in the compiler_privatep of the
		 * old IXL xfer command
		 */
		uvp->ixlxferp = (ixl1394_command_t *)
		    old_xfer_pkt_ixlp->compiler_privatep;

		if (uvp->ixlxferp == NULL) {

			/* error if no IXL xfer start command found */
			uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

			return (IXL_PREP_FAILURE);
		}
	} else {
		/* IXL xfer command is the IXL xfer start command */
		uvp->ixlxferp = uvp->ixloldp;
	}
	/*
	 * get Z bits (number of descriptor components in the descriptor block)
	 * from a dma bound address in the xfer_ctl structure of the IXL
	 * xfer start command
	 */
	if ((xferctlp = (hci1394_xfer_ctl_t *)
	    uvp->ixlxferp->compiler_privatep) == NULL) {

		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		return (IXL_PREP_FAILURE);
	}
	/* set depth to zero and count to 1 to update dma descriptor */
	uvp->ixldepth = 0;
	uvp->ixlcount = 1;

	/*
	 * set offset to the header (first) descriptor from the
	 * current (last) descriptor
	 */
	uvp->hdr_offset = (xferctlp->dma[0].dma_bound & DESC_Z_MASK) - 1;

	/*
	 * set offset from the current (last) descriptor to the descriptor for
	 * this packet command
	 */
	uvp->hci_offset = uvp->hdr_offset - 2 - uvp->ixloldp->compiler_resv;
	/* set bufsize to new pkt buffer size, set bufaddr to new bufp */
	uvp->bufsize = new_xfer_pkt_ixlp->size;
	uvp->bufaddr = new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr;

	/*
	 * if IXL1394_OP_SEND_PKT_WHDR_ST_U opcode, adjust size & buff to
	 * step over the header (a quadlet)
	 */
	if (uvp->ixl_opcode == IXL1394_OP_SEND_PKT_WHDR_ST_U) {
		uvp->bufsize -= 4;
		uvp->bufaddr += 4;
	}
	/* update old hcihdr w/new bufsize */
	hcidescp = (hci1394_desc_imm_t *)xferctlp->dma[0].dma_descp -
	    uvp->hdr_offset;
	acc_hdl = xferctlp->dma[0].dma_buf->bi_handle;
	dma_hdl = xferctlp->dma[0].dma_buf->bi_dma_handle;

	/* Sync the descriptor before we grab the header(s) */
	err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
	    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
	if (err != DDI_SUCCESS) {
		uvp->upd_status = IXL1394_EINTERNAL_ERROR;

		return (IXL_PREP_FAILURE);
	}

	desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
	uvp->hcihdr = desc_hdr;
	uvp->hcihdr &= ~DESC_HDR_REQCOUNT_MASK;
	uvp->hcihdr |= (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
	    DESC_HDR_REQCOUNT_MASK;
	/* update old pkthdr2 w/new bufsize.  error if 16-bit datalen oflo */
	desc_hdr2 = ddi_get32(acc_hdl, &hcidescp->q2);
	uvp->pkthdr2 = desc_hdr2;
	uvp->pkthdr2 = (uvp->pkthdr2 & DESC_PKT_DATALEN_MASK) >>
	    DESC_PKT_DATALEN_SHIFT;
	uvp->pkthdr2 -= old_xfer_pkt_ixlp->size;
	uvp->pkthdr2 += uvp->bufsize;

	if (uvp->pkthdr2 > 0xFFFF) {
		uvp->upd_status = IXL1394_EPKTSIZE_MAX_OFLO;

		return (IXL_PREP_FAILURE);
	}

	uvp->pkthdr2 = (uvp->pkthdr2 << DESC_PKT_DATALEN_SHIFT) &
	    DESC_PKT_DATALEN_MASK;

	return (IXL_PREP_READY);
}
/*
 * hci1394_ixl_update_prep_send_buf()
 *    Preparation for update of an IXL1394_OP_SEND_BUF_U command.
 */
static int
hci1394_ixl_update_prep_send_buf(hci1394_ixl_update_vars_t *uvp)
{
	ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
	ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
	hci1394_xfer_ctl_t *xferctlp;

	old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
	new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
	/* check if any change between new and old IXL xfer commands */
	if ((new_xfer_buf_ixlp->size == old_xfer_buf_ixlp->size) &&
	    (new_xfer_buf_ixlp->pkt_size == old_xfer_buf_ixlp->pkt_size) &&
	    (new_xfer_buf_ixlp->ixl_buf.ixldmac_addr ==
	    old_xfer_buf_ixlp->ixl_buf.ixldmac_addr) &&
	    (new_xfer_buf_ixlp->mem_bufp == old_xfer_buf_ixlp->mem_bufp)) {

		/* no change, return with done ok status */
		return (IXL_PREP_SUCCESS);
	}
	/* if new IXL buffer addresses are null, return error */
	if ((new_xfer_buf_ixlp->ixl_buf.ixldmac_addr == (uintptr_t)NULL) ||
	    (new_xfer_buf_ixlp->mem_bufp == NULL)) {

		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;

		return (IXL_PREP_FAILURE);
	}
	/*
	 * check that the new pkt_size > 0 and the new size/pkt_size
	 * doesn't change the count of DMA descriptor blocks required
	 */
	if ((new_xfer_buf_ixlp->pkt_size == 0) ||
	    ((new_xfer_buf_ixlp->size / new_xfer_buf_ixlp->pkt_size) !=
	    (old_xfer_buf_ixlp->size / old_xfer_buf_ixlp->pkt_size))) {

		/* Error - new has different pkt count than old */
		uvp->upd_status = IXL1394_EXFER_BUF_CNT_DIFF;

		return (IXL_PREP_FAILURE);
	}
	/* set the old IXL xfer command as the current IXL xfer command */
	uvp->ixlxferp = uvp->ixloldp;

	/*
	 * get Z bits (number of descriptor components in descriptor block)
	 * from a DMA bound address in the xfer_ctl struct of the
	 * IXL xfer command
	 */
	if ((xferctlp = (hci1394_xfer_ctl_t *)
	    uvp->ixlxferp->compiler_privatep) == NULL) {

		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		return (IXL_PREP_FAILURE);
	}
	/* set depth to zero and count to update all dma descriptors */
	uvp->ixldepth = 0;
	uvp->ixlcount = xferctlp->cnt;

	/*
	 * set offset to the header (first) descriptor from the current (last)
	 * descriptor
	 */
	uvp->hdr_offset = (xferctlp->dma[0].dma_bound & DESC_Z_MASK) - 1;

	/* set offset to the only (last) xfer descriptor */
	uvp->hci_offset = 0;

	/* set bufsize to the new pkt_size, set bufaddr to the new bufp */
	uvp->bufsize = new_xfer_buf_ixlp->pkt_size;
	uvp->bufaddr = new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
	/*
	 * if IXL1394_OP_SEND_PKT_WHDR_ST_U opcode, adjust size & buff to
	 * step over header (a quadlet)
	 */
	if (uvp->ixl_opcode == IXL1394_OP_SEND_PKT_WHDR_ST_U) {
		uvp->bufsize -= 4;
		uvp->bufaddr += 4;
	}

	/* set hcihdr to new bufsize */
	uvp->hcihdr = (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
	    DESC_HDR_REQCOUNT_MASK;

	/* set pkthdr2 to new bufsize */
	uvp->pkthdr2 = (uvp->bufsize << DESC_PKT_DATALEN_SHIFT) &
	    DESC_PKT_DATALEN_MASK;

	return (IXL_PREP_READY);
}
/*
 * hci1394_ixl_update_perform()
 *    performs the actual update into DMA memory.
 */
static int
hci1394_ixl_update_perform(hci1394_ixl_update_vars_t *uvp)
{
	int		ii;
	uint_t		skipaddrlast;
	hci1394_xfer_ctl_t *xferctlp;
	hci1394_desc_imm_t *hcidescp;
	hci1394_iso_ctxt_t *ctxtp;
	ddi_acc_handle_t acc_hdl;
	ddi_dma_handle_t dma_hdl;
	int		err;

	ctxtp = uvp->ctxtp;

	/*
	 * if no target ixl xfer command to be updated or it has
	 * no xfer_ctl struct, then internal error.
	 */
	if ((uvp->ixlxferp == NULL) ||
	    ((xferctlp = (hci1394_xfer_ctl_t *)
	    uvp->ixlxferp->compiler_privatep) == NULL)) {

		uvp->upd_status = IXL1394_EINTERNAL_ERROR;

		return (DDI_FAILURE);
	}
	/* perform update based on specific ixl command type */
	switch (uvp->ixl_opcode) {

	case IXL1394_OP_JUMP_U: {
		ixl1394_jump_t *old_jump_ixlp;
		ixl1394_jump_t *new_jump_ixlp;

		old_jump_ixlp = (ixl1394_jump_t *)uvp->ixloldp;
		new_jump_ixlp = (ixl1394_jump_t *)uvp->ixlnewp;

		/*
		 * set new hdr and new branch fields into last component of last
		 * dma descriptor block of ixl xfer cmd associated with
		 * the ixl jump cmd
		 */
		hcidescp = (hci1394_desc_imm_t *)
		    xferctlp->dma[xferctlp->cnt - 1].dma_descp;
		acc_hdl = xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_handle;
		dma_hdl =
		    xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_dma_handle;

		ddi_put32(acc_hdl, &hcidescp->hdr, uvp->hcihdr);
		ddi_put32(acc_hdl, &hcidescp->branch, uvp->jumpaddr);
		/*
		 * if xfer type is send and skip mode is IXL1394_SKIP_TO_NEXT,
		 * also set branch location into branch field of first
		 * component (skip to address) of last dma descriptor block
		 */
		if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
			hcidescp -= uvp->hci_offset;
			ddi_put32(acc_hdl, &hcidescp->branch, uvp->skipaddr);
		}

		/* Sync descriptor for device (desc was modified) */
		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
		if (err != DDI_SUCCESS) {
			uvp->upd_status = IXL1394_EINTERNAL_ERROR;

			return (DDI_FAILURE);
		}

		/* set old ixl jump cmd label from new ixl jump cmd label */
		old_jump_ixlp->label = new_jump_ixlp->label;
		break;
	}
	case IXL1394_OP_SET_SKIPMODE_U: {
		ixl1394_set_skipmode_t *old_set_skipmode_ixlp;
		ixl1394_set_skipmode_t *new_set_skipmode_ixlp;

		old_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixloldp;
		new_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixlnewp;

		/*
		 * if skip to next mode, save skip addr for last iteration
		 * thru dma descriptor blocks for associated ixl xfer command
		 */
		if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
			skipaddrlast = uvp->skipaddr;
		}

		/*
		 * iterate through set of dma descriptor blocks for associated
		 * ixl xfer start cmd and set new skip address into first hci
		 * descriptor of each.  if skip next or skip self, first
		 * determine the address for each iteration
		 */
		for (ii = 0; ii < xferctlp->cnt; ii++) {
			hcidescp = (hci1394_desc_imm_t *)
			    xferctlp->dma[ii].dma_descp - uvp->hci_offset;
			acc_hdl = xferctlp->dma[ii].dma_buf->bi_handle;
			dma_hdl = xferctlp->dma[ii].dma_buf->bi_dma_handle;

			if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
				if (ii < (xferctlp->cnt - 1)) {
					uvp->skipaddr =
					    xferctlp->dma[ii + 1].dma_bound;
				} else {
					uvp->skipaddr = skipaddrlast;
				}
			} else if (uvp->skipmode == IXL1394_SKIP_TO_SELF) {
				uvp->skipaddr = xferctlp->dma[ii].dma_bound;
			}

			ddi_put32(acc_hdl, &hcidescp->branch, uvp->skipaddr);

			/* Sync descriptor for device (desc was modified) */
			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
			if (err != DDI_SUCCESS) {
				uvp->upd_status = IXL1394_EINTERNAL_ERROR;

				return (DDI_FAILURE);
			}
		}

		/*
		 * set old ixl set skip mode cmd mode and label from new ixl
		 * cmd; set old ixl set skip mode cmd compiler_privatep to
		 * the skip destination xfer command (uvp->skipxferp)
		 */
		old_set_skipmode_ixlp->skipmode = uvp->skipmode;
		old_set_skipmode_ixlp->label = new_set_skipmode_ixlp->label;
		old_set_skipmode_ixlp->compiler_privatep =
		    (ixl1394_priv_t)uvp->skipxferp;
		break;
	}
	case IXL1394_OP_SET_TAGSYNC_U: {
		ixl1394_set_tagsync_t *old_set_tagsync_ixlp;
		ixl1394_set_tagsync_t *new_set_tagsync_ixlp;

		old_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixloldp;
		new_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixlnewp;

		/*
		 * iterate through set of descriptor blocks for associated IXL
		 * xfer command and set new pkthdr1 value into output more/last
		 * immediate hci descriptor (first/last hci descriptor of each
		 * dma descriptor block)
		 */
		for (ii = 0; ii < xferctlp->cnt; ii++) {
			hcidescp = (hci1394_desc_imm_t *)
			    xferctlp->dma[ii].dma_descp - uvp->hdr_offset;
			acc_hdl = xferctlp->dma[ii].dma_buf->bi_handle;
			dma_hdl = xferctlp->dma[ii].dma_buf->bi_dma_handle;
			ddi_put32(acc_hdl, &hcidescp->q1, uvp->pkthdr1);

			/* Sync descriptor for device (desc was modified) */
			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
			if (err != DDI_SUCCESS) {
				uvp->upd_status = IXL1394_EINTERNAL_ERROR;

				return (DDI_FAILURE);
			}
		}

		/*
		 * set old ixl set tagsync cmd tag & sync from new ixl set
		 * tagsync cmd
		 */
		old_set_tagsync_ixlp->tag = new_set_tagsync_ixlp->tag;
		old_set_tagsync_ixlp->sync = new_set_tagsync_ixlp->sync;
		break;
	}
	case IXL1394_OP_RECV_PKT_U:
	case IXL1394_OP_RECV_PKT_ST_U: {
		ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
		ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
		uint32_t	desc_status;

		old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
		new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;

		/*
		 * alter buffer address, count and rescount in ixl recv pkt cmd
		 * related hci component in dma descriptor block
		 */
		hcidescp = (hci1394_desc_imm_t *)
		    xferctlp->dma[0].dma_descp - uvp->hci_offset;
		acc_hdl = xferctlp->dma[0].dma_buf->bi_handle;
		dma_hdl = xferctlp->dma[0].dma_buf->bi_dma_handle;
		ddi_put32(acc_hdl, &hcidescp->hdr, uvp->hcihdr);
		ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);

		/* Sync the descriptor before we grab the status */
		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
		if (err != DDI_SUCCESS) {
			uvp->upd_status = IXL1394_EINTERNAL_ERROR;

			return (DDI_FAILURE);
		}

		/* change only low 1/2 word and leave status bits unchanged */
		desc_status = ddi_get32(acc_hdl, &hcidescp->status);
		desc_status = (desc_status & ~DESC_ST_RESCOUNT_MASK) |
		    uvp->hcistatus;
		ddi_put32(acc_hdl, &hcidescp->status, desc_status);

		/* Sync descriptor for device (desc was modified) */
		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
		if (err != DDI_SUCCESS) {
			uvp->upd_status = IXL1394_EINTERNAL_ERROR;

			return (DDI_FAILURE);
		}

		/*
		 * set old ixl recv pkt size and buffers from new
		 * ixl recv pkt command
		 */
		old_xfer_pkt_ixlp->size = new_xfer_pkt_ixlp->size;
		old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr =
		    new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr;
		old_xfer_pkt_ixlp->mem_bufp = new_xfer_pkt_ixlp->mem_bufp;
		break;
	}
	case IXL1394_OP_RECV_BUF_U: {
		ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
		ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
		uint32_t	desc_hdr;
		uint32_t	desc_status;

		old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
		new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;

		/*
		 * iterate through set of descriptor blocks for this IXL xfer
		 * command altering buffer, count and rescount in each
		 * input more/last (the only) hci descriptor block descriptor.
		 */
		for (ii = 0; ii < xferctlp->cnt; ii++) {

			hcidescp = (hci1394_desc_imm_t *)
			    xferctlp->dma[ii].dma_descp - uvp->hci_offset;
			acc_hdl = xferctlp->dma[ii].dma_buf->bi_handle;
			dma_hdl = xferctlp->dma[ii].dma_buf->bi_dma_handle;

			ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);

			/*
			 * advance to next buffer segment, adjust over header
			 * if appropriate
			 */
			uvp->bufaddr += uvp->bufsize;
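			/*
			 * In packet-per-buffer mode each descriptor block
			 * receives one packet, so consecutive blocks are
			 * pointed at consecutive bufsize-sized slices of the
			 * new buffer (the prep routine set bufsize to
			 * pkt_size, or to the whole size in buffer-fill
			 * mode).
			 */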
			/* Sync the descriptor before we grab the header(s) */
			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
			if (err != DDI_SUCCESS) {
				uvp->upd_status = IXL1394_EINTERNAL_ERROR;

				return (DDI_FAILURE);
			}

			/*
			 * this preserves interrupt enable bits, et al. in each
			 * descriptor block header.
			 */
			desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
			desc_hdr = (desc_hdr & ~DESC_HDR_REQCOUNT_MASK) |
			    uvp->hcihdr;
			ddi_put32(acc_hdl, &hcidescp->hdr, desc_hdr);

			/*
			 * change only low 1/2 word leaving status bits
			 * unchanged
			 */
			desc_status = ddi_get32(acc_hdl, &hcidescp->status);
			desc_status = (desc_status & ~DESC_ST_RESCOUNT_MASK) |
			    uvp->hcistatus;
			ddi_put32(acc_hdl, &hcidescp->status, desc_status);

			/* Sync descriptor for device (desc was modified) */
			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
			if (err != DDI_SUCCESS) {
				uvp->upd_status = IXL1394_EINTERNAL_ERROR;

				return (DDI_FAILURE);
			}
		}

		/*
		 * set old ixl recv buf sizes and buffers from
		 * new ixl recv buf cmd
		 */
		old_xfer_buf_ixlp->pkt_size = new_xfer_buf_ixlp->pkt_size;
		old_xfer_buf_ixlp->size = new_xfer_buf_ixlp->size;
		old_xfer_buf_ixlp->ixl_buf.ixldmac_addr =
		    new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
		old_xfer_buf_ixlp->mem_bufp = new_xfer_buf_ixlp->mem_bufp;
		break;
	}
	case IXL1394_OP_SEND_PKT_U:
	case IXL1394_OP_SEND_PKT_ST_U:
	case IXL1394_OP_SEND_PKT_WHDR_ST_U: {
		ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
		ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;

		old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
		new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;

		/*
		 * replace pkthdr2 in output more immediate (the first) hci
		 * descriptor in block, then alter buffer address and count in
		 * IXL send pkt command related output more/last hci descriptor.
		 */
		hcidescp = (hci1394_desc_imm_t *)xferctlp->dma[0].dma_descp -
		    uvp->hdr_offset;
		acc_hdl = xferctlp->dma[0].dma_buf->bi_handle;
		dma_hdl = xferctlp->dma[0].dma_buf->bi_dma_handle;

		ddi_put32(acc_hdl, &hcidescp->q2, uvp->pkthdr2);
		ddi_put32(acc_hdl, &hcidescp->hdr, uvp->hcihdr);
		ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);

		/* Sync descriptor for device (desc was modified) */
		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
		if (err != DDI_SUCCESS) {
			uvp->upd_status = IXL1394_EINTERNAL_ERROR;

			return (DDI_FAILURE);
		}

		/*
		 * set old ixl send pkt size and buffers from
		 * new ixl send pkt cmd
		 */
		old_xfer_pkt_ixlp->size = new_xfer_pkt_ixlp->size;
		old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr =
		    new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr;
		old_xfer_pkt_ixlp->mem_bufp = new_xfer_pkt_ixlp->mem_bufp;
		break;
	}
	case IXL1394_OP_SEND_BUF_U: {
		ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
		ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
		uint32_t	desc_hdr;

		old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
		new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;

		/*
		 * iterate through set of descriptor blocks for this IXL xfer
		 * command replacing pkthdr2 in output more immediate
		 * (the first) hci descriptor block descriptor, then altering
		 * buffer address and count in each output last (the only other)
		 * hci descriptor block descriptor.
		 */
		for (ii = 0; ii < xferctlp->cnt; ii++) {
			hcidescp = (hci1394_desc_imm_t *)
			    xferctlp->dma[ii].dma_descp - uvp->hdr_offset;
			acc_hdl = xferctlp->dma[ii].dma_buf->bi_handle;
			dma_hdl = xferctlp->dma[ii].dma_buf->bi_dma_handle;

			ddi_put32(acc_hdl, &hcidescp->q2, uvp->pkthdr2);
			ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);

			/* advance to next buffer segment */
			uvp->bufaddr += uvp->bufsize;

			/* Sync the descriptor before we grab the header(s) */
			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
			if (err != DDI_SUCCESS) {
				uvp->upd_status = IXL1394_EINTERNAL_ERROR;

				return (DDI_FAILURE);
			}

			/*
			 * this preserves interrupt enable bits, et al
			 * in each desc block hdr
			 */
			desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
			desc_hdr = (desc_hdr & ~DESC_HDR_REQCOUNT_MASK) |
			    uvp->hcihdr;
			ddi_put32(acc_hdl, &hcidescp->hdr, desc_hdr);

			/* Sync descriptor for device (desc was modified) */
			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
			if (err != DDI_SUCCESS) {
				uvp->upd_status = IXL1394_EINTERNAL_ERROR;

				return (DDI_FAILURE);
			}
		}

		/*
		 * set old ixl send buf sizes and buffers from
		 * new ixl send buf cmd
		 */
		old_xfer_buf_ixlp->pkt_size = new_xfer_buf_ixlp->pkt_size;
		old_xfer_buf_ixlp->size = new_xfer_buf_ixlp->size;
		old_xfer_buf_ixlp->ixl_buf.ixldmac_addr =
		    new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
		old_xfer_buf_ixlp->mem_bufp = new_xfer_buf_ixlp->mem_bufp;
		break;
	}

	default:
		/* ixl command being updated must be one of above, else error */
		uvp->upd_status = IXL1394_EINTERNAL_ERROR;

		return (DDI_FAILURE);
	}
	/* hit the WAKE bit in the context control register */
	if (ctxtp->ctxt_flags & HCI1394_ISO_CTXT_RECV) {
		HCI1394_IRCTXT_CTRL_SET(uvp->soft_statep, ctxtp->ctxt_index,
		    0, 0, 0, 0, 0, 1 /* wake */);
	} else {
		HCI1394_ITCTXT_CTRL_SET(uvp->soft_statep, ctxtp->ctxt_index,
		    0, 0, 0, 1 /* wake */);
	}
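	/*
	 * Per OpenHCI "wake" semantics, setting the wake bit makes the
	 * controller re-fetch the descriptor at its current CommandPtr, so
	 * a context idling on a just-rewritten branch picks up the change
	 * rather than remaining stalled.
	 */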
	/* perform update completed successfully */
	return (DDI_SUCCESS);
}
/*
 * hci1394_ixl_update_evaluate()
 *    Evaluate where the hardware is in running through the DMA descriptor
 *    blocks.
 */
static int
hci1394_ixl_update_evaluate(hci1394_ixl_update_vars_t *uvp)
{
	hci1394_iso_ctxt_t *ctxtp;
	ixl1394_command_t *ixlp;
	uint_t		ixldepth;
	int		ii;

	ctxtp = uvp->ctxtp;

	ixlp = NULL;
	ixldepth = 0xFFFFFFFF;
1792 * or until pointer to currently executing IXL command and its depth
1795 while ((ixlp
!= ctxtp
->ixl_execp
) ||
1796 (ixldepth
!= ctxtp
->ixl_exec_depth
)) {
1798 ixlp
= ctxtp
->ixl_execp
;
1799 ixldepth
= ctxtp
->ixl_exec_depth
;
1802 * call IXL execution status evaluation (ixl_dma_sync)
1803 * function returning if error (HCI1394_IXL_INTR_DMALOST is
1804 * only error condition).
1806 * Note: interrupt processing function can only return one of
1807 * the following statuses here:
1808 * HCI1394_IXL_INTR_NOERROR, HCI1394_IXL_INTR_DMASTOP,
1809 * HCI1394_IXL_INTR_DMALOST
1811 * it can not return the following status here:
1812 * HCI1394_IXL_INTR_NOADV
1814 * Don't need to grab the lock here... for the same reason
1815 * explained in hci1394_ixl_update_endup() above.
1817 ctxtp
->intr_flags
&= ~HCI1394_ISO_CTXT_INTRSET
;
1818 if (hci1394_ixl_dma_sync(uvp
->soft_statep
, ctxtp
) ==
1819 HCI1394_IXL_INTR_DMALOST
) {
1821 /* return post-perform update failed status */
1822 uvp
->upd_status
= IXL1394_EPOST_UPD_DMALOST
;
1824 return (DDI_FAILURE
);
	/*
	 * if the currently executing IXL command is one of the IXL_MAX_LOCN
	 * locations saved before update was performed, return update
	 * successful status.
	 */
	for (ii = 0; ii < IXL_MAX_LOCN; ii++) {
		if ((uvp->locn_info[ii].ixlp == ixlp) &&
		    (uvp->locn_info[ii].ixldepth == ixldepth)) {

			return (DDI_SUCCESS);
		}
	}

	/*
	 * else return post-perform update failed status.
	 * note: later can make more sophisticated evaluations about where
	 * execution processing went, and if update has really failed.
	 */
	uvp->upd_status = IXL1394_EPOST_UPD_DMALOST;

	return (DDI_FAILURE);
}
/*
 * hci1394_ixl_update_analysis()
 *    Determine if the hardware is within the range we expected it to be.
 *    If so the update succeeded.
 */
static int
hci1394_ixl_update_analysis(hci1394_ixl_update_vars_t *uvp)
{
	hci1394_iso_ctxt_t *ctxtp;
	ixl1394_command_t *ixlp;
	uint_t		ixldepth;
	int		ii;
	int		status;

	ctxtp = uvp->ctxtp;

	ixlp = NULL;
	ixldepth = 0xFFFFFFFF;
	/*
	 * repeat until ixl execution status evaluation function returns error
	 * or until pointer to currently executing ixl command and its depth
	 * become stable
	 */
	while ((ixlp != ctxtp->ixl_execp) ||
	    (ixldepth != ctxtp->ixl_exec_depth)) {

		ixlp = ctxtp->ixl_execp;
		ixldepth = ctxtp->ixl_exec_depth;

		/*
		 * call ixl execution status evaluation (interrupt processing).
		 * set IXL1394_EPRE_UPD_DMALOST if status INTR_DMALOST and
		 * return.
		 *
		 * Note: interrupt processing function can only return one of
		 * the following statuses here:
		 *    HCI1394_IXL_INTR_NOERROR, HCI1394_IXL_INTR_DMASTOP or
		 *    HCI1394_IXL_INTR_DMALOST
		 *
		 * it can not return the following status here:
		 *    HCI1394_IXL_INTR_NOADV
		 *
		 * Don't need to grab the lock here... for the same reason
		 * explained in hci1394_ixl_update_endup() above.
		 */
		ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;

		status = hci1394_ixl_dma_sync(uvp->soft_statep, ctxtp);
		if (status == HCI1394_IXL_INTR_DMALOST) {
			/*
			 * set pre-update dma processing lost status and
			 * return failure
			 */
			uvp->upd_status = IXL1394_EPRE_UPD_DMALOST;

			return (DDI_FAILURE);
		}
	}
	/*
	 * save locations of currently executing ixl command and the
	 * next IXL_MAX_LOCN - 1 xfer start commands that follow it
	 */
	hci1394_ixl_update_set_locn_info(uvp);

	/*
	 * if xfer_ixl_cmd associated with the IXL command being updated is one
	 * of the saved (currently executing) IXL commands, risk is too great
	 * to perform update now; set IXL1394_ERISK_PROHIBITS_UPD status and
	 * return failure.
	 *
	 * Note: later can implement more sophisticated risk override
	 * evaluations and processing.
	 */
	for (ii = 0; ii < IXL_MAX_LOCN; ii++) {

		if ((uvp->locn_info[ii].ixlp == uvp->ixlxferp) &&
		    (uvp->locn_info[ii].ixldepth >= uvp->ixldepth) &&
		    (uvp->locn_info[ii].ixldepth <
		    (uvp->ixldepth + uvp->ixlcount))) {

			uvp->upd_status = IXL1394_ERISK_PROHIBITS_UPD;

			return (DDI_FAILURE);
		}
	}

	/* it is safe for the update to be performed; return ok status */
	return (DDI_SUCCESS);
}
/*
 * hci1394_ixl_update_set_locn_info()
 *    set up the local list of the IXL_MAX_LOCN next commandPtr locations we
 *    expect the hardware to get to in the next 125 microseconds (i.e. one
 *    isochronous cycle).
 */
static void
hci1394_ixl_update_set_locn_info(hci1394_ixl_update_vars_t *uvp)
{
	hci1394_iso_ctxt_t *ctxtp;
	ixl1394_command_t *ixlp;
	uint_t		ixldepth;
	int		ii;

	/*
	 * find next xfer start ixl command, starting with current ixl command
	 * where execution last left off
	 */
	ctxtp = uvp->ctxtp;

	ixldepth = ctxtp->ixl_exec_depth;
	(void) hci1394_ixl_find_next_exec_xfer(ctxtp->ixl_execp, NULL, &ixlp);

	/*
	 * if the current IXL command wasn't a xfer start command, then reset
	 * the depth to 0 for the xfer command found
	 */
	if (ixlp != ctxtp->ixl_execp)
		ixldepth = 0;

	/*
	 * save xfer start IXL command & its depth and also save location and
	 * depth of the next IXL_MAX_LOCN-1 xfer start IXL commands following
	 * it (if any)
	 */
	for (ii = 0; ii < IXL_MAX_LOCN; ii++) {
		uvp->locn_info[ii].ixlp = ixlp;
		uvp->locn_info[ii].ixldepth = ixldepth;

		if (ixlp != NULL) {
			/*
			 * if more dma commands generated by this xfer command
			 * still follow, use them.  else, find the next xfer
			 * start IXL command and set its depth to 0.
			 */
			if (++ixldepth >= ((hci1394_xfer_ctl_t *)
			    ixlp->compiler_privatep)->cnt) {

				(void) hci1394_ixl_find_next_exec_xfer(
				    ixlp->next_ixlp, NULL, &ixlp);
				ixldepth = 0;
			}
		}
	}
}