1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
7 * with the License.
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
20 * CDDL HEADER END
23 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
28 * hci1394_ixl_comp.c
29 * Isochronous IXL Compiler.
30  * The compiler converts the general hardware-independent IXL command
31 * blocks into OpenHCI DMA descriptors.
34 #include <sys/kmem.h>
35 #include <sys/types.h>
36 #include <sys/conf.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
40 #include <sys/tnf_probe.h>
42 #include <sys/1394/h1394.h>
43 #include <sys/1394/ixl1394.h>
44 #include <sys/1394/adapters/hci1394.h>
46 /* compiler allocation size for DMA descriptors. 8000 is 500 descriptors */
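/* (assuming a 16-byte, i.e. 4-quadlet, hci1394_desc_t: 8000 / 16 = 500) */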
47 #define HCI1394_IXL_PAGESIZE 8000
49 /* invalid opcode */
50 #define IXL1394_OP_INVALID (0 | IXL1394_OPTY_OTHER)
53 * maximum number of interrupts permitted for a single context in which
54 * the context does not advance to the next DMA descriptor. Interrupts are
55 * triggered by 1) hardware completing a DMA descriptor block which has the
56 * interrupt (i) bits set, 2) a cycle_inconsistent interrupt, or 3) a cycle_lost
57 * interrupt. Once the max is reached, the HCI1394_IXL_INTR_NOADV error is
58 * returned.
60 int hci1394_ixl_max_noadv_intrs = 8;
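/*
 * Being a patchable global, the default above can be overridden at boot,
 * e.g. via an /etc/system line (the value here is purely illustrative):
 *	set hci1394:hci1394_ixl_max_noadv_intrs = 16
 */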
63 static void hci1394_compile_ixl_init(hci1394_comp_ixl_vars_t *wvp,
64 hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
65 ixl1394_command_t *ixlp);
66 static void hci1394_compile_ixl_endup(hci1394_comp_ixl_vars_t *wvp);
67 static void hci1394_parse_ixl(hci1394_comp_ixl_vars_t *wvp,
68 ixl1394_command_t *ixlp);
69 static void hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t *wvp);
70 static void hci1394_finalize_cur_xfer_desc(hci1394_comp_ixl_vars_t *wvp);
71 static void hci1394_bld_recv_pkt_desc(hci1394_comp_ixl_vars_t *wvp);
72 static void hci1394_bld_recv_buf_ppb_desc(hci1394_comp_ixl_vars_t *wvp);
73 static void hci1394_bld_recv_buf_fill_desc(hci1394_comp_ixl_vars_t *wvp);
74 static void hci1394_bld_xmit_pkt_desc(hci1394_comp_ixl_vars_t *wvp);
75 static void hci1394_bld_xmit_buf_desc(hci1394_comp_ixl_vars_t *wvp);
76 static void hci1394_bld_xmit_hdronly_nopkt_desc(hci1394_comp_ixl_vars_t *wvp);
77 static int hci1394_bld_dma_mem_desc_blk(hci1394_comp_ixl_vars_t *wvp,
78 caddr_t *dma_descpp, uint32_t *dma_desc_bound);
79 static void hci1394_set_xmit_pkt_hdr(hci1394_comp_ixl_vars_t *wvp);
80 static void hci1394_set_xmit_skip_mode(hci1394_comp_ixl_vars_t *wvp);
81 static void hci1394_set_xmit_storevalue_desc(hci1394_comp_ixl_vars_t *wvp);
82 static int hci1394_set_next_xfer_buf(hci1394_comp_ixl_vars_t *wvp,
83 uint32_t bufp, uint16_t size);
84 static int hci1394_flush_end_desc_check(hci1394_comp_ixl_vars_t *wvp,
85 uint32_t count);
86 static int hci1394_flush_hci_cache(hci1394_comp_ixl_vars_t *wvp);
87 static uint32_t hci1394_alloc_storevalue_dma_mem(hci1394_comp_ixl_vars_t *wvp);
88 static hci1394_xfer_ctl_t *hci1394_alloc_xfer_ctl(hci1394_comp_ixl_vars_t *wvp,
89 uint32_t dmacnt);
90 static void *hci1394_alloc_dma_mem(hci1394_comp_ixl_vars_t *wvp,
91 uint32_t size, uint32_t *dma_bound);
92 static boolean_t hci1394_is_opcode_valid(uint16_t ixlopcode);
96  * FULL LIST OF ACCEPTED IXL COMMAND OPCODES:
97 * Receive Only: Transmit Only:
98 * IXL1394_OP_RECV_PKT_ST IXL1394_OP_SEND_PKT_WHDR_ST
99 * IXL1394_OP_RECV_PKT IXL1394_OP_SEND_PKT_ST
100 * IXL1394_OP_RECV_BUF IXL1394_OP_SEND_PKT
101 * IXL1394_OP_SET_SYNCWAIT IXL1394_OP_SEND_BUF
102 * IXL1394_OP_SEND_HDR_ONLY
103 * Receive or Transmit: IXL1394_OP_SEND_NO_PKT
104 * IXL1394_OP_CALLBACK IXL1394_OP_SET_TAGSYNC
105 * IXL1394_OP_LABEL IXL1394_OP_SET_SKIPMODE
106 * IXL1394_OP_JUMP IXL1394_OP_STORE_TIMESTAMP
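 *
 * For example, a receive context that loops forever over one buffer can be
 * described (conceptually; the command structs themselves are defined in
 * ixl1394.h) as the linked chain:
 *
 *	IXL1394_OP_LABEL --> IXL1394_OP_RECV_BUF --> IXL1394_OP_JUMP
 *	      ^            (size = pktcnt * pkt_size)       |
 *	      +---------------------------------------------+
 *
 * where each command's next_ixlp field points to the next command in the
 * chain and the jump's label field points back at the label.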
110 * hci1394_compile_ixl()
111 * Top level ixl compiler entry point. Scans ixl and builds openHCI 1.0
112 * descriptor blocks in dma memory.
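 *
 *    A caller is expected to use it roughly as follows (sketch only; the
 *    locals and the ixl_head name are hypothetical):
 *
 *	int result;
 *
 *	if (hci1394_compile_ixl(soft_statep, ctxtp, ixl_head, &result) !=
 *	    DDI_SUCCESS) {
 *		... result holds one of the IXL1394_E* error codes ...
 *	}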
115 hci1394_compile_ixl(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
116 ixl1394_command_t *ixlp, int *resultp)
118 hci1394_comp_ixl_vars_t wv; /* working variables used throughout */
120 ASSERT(soft_statep != NULL);
121 ASSERT(ctxtp != NULL);
123 /* Initialize compiler working variables */
124 hci1394_compile_ixl_init(&wv, soft_statep, ctxtp, ixlp);
127 * First pass:
128 * Parse ixl commands, building desc blocks, until end of IXL
129 * linked list.
131 hci1394_parse_ixl(&wv, ixlp);
134 * Second pass:
135 * Resolve all generated descriptor block jump and skip addresses.
136 * Set interrupt enable in descriptor blocks which have callback
137  * operations in their execution scope. (Previously store_timestamp
138 * operations were counted also.) Set interrupt enable in descriptor
139 * blocks which were introduced by an ixl label command.
141 if (wv.dma_bld_error == 0) {
142 hci1394_finalize_all_xfer_desc(&wv);
145 /* Endup: finalize and cleanup ixl compile, return result */
146 hci1394_compile_ixl_endup(&wv);
148 *resultp = wv.dma_bld_error;
149 if (*resultp != 0) {
150 return (DDI_FAILURE);
151 } else {
152 return (DDI_SUCCESS);
157 * hci1394_compile_ixl_init()
158 * Initialize the isoch context structure associated with the IXL
159 * program, and initialize the temporary working variables structure.
161 static void
162 hci1394_compile_ixl_init(hci1394_comp_ixl_vars_t *wvp,
163 hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
164 ixl1394_command_t *ixlp)
166 /* initialize common recv/xmit compile values */
167 wvp->soft_statep = soft_statep;
168 wvp->ctxtp = ctxtp;
170 /* init/clear ctxtp values */
171 ctxtp->dma_mem_execp = (uintptr_t)NULL;
172 ctxtp->dma_firstp = NULL;
173 ctxtp->dma_last_time = 0;
174 ctxtp->xcs_firstp = NULL;
175 ctxtp->ixl_exec_depth = 0;
176 ctxtp->ixl_execp = NULL;
177 ctxtp->ixl_firstp = ixlp;
178 ctxtp->default_skipxferp = NULL;
181 * the context's max_noadv_intrs is set here instead of in isoch init
182 * because the default is patchable and would only be picked up this way
184 ctxtp->max_noadv_intrs = hci1394_ixl_max_noadv_intrs;
186 /* init working variables */
187 wvp->xcs_firstp = NULL;
188 wvp->xcs_currentp = NULL;
190 wvp->dma_firstp = NULL;
191 wvp->dma_currentp = NULL;
192 wvp->dma_bld_error = 0;
194 wvp->ixl_io_mode = ctxtp->ctxt_flags;
195 wvp->ixl_cur_cmdp = NULL;
196 wvp->ixl_cur_xfer_stp = NULL;
197 wvp->ixl_cur_labelp = NULL;
199 wvp->ixl_xfer_st_cnt = 0; /* count of xfer start commands found */
200 wvp->xfer_state = XFER_NONE; /* none, pkt, buf, skip, hdronly */
201 wvp->xfer_hci_flush = 0; /* updateable - xfer, jump, set */
202 wvp->xfer_pktlen = 0;
203 wvp->xfer_bufcnt = 0;
204 wvp->descriptors = 0;
206 /* START RECV ONLY SECTION */
207 wvp->ixl_setsyncwait_cnt = 0;
209 /* START XMIT ONLY SECTION */
210 wvp->ixl_settagsync_cmdp = NULL;
211 wvp->ixl_setskipmode_cmdp = NULL;
212 wvp->default_skipmode = ctxtp->default_skipmode; /* nxt,self,stop,jmp */
213 wvp->default_skiplabelp = ctxtp->default_skiplabelp;
214 wvp->default_skipxferp = NULL;
215 wvp->skipmode = ctxtp->default_skipmode;
216 wvp->skiplabelp = NULL;
217 wvp->skipxferp = NULL;
218 wvp->default_tag = ctxtp->default_tag;
219 wvp->default_sync = ctxtp->default_sync;
220 wvp->storevalue_bufp = hci1394_alloc_storevalue_dma_mem(wvp);
221 wvp->storevalue_data = 0;
222 wvp->xmit_pkthdr1 = 0;
223 wvp->xmit_pkthdr2 = 0;
227 * hci1394_compile_ixl_endup()
228 * This routine is called just before the main hci1394_compile_ixl() exits.
229 * It checks for errors and performs the appropriate cleanup, or it rolls any
230 * relevant info from the working variables struct into the context structure
232 static void
233 hci1394_compile_ixl_endup(hci1394_comp_ixl_vars_t *wvp)
235 ixl1394_command_t *ixl_exec_stp;
236 hci1394_idma_desc_mem_t *dma_nextp;
237 int err;
239 /* error if no descriptor blocks found in ixl & created in dma memory */
240 if ((wvp->dma_bld_error == 0) && (wvp->ixl_xfer_st_cnt == 0)) {
241 wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
244 /* if no errors yet, find the first IXL command that's a transfer cmd */
245 if (wvp->dma_bld_error == 0) {
246 err = hci1394_ixl_find_next_exec_xfer(wvp->ctxtp->ixl_firstp,
247 NULL, &ixl_exec_stp);
249 /* error if a label<->jump loop, or no xfer */
250 if ((err == DDI_FAILURE) || (ixl_exec_stp == NULL)) {
251 wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
255 /* Sync all the DMA descriptor buffers */
256 dma_nextp = wvp->ctxtp->dma_firstp;
257 while (dma_nextp != NULL) {
258 err = ddi_dma_sync(dma_nextp->mem.bi_dma_handle,
259 (off_t)dma_nextp->mem.bi_kaddr, dma_nextp->mem.bi_length,
260 DDI_DMA_SYNC_FORDEV);
261 if (err != DDI_SUCCESS) {
262 wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
264 break;
267 /* advance to next dma memory descriptor */
268 dma_nextp = dma_nextp->dma_nextp;
272 * If error, cleanup and return. delete all allocated xfer_ctl structs
273 * and all dma descriptor page memory and its dma memory blocks too.
275 if (wvp->dma_bld_error != 0) {
276 wvp->ctxtp->xcs_firstp = (void *)wvp->xcs_firstp;
277 wvp->ctxtp->dma_firstp = wvp->dma_firstp;
278 hci1394_ixl_cleanup(wvp->soft_statep, wvp->ctxtp);
280 return;
283 /* can only get to here if the first ixl transfer command is found */
285 /* set required processing vars into ctxtp struct */
286 wvp->ctxtp->default_skipxferp = wvp->default_skipxferp;
287 wvp->ctxtp->dma_mem_execp = 0;
290 * the transfer command's compiler private xfer_ctl structure has the
291 * appropriate bound address
293 wvp->ctxtp->dma_mem_execp = (uint32_t)((hci1394_xfer_ctl_t *)
294 ixl_exec_stp->compiler_privatep)->dma[0].dma_bound;
295 wvp->ctxtp->xcs_firstp = (void *)wvp->xcs_firstp;
296 wvp->ctxtp->dma_firstp = wvp->dma_firstp;
297 wvp->ctxtp->dma_last_time = 0;
298 wvp->ctxtp->ixl_exec_depth = 0;
299 wvp->ctxtp->ixl_execp = NULL;
303 * hci1394_parse_ixl()
304 * Scan IXL program and build ohci DMA descriptor blocks in dma memory.
306 * Parse/process succeeding ixl commands until end of IXL linked list is
307 * reached. Evaluate ixl syntax and build (xmit or recv) descriptor
308 * blocks. To aid execution time evaluation of current location, enable
309 * status recording on each descriptor block built.
310 * On xmit, set sync & tag bits. On recv, optionally set wait for sync bit.
312 static void
313 hci1394_parse_ixl(hci1394_comp_ixl_vars_t *wvp, ixl1394_command_t *ixlp)
315 ixl1394_command_t *ixlnextp = ixlp; /* addr of next ixl cmd */
316 ixl1394_command_t *ixlcurp = NULL; /* addr of current ixl cmd */
317 	uint16_t ixlopcode = 0;		/* opcode of current ixl cmd */
319 uint32_t pktsize;
320 uint32_t pktcnt;
322 /* follow ixl links until reach end or find error */
323 while ((ixlnextp != NULL) && (wvp->dma_bld_error == 0)) {
325 /* set this command as the current ixl command */
326 wvp->ixl_cur_cmdp = ixlcurp = ixlnextp;
327 ixlnextp = ixlcurp->next_ixlp;
329 ixlopcode = ixlcurp->ixl_opcode;
331 /* init compiler controlled values in current ixl command */
332 ixlcurp->compiler_privatep = NULL;
333 ixlcurp->compiler_resv = 0;
335 /* error if xmit/recv mode not appropriate for current cmd */
336 if ((((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) &&
337 ((ixlopcode & IXL1394_OPF_ONRECV) == 0)) ||
338 (((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) == 0) &&
339 ((ixlopcode & IXL1394_OPF_ONXMIT) == 0))) {
341 /* check if command op failed because it was invalid */
342 if (hci1394_is_opcode_valid(ixlopcode) != B_TRUE) {
343 wvp->dma_bld_error = IXL1394_EBAD_IXL_OPCODE;
344 } else {
345 wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
347 continue;
351 * if ends xfer flag set, finalize current xfer descriptor
352 * block build
354 if ((ixlopcode & IXL1394_OPF_ENDSXFER) != 0) {
355 /* finalize any descriptor block build in progress */
356 hci1394_finalize_cur_xfer_desc(wvp);
358 if (wvp->dma_bld_error != 0) {
359 continue;
364 * now process based on specific opcode value
366 switch (ixlopcode) {
368 case IXL1394_OP_RECV_BUF:
369 case IXL1394_OP_RECV_BUF_U: {
370 ixl1394_xfer_buf_t *cur_xfer_buf_ixlp;
372 cur_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)ixlcurp;
375 * In packet-per-buffer mode:
376 * This ixl command builds a collection of xfer
377 * descriptor blocks (size/pkt_size of them) each to
378 * recv a packet whose buffer size is pkt_size and
379 * whose buffer ptr is (pktcur*pkt_size + bufp)
381 * In buffer fill mode:
382 * This ixl command builds a single xfer descriptor
383 * block to recv as many packets or parts of packets
384 * as can fit into the buffer size specified
385 * (pkt_size is not used).
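			 *
			 * For example (packet-per-buffer): size = 4096 with
			 * pkt_size = 1024 yields pktcnt = 4 descriptor
			 * blocks; size = 4096 with pkt_size = 1000 does not
			 * divide evenly and fails below with
			 * IXL1394_EPKTSIZE_RATIO.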
388 /* set xfer_state for new descriptor block build */
389 wvp->xfer_state = XFER_BUF;
391 /* set this ixl command as current xferstart command */
392 wvp->ixl_cur_xfer_stp = ixlcurp;
395 * perform packet-per-buffer checks
396 * (no checks needed when in buffer fill mode)
398 if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) == 0) {
400 /* the packets must use the buffer exactly */
401 pktsize = cur_xfer_buf_ixlp->pkt_size;
402 pktcnt = 0;
403 if (pktsize != 0) {
404 pktcnt = cur_xfer_buf_ixlp->size /
405 pktsize;
407 if ((pktcnt == 0) || ((pktsize * pktcnt) !=
408 cur_xfer_buf_ixlp->size)) {
410 wvp->dma_bld_error =
411 IXL1394_EPKTSIZE_RATIO;
412 continue;
417 * set buffer pointer & size into first xfer_bufp
418 * and xfer_size
420 if (hci1394_set_next_xfer_buf(wvp,
421 cur_xfer_buf_ixlp->ixl_buf.ixldmac_addr,
422 cur_xfer_buf_ixlp->size) != DDI_SUCCESS) {
424 /* wvp->dma_bld_error is set by above call */
425 continue;
427 break;
430 case IXL1394_OP_RECV_PKT_ST:
431 case IXL1394_OP_RECV_PKT_ST_U: {
432 ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
434 cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
436 /* error if in buffer fill mode */
437 if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
438 wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
439 continue;
442 /* set xfer_state for new descriptor block build */
443 /* set this ixl command as current xferstart command */
444 wvp->xfer_state = XFER_PKT;
445 wvp->ixl_cur_xfer_stp = ixlcurp;
448 * set buffer pointer & size into first xfer_bufp
449 * and xfer_size
451 if (hci1394_set_next_xfer_buf(wvp,
452 cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
453 cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
455 /* wvp->dma_bld_error is set by above call */
456 continue;
458 break;
461 case IXL1394_OP_RECV_PKT:
462 case IXL1394_OP_RECV_PKT_U: {
463 ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
465 cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
467 /* error if in buffer fill mode */
468 if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
469 wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
470 continue;
473 /* error if xfer_state not xfer pkt */
474 if (wvp->xfer_state != XFER_PKT) {
475 wvp->dma_bld_error = IXL1394_EMISPLACED_RECV;
476 continue;
480 * save xfer start cmd ixl ptr in compiler_privatep
481 * field of this cmd
483 ixlcurp->compiler_privatep = (void *)
484 wvp->ixl_cur_xfer_stp;
487 * save pkt index [1-n] in compiler_resv field of
488 * this cmd
490 ixlcurp->compiler_resv = wvp->xfer_bufcnt;
493 * set buffer pointer & size into next xfer_bufp
494 * and xfer_size
496 if (hci1394_set_next_xfer_buf(wvp,
497 cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
498 cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
500 /* wvp->dma_bld_error is set by above call */
501 continue;
505 * set updateable xfer cache flush eval flag if
506 * updateable opcode
508 if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
509 wvp->xfer_hci_flush |= UPDATEABLE_XFER;
511 break;
514 case IXL1394_OP_SEND_BUF:
515 case IXL1394_OP_SEND_BUF_U: {
516 ixl1394_xfer_buf_t *cur_xfer_buf_ixlp;
518 cur_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)ixlcurp;
521 * These send_buf commands build a collection of xmit
522 * descriptor blocks (size/pkt_size of them) each to
523 * xfer a packet whose buffer size is pkt_size and whose
524 			 * buffer ptr is (pktcur*pkt_size + bufp). (ptr and size
525 * are adjusted if they have header form of ixl cmd)
528 /* set xfer_state for new descriptor block build */
529 wvp->xfer_state = XFER_BUF;
531 /* set this ixl command as current xferstart command */
532 wvp->ixl_cur_xfer_stp = ixlcurp;
534 			/* the packets must use the buffer exactly, else error */
535 pktsize = cur_xfer_buf_ixlp->pkt_size;
536 pktcnt = 0;
537 if (pktsize != 0) {
538 pktcnt = cur_xfer_buf_ixlp->size / pktsize;
540 if ((pktcnt == 0) || ((pktsize * pktcnt) !=
541 cur_xfer_buf_ixlp->size)) {
543 wvp->dma_bld_error = IXL1394_EPKTSIZE_RATIO;
544 continue;
547 /* set buf ptr & size into 1st xfer_bufp & xfer_size */
548 if (hci1394_set_next_xfer_buf(wvp,
549 cur_xfer_buf_ixlp->ixl_buf.ixldmac_addr,
550 cur_xfer_buf_ixlp->size) != DDI_SUCCESS) {
552 /* wvp->dma_bld_error is set by above call */
553 continue;
555 break;
558 case IXL1394_OP_SEND_PKT_ST:
559 case IXL1394_OP_SEND_PKT_ST_U: {
560 ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
562 cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
564 /* set xfer_state for new descriptor block build */
565 /* set this ixl command as current xferstart command */
566 wvp->xfer_state = XFER_PKT;
567 wvp->ixl_cur_xfer_stp = ixlcurp;
570 * set buffer pointer & size into first xfer_bufp and
571 * xfer_size
573 if (hci1394_set_next_xfer_buf(wvp,
574 cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
575 cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
577 /* wvp->dma_bld_error is set by above call */
578 continue;
580 break;
583 case IXL1394_OP_SEND_PKT_WHDR_ST:
584 case IXL1394_OP_SEND_PKT_WHDR_ST_U: {
585 ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
587 cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
589 /* set xfer_state for new descriptor block build */
590 /* set this ixl command as current xferstart command */
591 wvp->xfer_state = XFER_PKT;
592 wvp->ixl_cur_xfer_stp = ixlcurp;
595 * buffer size must be at least 4 (must include header),
596 * else error
598 if (cur_xfer_pkt_ixlp->size < 4) {
599 wvp->dma_bld_error = IXL1394_EPKT_HDR_MISSING;
600 continue;
604 			 * set buffer and size (excluding header) into first
605 * xfer_bufp and xfer_size
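			 * (e.g. a 1028-byte buffer whose first quadlet holds
			 * the packet header contributes a 1024-byte payload
			 * starting at ixl_buf.ixldmac_addr + 4)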
607 if (hci1394_set_next_xfer_buf(wvp,
608 cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr + 4,
609 cur_xfer_pkt_ixlp->size - 4) != DDI_SUCCESS) {
611 /* wvp->dma_bld_error is set by above call */
612 continue;
614 break;
617 case IXL1394_OP_SEND_PKT:
618 case IXL1394_OP_SEND_PKT_U: {
619 ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
621 cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
623 /* error if xfer_state not xfer pkt */
624 if (wvp->xfer_state != XFER_PKT) {
625 wvp->dma_bld_error = IXL1394_EMISPLACED_SEND;
626 continue;
630 * save xfer start cmd ixl ptr in compiler_privatep
631 * field of this cmd
633 ixlcurp->compiler_privatep = (void *)
634 wvp->ixl_cur_xfer_stp;
637 * save pkt index [1-n] in compiler_resv field of this
638 * cmd
640 ixlcurp->compiler_resv = wvp->xfer_bufcnt;
643 * set buffer pointer & size into next xfer_bufp
644 * and xfer_size
646 if (hci1394_set_next_xfer_buf(wvp,
647 cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
648 cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
650 /* wvp->dma_bld_error is set by above call */
651 continue;
655 * set updateable xfer cache flush eval flag if
656 * updateable opcode
658 if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
659 wvp->xfer_hci_flush |= UPDATEABLE_XFER;
661 break;
664 case IXL1394_OP_SEND_HDR_ONLY:
665 /* set xfer_state for new descriptor block build */
666 wvp->xfer_state = XMIT_HDRONLY;
668 /* set this ixl command as current xferstart command */
669 wvp->ixl_cur_xfer_stp = ixlcurp;
670 break;
672 case IXL1394_OP_SEND_NO_PKT:
673 /* set xfer_state for new descriptor block build */
674 wvp->xfer_state = XMIT_NOPKT;
676 /* set this ixl command as current xferstart command */
677 wvp->ixl_cur_xfer_stp = ixlcurp;
678 break;
680 case IXL1394_OP_JUMP:
681 case IXL1394_OP_JUMP_U: {
682 ixl1394_jump_t *cur_jump_ixlp;
684 cur_jump_ixlp = (ixl1394_jump_t *)ixlcurp;
687 * verify label indicated by IXL1394_OP_JUMP is
688 * actually an IXL1394_OP_LABEL or NULL
690 if ((cur_jump_ixlp->label != NULL) &&
691 (cur_jump_ixlp->label->ixl_opcode !=
692 IXL1394_OP_LABEL)) {
693 wvp->dma_bld_error = IXL1394_EJUMP_NOT_TO_LABEL;
694 continue;
696 break;
699 case IXL1394_OP_LABEL:
701 * save current ixl label command for xfer cmd
702 * finalize processing
704 wvp->ixl_cur_labelp = ixlcurp;
706 /* set initiating label flag to cause cache flush */
707 wvp->xfer_hci_flush |= INITIATING_LBL;
708 break;
710 case IXL1394_OP_CALLBACK:
711 case IXL1394_OP_CALLBACK_U:
712 case IXL1394_OP_STORE_TIMESTAMP:
714 * these commands are accepted during compile,
715 			 * processed during execution (interrupt handling).
716 * No further processing is needed here.
718 break;
720 case IXL1394_OP_SET_SKIPMODE:
721 case IXL1394_OP_SET_SKIPMODE_U:
723 * Error if already have a set skipmode cmd for
724 * this xfer
726 if (wvp->ixl_setskipmode_cmdp != NULL) {
727 wvp->dma_bld_error = IXL1394_EDUPLICATE_SET_CMD;
728 continue;
731 /* save skip mode ixl command and verify skipmode */
732 wvp->ixl_setskipmode_cmdp = (ixl1394_set_skipmode_t *)
733 ixlcurp;
735 if ((wvp->ixl_setskipmode_cmdp->skipmode !=
736 IXL1394_SKIP_TO_NEXT) &&
737 (wvp->ixl_setskipmode_cmdp->skipmode !=
738 IXL1394_SKIP_TO_SELF) &&
739 (wvp->ixl_setskipmode_cmdp->skipmode !=
740 IXL1394_SKIP_TO_STOP) &&
741 (wvp->ixl_setskipmode_cmdp->skipmode !=
742 IXL1394_SKIP_TO_LABEL)) {
744 wvp->dma_bld_error = IXL1394_EBAD_SKIPMODE;
745 continue;
749 * if mode is IXL1394_SKIP_TO_LABEL, verify label
750 * references an IXL1394_OP_LABEL
752 if ((wvp->ixl_setskipmode_cmdp->skipmode ==
753 IXL1394_SKIP_TO_LABEL) &&
754 ((wvp->ixl_setskipmode_cmdp->label == NULL) ||
755 (wvp->ixl_setskipmode_cmdp->label->ixl_opcode !=
756 IXL1394_OP_LABEL))) {
758 wvp->dma_bld_error = IXL1394_EJUMP_NOT_TO_LABEL;
759 continue;
762 * set updateable set cmd cache flush eval flag if
763 * updateable opcode
765 if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
766 wvp->xfer_hci_flush |= UPDATEABLE_SET;
768 break;
770 case IXL1394_OP_SET_TAGSYNC:
771 case IXL1394_OP_SET_TAGSYNC_U:
773 * is an error if already have a set tag and sync cmd
774 * for this xfer
776 if (wvp->ixl_settagsync_cmdp != NULL) {
777 wvp->dma_bld_error = IXL1394_EDUPLICATE_SET_CMD;
778 continue;
781 /* save ixl command containing tag and sync values */
782 wvp->ixl_settagsync_cmdp =
783 (ixl1394_set_tagsync_t *)ixlcurp;
786 * set updateable set cmd cache flush eval flag if
787 * updateable opcode
789 if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
790 wvp->xfer_hci_flush |= UPDATEABLE_SET;
792 break;
794 case IXL1394_OP_SET_SYNCWAIT:
796 * count ixl wait-for-sync commands since last
797 			 * finalize; ignore multiple occurrences for same xfer
798 * command
800 wvp->ixl_setsyncwait_cnt++;
801 break;
803 default:
805 wvp->dma_bld_error = IXL1394_EBAD_IXL_OPCODE;
806 continue;
808 } /* while */
810 /* finalize any last descriptor block build */
811 wvp->ixl_cur_cmdp = NULL;
812 if (wvp->dma_bld_error == 0) {
813 hci1394_finalize_cur_xfer_desc(wvp);
818 * hci1394_finalize_all_xfer_desc()
819 * Pass 2: Scan IXL resolving all dma descriptor jump and skip addresses.
821 * Set interrupt enable on first descriptor block associated with current
822 * xfer IXL command if current IXL xfer was introduced by an IXL label cmnd.
824 * Set interrupt enable on last descriptor block associated with current xfer
825 * IXL command if any callback ixl commands are found on the execution path
826 * between the current and the next xfer ixl command. (Previously, this
827 * applied to store timestamp ixl commands, as well.)
829 static void
830 hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t *wvp)
832 ixl1394_command_t *ixlcurp; /* current ixl command */
833 ixl1394_command_t *ixlnextp; /* next ixl command */
834 ixl1394_command_t *ixlexecnext;
835 hci1394_xfer_ctl_t *xferctl_curp;
836 hci1394_xfer_ctl_t *xferctl_nxtp;
837 hci1394_desc_t *hcidescp;
838 ddi_acc_handle_t acc_hdl;
839 uint32_t temp;
840 uint32_t dma_execnext_addr;
841 uint32_t dma_skiplabel_addr;
842 uint32_t dma_skip_addr;
843 uint32_t callback_cnt;
844 uint16_t repcnt;
845 uint16_t ixlopcode;
846 int ii;
847 int err;
850 * If xmit mode and if default skipmode is skip to label -
851 * follow exec path starting at default skipmode label until
852 * find the first ixl xfer command which is to be executed.
853 * Set its address into default_skipxferp.
855 if (((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) == 0) &&
856 (wvp->ctxtp->default_skipmode == IXL1394_SKIP_TO_LABEL)) {
858 err = hci1394_ixl_find_next_exec_xfer(wvp->default_skiplabelp,
859 NULL, &wvp->default_skipxferp);
860 if (err == DDI_FAILURE) {
861 wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
862 return;
866 /* set first ixl cmd */
867 ixlnextp = wvp->ctxtp->ixl_firstp;
869 /* follow ixl links until reach end or find error */
870 while ((ixlnextp != NULL) && (wvp->dma_bld_error == 0)) {
872 /* set this command as the current ixl command */
873 ixlcurp = ixlnextp;
874 ixlnextp = ixlcurp->next_ixlp;
876 /* get command opcode removing unneeded update flag */
877 ixlopcode = ixlcurp->ixl_opcode & ~IXL1394_OPF_UPDATE;
880 * Scan for next ixl xfer start command (including this one),
881 * along ixl link path. Once xfer command found, find next IXL
882 * xfer cmd along execution path and fill in branch address of
883 		 * current xfer command. If it is a composite ixl xfer command, first
884 * link forward branch dma addresses of each descriptor block in
885 * composite, until reach final one then set its branch address
886 * to next execution path xfer found. Next determine skip mode
887 * and fill in skip address(es) appropriately.
889 /* skip to next if not xfer start ixl command */
890 if (((ixlopcode & IXL1394_OPF_ISXFER) == 0) ||
891 ((ixlopcode & IXL1394_OPTY_MASK) == 0)) {
892 continue;
896 * get xfer_ctl structure and composite repeat count for current
897 * IXL xfer cmd
899 xferctl_curp = (hci1394_xfer_ctl_t *)ixlcurp->compiler_privatep;
900 repcnt = xferctl_curp->cnt;
903 * if initiated by an IXL label command, set interrupt enable
904 * flag into last component of first descriptor block of
905 * current IXL xfer cmd
907 if ((xferctl_curp->ctl_flags & XCTL_LABELLED) != 0) {
908 hcidescp = (hci1394_desc_t *)
909 xferctl_curp->dma[0].dma_descp;
910 acc_hdl = xferctl_curp->dma[0].dma_buf->bi_handle;
911 temp = ddi_get32(acc_hdl, &hcidescp->hdr);
912 temp |= DESC_INTR_ENBL;
913 ddi_put32(acc_hdl, &hcidescp->hdr, temp);
916 /* find next xfer IXL cmd by following execution path */
917 err = hci1394_ixl_find_next_exec_xfer(ixlcurp->next_ixlp,
918 &callback_cnt, &ixlexecnext);
920 /* if label<->jump loop detected, return error */
921 if (err == DDI_FAILURE) {
922 wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
924 continue;
927 /* link current IXL's xfer_ctl to next xfer IXL on exec path */
928 xferctl_curp->execp = ixlexecnext;
931 * if callbacks have been seen during execution path scan,
932 * set interrupt enable flag into last descriptor of last
933 * descriptor block of current IXL xfer cmd
935 if (callback_cnt != 0) {
936 hcidescp = (hci1394_desc_t *)
937 xferctl_curp->dma[repcnt - 1].dma_descp;
938 acc_hdl =
939 xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
940 temp = ddi_get32(acc_hdl, &hcidescp->hdr);
941 temp |= DESC_INTR_ENBL;
942 ddi_put32(acc_hdl, &hcidescp->hdr, temp);
946 * obtain dma bound addr of next exec path IXL xfer command,
947 * if any
949 dma_execnext_addr = 0;
951 if (ixlexecnext != NULL) {
952 xferctl_nxtp = (hci1394_xfer_ctl_t *)
953 ixlexecnext->compiler_privatep;
954 dma_execnext_addr = xferctl_nxtp->dma[0].dma_bound;
955 } else {
957 * If this is last descriptor (next == NULL), then
958 * make sure the interrupt bit is enabled. This
959 * way we can ensure that we are notified when the
960 * descriptor chain processing has come to an end.
962 hcidescp = (hci1394_desc_t *)
963 xferctl_curp->dma[repcnt - 1].dma_descp;
964 acc_hdl =
965 xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
966 temp = ddi_get32(acc_hdl, &hcidescp->hdr);
967 temp |= DESC_INTR_ENBL;
968 ddi_put32(acc_hdl, &hcidescp->hdr, temp);
972 * set jump address of final cur IXL xfer cmd to addr next
973 * IXL xfer cmd
975 hcidescp = (hci1394_desc_t *)
976 xferctl_curp->dma[repcnt - 1].dma_descp;
977 acc_hdl = xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
978 ddi_put32(acc_hdl, &hcidescp->branch, dma_execnext_addr);
981 * if a composite object, forward link initial jump
982 * dma addresses
984 for (ii = 0; ii < repcnt - 1; ii++) {
985 hcidescp = (hci1394_desc_t *)
986 xferctl_curp->dma[ii].dma_descp;
987 acc_hdl = xferctl_curp->dma[ii].dma_buf->bi_handle;
988 ddi_put32(acc_hdl, &hcidescp->branch,
989 xferctl_curp->dma[ii + 1].dma_bound);
993 * fill in skip address(es) for all descriptor blocks belonging
994 		 * to current IXL xfer command; note: skip addresses apply only
995 * to xmit mode commands
997 if ((ixlopcode & IXL1394_OPF_ONXMIT) != 0) {
999 /* first obtain and set skip mode information */
1000 wvp->ixl_setskipmode_cmdp = xferctl_curp->skipmodep;
1001 hci1394_set_xmit_skip_mode(wvp);
1004 			 * if skip to label, init dma bound addr to be
1005 * 1st xfer cmd after label
1007 dma_skiplabel_addr = 0;
1008 if ((wvp->skipmode == IXL1394_SKIP_TO_LABEL) &&
1009 (wvp->skipxferp != NULL)) {
1010 xferctl_nxtp = (hci1394_xfer_ctl_t *)
1011 wvp->skipxferp->compiler_privatep;
1012 dma_skiplabel_addr =
1013 xferctl_nxtp->dma[0].dma_bound;
1017 * set skip addrs for each descriptor blk at this
1018 * xfer start IXL cmd
1020 for (ii = 0; ii < repcnt; ii++) {
1021 switch (wvp->skipmode) {
1023 case IXL1394_SKIP_TO_LABEL:
1024 /* set dma bound address - label */
1025 dma_skip_addr = dma_skiplabel_addr;
1026 break;
1028 case IXL1394_SKIP_TO_NEXT:
1029 /* set dma bound address - next */
1030 if (ii < repcnt - 1) {
1031 dma_skip_addr = xferctl_curp->
1032 dma[ii + 1].dma_bound;
1033 } else {
1034 dma_skip_addr =
1035 dma_execnext_addr;
1037 break;
1039 case IXL1394_SKIP_TO_SELF:
1040 /* set dma bound address - self */
1041 dma_skip_addr =
1042 xferctl_curp->dma[ii].dma_bound;
1043 break;
1045 case IXL1394_SKIP_TO_STOP:
1046 default:
1047 /* set dma bound address - stop */
1048 dma_skip_addr = 0;
1049 break;
1053 * determine address of first descriptor of
1054 * current descriptor block by adjusting addr of
1055 * last descriptor of current descriptor block
1057 hcidescp = ((hci1394_desc_t *)
1058 xferctl_curp->dma[ii].dma_descp);
1059 acc_hdl =
1060 xferctl_curp->dma[ii].dma_buf->bi_handle;
1063 * adjust by count of descriptors in this desc
1064 * block not including the last one (size of
1065 * descriptor)
1067 hcidescp -= ((xferctl_curp->dma[ii].dma_bound &
1068 DESC_Z_MASK) - 1);
1071 * adjust further if the last descriptor is
1072 * double sized
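				 * (e.g. a SEND_HDR_ONLY block is a store-value
				 * descriptor followed by a double-sized
				 * OUTPUT_LAST-immediate, giving Z = 3;
				 * dma_descp points at the immediate, so backing
				 * up Z - 1 = 2 and then advancing one lands on
				 * the store-value descriptor at the head of the
				 * block)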
1074 if (ixlopcode == IXL1394_OP_SEND_HDR_ONLY) {
1075 hcidescp++;
1078 * now set skip address into first descriptor
1079 * of descriptor block
1081 ddi_put32(acc_hdl, &hcidescp->branch,
1082 dma_skip_addr);
1083 } /* for */
1084 } /* if */
1085 } /* while */
1089 * hci1394_finalize_cur_xfer_desc()
1090 * Build the openHCI descriptor for a packet or buffer based on info
1091 * currently collected into the working vars struct (wvp). After some
1092 * checks, this routine dispatches to the appropriate descriptor block
1093 * build (bld) routine for the packet or buf type.
1095 static void
1096 hci1394_finalize_cur_xfer_desc(hci1394_comp_ixl_vars_t *wvp)
1098 uint16_t ixlopcode;
1099 uint16_t ixlopraw;
1101 /* extract opcode from current IXL cmd (if any) */
1102 if (wvp->ixl_cur_cmdp != NULL) {
1103 ixlopcode = wvp->ixl_cur_cmdp->ixl_opcode;
1104 ixlopraw = ixlopcode & ~IXL1394_OPF_UPDATE;
1105 } else {
1106 ixlopcode = ixlopraw = IXL1394_OP_INVALID;
1110 * if no xfer descriptor block being built, perform validity checks
1112 if (wvp->xfer_state == XFER_NONE) {
1114 * error if being finalized by IXL1394_OP_LABEL or
1115 * IXL1394_OP_JUMP or if at end, and have an unapplied
1116 * IXL1394_OP_SET_TAGSYNC, IXL1394_OP_SET_SKIPMODE or
1117 * IXL1394_OP_SET_SYNCWAIT
1119 if ((ixlopraw == IXL1394_OP_JUMP) ||
1120 (ixlopraw == IXL1394_OP_LABEL) ||
1121 (wvp->ixl_cur_cmdp == NULL) ||
1122 (wvp->ixl_cur_cmdp->next_ixlp == NULL)) {
1123 if ((wvp->ixl_settagsync_cmdp != NULL) ||
1124 (wvp->ixl_setskipmode_cmdp != NULL) ||
1125 (wvp->ixl_setsyncwait_cnt != 0)) {
1127 wvp->dma_bld_error = IXL1394_EUNAPPLIED_SET_CMD;
1129 return;
1133 /* error if finalize is due to updateable jump cmd */
1134 if (ixlopcode == IXL1394_OP_JUMP_U) {
1136 wvp->dma_bld_error = IXL1394_EUPDATE_DISALLOWED;
1138 return;
1141 /* no error, no xfer */
1142 return;
1146 * finalize current xfer descriptor block being built
1149 /* count IXL xfer start command for descriptor block being built */
1150 wvp->ixl_xfer_st_cnt++;
1153 * complete setting of cache flush evaluation flags; flags will already
1154 * have been set by updateable set cmds and non-start xfer pkt cmds
1156 /* now set cache flush flag if current xfer start cmnd is updateable */
1157 if ((wvp->ixl_cur_xfer_stp->ixl_opcode & IXL1394_OPF_UPDATE) != 0) {
1158 wvp->xfer_hci_flush |= UPDATEABLE_XFER;
1161 * also set cache flush flag if xfer being finalized by
1162 * updateable jump cmd
1164 if ((ixlopcode == IXL1394_OP_JUMP_U) != 0) {
1165 wvp->xfer_hci_flush |= UPDATEABLE_JUMP;
1169 * Determine if cache flush required before building next descriptor
1170 * block. If xfer pkt command and any cache flush flags are set,
1171 * hci flush needed.
1172 * If buffer or special xfer command and xfer command is updateable or
1173 * an associated set command is updateable, hci flush is required now.
1174 * If a single-xfer buffer or special xfer command is finalized by
1175 * updateable jump command, hci flush is required now.
1176 * Note: a cache flush will be required later, before the last
1177 * descriptor block of a multi-xfer set of descriptor blocks is built,
1178 * if this (non-pkt) xfer is finalized by an updateable jump command.
1180 if (wvp->xfer_hci_flush != 0) {
1181 if (((wvp->ixl_cur_xfer_stp->ixl_opcode &
1182 IXL1394_OPTY_XFER_PKT_ST) != 0) || ((wvp->xfer_hci_flush &
1183 (UPDATEABLE_XFER | UPDATEABLE_SET | INITIATING_LBL)) !=
1184 0)) {
1186 if (hci1394_flush_hci_cache(wvp) != DDI_SUCCESS) {
1187 /* wvp->dma_bld_error is set by above call */
1188 return;
1194 * determine which kind of descriptor block to build based on
1195 * xfer state - hdr only, skip cycle, pkt or buf.
1197 switch (wvp->xfer_state) {
1199 case XFER_PKT:
1200 if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) {
1201 hci1394_bld_recv_pkt_desc(wvp);
1202 } else {
1203 hci1394_bld_xmit_pkt_desc(wvp);
1205 break;
1207 case XFER_BUF:
1208 if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) {
1209 if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
1210 hci1394_bld_recv_buf_fill_desc(wvp);
1211 } else {
1212 hci1394_bld_recv_buf_ppb_desc(wvp);
1214 } else {
1215 hci1394_bld_xmit_buf_desc(wvp);
1217 break;
1219 case XMIT_HDRONLY:
1220 case XMIT_NOPKT:
1221 hci1394_bld_xmit_hdronly_nopkt_desc(wvp);
1222 break;
1224 default:
1225 /* internal compiler error */
1226 wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
1229 /* return if error */
1230 if (wvp->dma_bld_error != 0) {
1231 /* wvp->dma_bld_error is set by above call */
1232 return;
1236 * if was finalizing IXL jump cmd, set compiler_privatep to
1237 * cur xfer IXL cmd
1239 if (ixlopraw == IXL1394_OP_JUMP) {
1240 wvp->ixl_cur_cmdp->compiler_privatep =
1241 (void *)wvp->ixl_cur_xfer_stp;
1244 /* if cur xfer IXL initiated by IXL label cmd, set flag in xfer_ctl */
1245 if (wvp->ixl_cur_labelp != NULL) {
1246 ((hci1394_xfer_ctl_t *)
1247 (wvp->ixl_cur_xfer_stp->compiler_privatep))->ctl_flags |=
1248 XCTL_LABELLED;
1249 wvp->ixl_cur_labelp = NULL;
1253 * set any associated IXL set skipmode cmd into xfer_ctl of
1254 * cur xfer IXL cmd
1256 if (wvp->ixl_setskipmode_cmdp != NULL) {
1257 ((hci1394_xfer_ctl_t *)
1258 (wvp->ixl_cur_xfer_stp->compiler_privatep))->skipmodep =
1259 wvp->ixl_setskipmode_cmdp;
1262 /* set no current xfer start cmd */
1263 wvp->ixl_cur_xfer_stp = NULL;
1265 /* set no current set tag&sync, set skipmode or set syncwait commands */
1266 wvp->ixl_settagsync_cmdp = NULL;
1267 wvp->ixl_setskipmode_cmdp = NULL;
1268 wvp->ixl_setsyncwait_cnt = 0;
1270 /* set no currently active descriptor blocks */
1271 wvp->descriptors = 0;
1273 /* reset total packet length and buffers count */
1274 wvp->xfer_pktlen = 0;
1275 wvp->xfer_bufcnt = 0;
1277 /* reset flush cache evaluation flags */
1278 wvp->xfer_hci_flush = 0;
1280 /* set no xmit descriptor block being built */
1281 wvp->xfer_state = XFER_NONE;
1285 * hci1394_bld_recv_pkt_desc()
1286 * Used to create the openHCI dma descriptor block(s) for a receive packet.
1288 static void
1289 hci1394_bld_recv_pkt_desc(hci1394_comp_ixl_vars_t *wvp)
1291 hci1394_xfer_ctl_t *xctlp;
1292 caddr_t dma_descp;
1293 uint32_t dma_desc_bound;
1294 uint32_t wait_for_sync;
1295 uint32_t ii;
1296 hci1394_desc_t *wv_descp; /* shorthand to local descrpt */
1299 * is error if number of descriptors to be built exceeds maximum
1300 * descriptors allowed in a descriptor block.
1302 if ((wvp->descriptors + wvp->xfer_bufcnt) > HCI1394_DESC_MAX_Z) {
1304 wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
1306 return;
1309 /* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
1310 if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
1312 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1314 return;
1318 * save xfer_ctl struct addr in compiler_privatep of
1319 * current IXL xfer cmd
1321 wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1324 * if enabled, set wait for sync flag in first descriptor of
1325 * descriptor block
1327 if (wvp->ixl_setsyncwait_cnt > 0) {
1328 wvp->ixl_setsyncwait_cnt = 1;
1329 wait_for_sync = DESC_W_ENBL;
1330 } else {
1331 wait_for_sync = DESC_W_DSABL;
1334 /* create descriptor block for this recv packet (xfer status enabled) */
1335 for (ii = 0; ii < wvp->xfer_bufcnt; ii++) {
1336 wv_descp = &wvp->descriptor_block[wvp->descriptors];
1338 if (ii == (wvp->xfer_bufcnt - 1)) {
1339 HCI1394_INIT_IR_PPB_ILAST(wv_descp, DESC_HDR_STAT_ENBL,
1340 DESC_INTR_DSABL, wait_for_sync, wvp->xfer_size[ii]);
1341 } else {
1342 HCI1394_INIT_IR_PPB_IMORE(wv_descp, wait_for_sync,
1343 wvp->xfer_size[ii]);
1345 wv_descp->data_addr = wvp->xfer_bufp[ii];
1346 wv_descp->branch = 0;
1347 wv_descp->status = (wvp->xfer_size[ii] <<
1348 DESC_ST_RESCOUNT_SHIFT) & DESC_ST_RESCOUNT_MASK;
1349 wvp->descriptors++;
1352 /* allocate and copy descriptor block to dma memory */
1353 if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound) !=
1354 DDI_SUCCESS) {
1355 /* wvp->dma_bld_error is set by above function call */
1356 return;
1360 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
1361 * is last component)
1363 xctlp->dma[0].dma_bound = dma_desc_bound;
1364 xctlp->dma[0].dma_descp =
1365 dma_descp + (wvp->xfer_bufcnt - 1) * sizeof (hci1394_desc_t);
1366 xctlp->dma[0].dma_buf = &wvp->dma_currentp->mem;
1370 * hci1394_bld_recv_buf_ppb_desc()
1371 * Used to create the openHCI dma descriptor block(s) for a receive buf
1372 * in packet per buffer mode.
1374 static void
1375 hci1394_bld_recv_buf_ppb_desc(hci1394_comp_ixl_vars_t *wvp)
1377 hci1394_xfer_ctl_t *xctlp;
1378 ixl1394_xfer_buf_t *local_ixl_cur_xfer_stp;
1379 caddr_t dma_descp;
1380 uint32_t dma_desc_bound;
1381 uint32_t pktsize;
1382 uint32_t pktcnt;
1383 uint32_t wait_for_sync;
1384 uint32_t ii;
1385 hci1394_desc_t *wv_descp; /* shorthand to local descriptor */
1387 local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
1389 /* determine number and size of pkt desc blocks to create */
1390 pktsize = local_ixl_cur_xfer_stp->pkt_size;
1391 pktcnt = local_ixl_cur_xfer_stp->size / pktsize;
1393 /* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
1394 if ((xctlp = hci1394_alloc_xfer_ctl(wvp, pktcnt)) == NULL) {
1396 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1398 return;
1402 * save xfer_ctl struct addr in compiler_privatep of
1403 * current IXL xfer cmd
1405 local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1408 * if enabled, set wait for sync flag in first descriptor in
1409 * descriptor block
1411 if (wvp->ixl_setsyncwait_cnt > 0) {
1412 wvp->ixl_setsyncwait_cnt = 1;
1413 wait_for_sync = DESC_W_ENBL;
1414 } else {
1415 wait_for_sync = DESC_W_DSABL;
1418 /* create first descriptor block for this recv packet */
1419 /* consists of one descriptor and xfer status is enabled */
1420 wv_descp = &wvp->descriptor_block[wvp->descriptors];
1421 HCI1394_INIT_IR_PPB_ILAST(wv_descp, DESC_HDR_STAT_ENBL, DESC_INTR_DSABL,
1422 wait_for_sync, pktsize);
1423 wv_descp->data_addr = local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
1424 wv_descp->branch = 0;
1425 wv_descp->status = (pktsize << DESC_ST_RESCOUNT_SHIFT) &
1426 DESC_ST_RESCOUNT_MASK;
1427 wvp->descriptors++;
1430 * generate as many contiguous descriptor blocks as there are
1431 * recv pkts
1433 for (ii = 0; ii < pktcnt; ii++) {
1435 /* if about to create last descriptor block */
1436 if (ii == (pktcnt - 1)) {
1437 /* check and perform any required hci cache flush */
1438 if (hci1394_flush_end_desc_check(wvp, ii) !=
1439 DDI_SUCCESS) {
1440 /* wvp->dma_bld_error is set by above call */
1441 return;
1445 /* allocate and copy descriptor block to dma memory */
1446 if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
1447 &dma_desc_bound) != DDI_SUCCESS) {
1449 /* wvp->dma_bld_error is set by above call */
1450 return;
1454 * set dma addrs into xfer_ctl struct (unbound addr (kernel
1455 * virtual) is last component (descriptor))
1457 xctlp->dma[ii].dma_bound = dma_desc_bound;
1458 xctlp->dma[ii].dma_descp = dma_descp;
1459 xctlp->dma[ii].dma_buf = &wvp->dma_currentp->mem;
1461 /* advance buffer ptr by pktsize in descriptor block */
1462 wvp->descriptor_block[wvp->descriptors - 1].data_addr +=
1463 pktsize;
1468 * hci1394_bld_recv_buf_fill_desc()
1469 * Used to create the openHCI dma descriptor block(s) for a receive buf
1470 * in buffer fill mode.
1472 static void
1473 hci1394_bld_recv_buf_fill_desc(hci1394_comp_ixl_vars_t *wvp)
1475 hci1394_xfer_ctl_t *xctlp;
1476 caddr_t dma_descp;
1477 uint32_t dma_desc_bound;
1478 uint32_t wait_for_sync;
1479 ixl1394_xfer_buf_t *local_ixl_cur_xfer_stp;
1481 local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
1484 	/* allocate an xfer_ctl struct including 1 xfer_ctl_dma struct */
1485 if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
1487 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1489 return;
1493 * save xfer_ctl struct addr in compiler_privatep of
1494 * current IXL xfer cmd
1496 local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1499 * if enabled, set wait for sync flag in first descriptor of
1500 * descriptor block
1502 if (wvp->ixl_setsyncwait_cnt > 0) {
1503 wvp->ixl_setsyncwait_cnt = 1;
1504 wait_for_sync = DESC_W_ENBL;
1505 } else {
1506 wait_for_sync = DESC_W_DSABL;
1510 * create descriptor block for this buffer fill mode recv command which
1511 * consists of one descriptor with xfer status enabled
1513 HCI1394_INIT_IR_BF_IMORE(&wvp->descriptor_block[wvp->descriptors],
1514 DESC_INTR_DSABL, wait_for_sync, local_ixl_cur_xfer_stp->size);
1516 wvp->descriptor_block[wvp->descriptors].data_addr =
1517 local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
1518 wvp->descriptor_block[wvp->descriptors].branch = 0;
1519 wvp->descriptor_block[wvp->descriptors].status =
1520 (local_ixl_cur_xfer_stp->size << DESC_ST_RESCOUNT_SHIFT) &
1521 DESC_ST_RESCOUNT_MASK;
1522 wvp->descriptors++;
1524 /* check and perform any required hci cache flush */
1525 if (hci1394_flush_end_desc_check(wvp, 0) != DDI_SUCCESS) {
1526 /* wvp->dma_bld_error is set by above call */
1527 return;
1530 /* allocate and copy descriptor block to dma memory */
1531 if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound)
1532 != DDI_SUCCESS) {
1533 /* wvp->dma_bld_error is set by above call */
1534 return;
1538 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
1539 	 * is last component).
1541 xctlp->dma[0].dma_bound = dma_desc_bound;
1542 xctlp->dma[0].dma_descp = dma_descp;
1543 xctlp->dma[0].dma_buf = &wvp->dma_currentp->mem;
1547 * hci1394_bld_xmit_pkt_desc()
1548 * Used to create the openHCI dma descriptor block(s) for a transmit packet.
1550 static void
1551 hci1394_bld_xmit_pkt_desc(hci1394_comp_ixl_vars_t *wvp)
1553 hci1394_xfer_ctl_t *xctlp;
1554 hci1394_output_more_imm_t *wv_omi_descp; /* shorthand to local descrp */
1555 hci1394_desc_t *wv_descp; /* shorthand to local descriptor */
1556 caddr_t dma_descp; /* dma bound memory for descriptor */
1557 uint32_t dma_desc_bound;
1558 uint32_t ii;
1561 * is error if number of descriptors to be built exceeds maximum
1562 * descriptors allowed in a descriptor block. Add 2 for the overhead
1563 * of the OMORE-Immediate.
1565 if ((wvp->descriptors + 2 + wvp->xfer_bufcnt) > HCI1394_DESC_MAX_Z) {
1567 wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
1569 return;
1572 /* is error if total packet length exceeds 0xFFFF */
1573 if (wvp->xfer_pktlen > 0xFFFF) {
1575 wvp->dma_bld_error = IXL1394_EPKTSIZE_MAX_OFLO;
1577 return;
1580 /* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
1581 if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
1583 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1585 return;
1589 * save xfer_ctl struct addr in compiler_privatep of
1590 * current IXL xfer cmd
1592 wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1594 /* generate values for the xmit pkt hdrs */
1595 hci1394_set_xmit_pkt_hdr(wvp);
1598 * xmit pkt starts with an output more immediate,
1599 * a double sized hci1394_desc
1601 wv_omi_descp = (hci1394_output_more_imm_t *)
1602 (&wvp->descriptor_block[wvp->descriptors]);
1603 HCI1394_INIT_IT_OMORE_IMM(wv_omi_descp);
1605 wv_omi_descp->data_addr = 0;
1606 wv_omi_descp->branch = 0;
1607 wv_omi_descp->status = 0;
1608 wv_omi_descp->q1 = wvp->xmit_pkthdr1;
1609 wv_omi_descp->q2 = wvp->xmit_pkthdr2;
1610 wv_omi_descp->q3 = 0;
1611 wv_omi_descp->q4 = 0;
1613 wvp->descriptors += 2;
1616 * create the required output more hci1394_desc descriptor, then create
1617 * an output last hci1394_desc descriptor with xfer status enabled
1619 for (ii = 0; ii < wvp->xfer_bufcnt; ii++) {
1620 wv_descp = &wvp->descriptor_block[wvp->descriptors];
1622 if (ii == (wvp->xfer_bufcnt - 1)) {
1623 HCI1394_INIT_IT_OLAST(wv_descp, DESC_HDR_STAT_ENBL,
1624 DESC_INTR_DSABL, wvp->xfer_size[ii]);
1625 } else {
1626 HCI1394_INIT_IT_OMORE(wv_descp, wvp->xfer_size[ii]);
1628 wv_descp->data_addr = wvp->xfer_bufp[ii];
1629 wv_descp->branch = 0;
1630 wv_descp->status = 0;
1631 wvp->descriptors++;
1634 /* allocate and copy descriptor block to dma memory */
1635 if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound) !=
1636 DDI_SUCCESS) {
1637 /* wvp->dma_bld_error is set by above call */
1638 return;
1642 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
1643 * is last component (descriptor))
1645 xctlp->dma[0].dma_bound = dma_desc_bound;
1646 xctlp->dma[0].dma_descp =
1647 dma_descp + (wvp->xfer_bufcnt + 1) * sizeof (hci1394_desc_t);
1648 xctlp->dma[0].dma_buf = &wvp->dma_currentp->mem;
1652 * hci1394_bld_xmit_buf_desc()
1653 * Used to create the openHCI dma descriptor blocks for a transmit buffer.
1655 static void
1656 hci1394_bld_xmit_buf_desc(hci1394_comp_ixl_vars_t *wvp)
1658 hci1394_xfer_ctl_t *xctlp;
1659 ixl1394_xfer_buf_t *local_ixl_cur_xfer_stp;
1660 hci1394_output_more_imm_t *wv_omi_descp; /* shorthand to local descrp */
1661 hci1394_desc_t *wv_descp; /* shorthand to local descriptor */
1662 caddr_t dma_descp;
1663 uint32_t dma_desc_bound;
1664 uint32_t pktsize;
1665 uint32_t pktcnt;
1666 uint32_t ii;
1668 local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
1670 /* determine number and size of pkt desc blocks to create */
1671 pktsize = local_ixl_cur_xfer_stp->pkt_size;
1672 pktcnt = local_ixl_cur_xfer_stp->size / pktsize;
1674 /* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
1675 if ((xctlp = hci1394_alloc_xfer_ctl(wvp, pktcnt)) == NULL) {
1677 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1679 return;
1683 * save xfer_ctl struct addr in compiler_privatep of
1684 * current IXL xfer cmd
1686 local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1688 /* generate values for the xmit pkt hdrs */
1689 wvp->xfer_pktlen = pktsize;
1690 hci1394_set_xmit_pkt_hdr(wvp);
1693 * xmit pkt starts with an output more immediate,
1694 * a double sized hci1394_desc
1696 wv_omi_descp = (hci1394_output_more_imm_t *)
1697 &wvp->descriptor_block[wvp->descriptors];
1699 HCI1394_INIT_IT_OMORE_IMM(wv_omi_descp);
1701 wv_omi_descp->data_addr = 0;
1702 wv_omi_descp->branch = 0;
1703 wv_omi_descp->status = 0;
1704 wv_omi_descp->q1 = wvp->xmit_pkthdr1;
1705 wv_omi_descp->q2 = wvp->xmit_pkthdr2;
1706 wv_omi_descp->q3 = 0;
1707 wv_omi_descp->q4 = 0;
1709 wvp->descriptors += 2;
1711 /* follow with a single output last descriptor w/status enabled */
1712 wv_descp = &wvp->descriptor_block[wvp->descriptors];
1713 HCI1394_INIT_IT_OLAST(wv_descp, DESC_HDR_STAT_ENBL, DESC_INTR_DSABL,
1714 pktsize);
1715 wv_descp->data_addr = local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
1716 wv_descp->branch = 0;
1717 wv_descp->status = 0;
1718 wvp->descriptors++;
1721 * generate as many contiguous descriptor blocks as there are
1722 * xmit packets
1724 for (ii = 0; ii < pktcnt; ii++) {
1726 /* if about to create last descriptor block */
1727 if (ii == (pktcnt - 1)) {
1728 /* check and perform any required hci cache flush */
1729 if (hci1394_flush_end_desc_check(wvp, ii) !=
1730 DDI_SUCCESS) {
1731 /* wvp->dma_bld_error is set by above call */
1732 return;
1736 /* allocate and copy descriptor block to dma memory */
1737 if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
1738 &dma_desc_bound) != DDI_SUCCESS) {
1739 /* wvp->dma_bld_error is set by above call */
1740 return;
1744 * set dma addrs into xfer_ctl structure (unbound addr
1745 * (kernel virtual) is last component (descriptor))
1747 xctlp->dma[ii].dma_bound = dma_desc_bound;
1748 xctlp->dma[ii].dma_descp = dma_descp + 2 *
1749 sizeof (hci1394_desc_t);
1750 xctlp->dma[ii].dma_buf = &wvp->dma_currentp->mem;
1752 /* advance buffer ptr by pktsize in descriptor block */
1753 wvp->descriptor_block[wvp->descriptors - 1].data_addr +=
1754 pktsize;
1759 * hci1394_bld_xmit_hdronly_nopkt_desc()
1760 * Used to create the openHCI dma descriptor blocks for transmitting
1761 * a packet consisting of an isochronous header with no data payload,
1762 * or for not sending a packet at all for a cycle.
1764 * A Store_Value openhci descriptor is built at the start of each
1765 * IXL1394_OP_SEND_HDR_ONLY and IXL1394_OP_SEND_NO_PKT command's dma
1766 * descriptor block (to allow for skip cycle specification and set skipmode
1767 * processing for these commands).
1769 static void
1770 hci1394_bld_xmit_hdronly_nopkt_desc(hci1394_comp_ixl_vars_t *wvp)
1772 hci1394_xfer_ctl_t *xctlp;
1773 hci1394_output_last_t *wv_ol_descp; /* shorthand to local descrp */
1774 hci1394_output_last_imm_t *wv_oli_descp; /* shorthand to local descrp */
1775 caddr_t dma_descp;
1776 uint32_t dma_desc_bound;
1777 uint32_t repcnt;
1778 uint32_t ii;
1780 /* determine # of instances of output hdronly/nopkt to generate */
1781 repcnt = ((ixl1394_xmit_special_t *)wvp->ixl_cur_xfer_stp)->count;
1784 * allocate an xfer_ctl structure which includes repcnt
1785 * xfer_ctl_dma structs
1787 if ((xctlp = hci1394_alloc_xfer_ctl(wvp, repcnt)) == NULL) {
1789 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1791 return;
1795 * save xfer_ctl struct addr in compiler_privatep of
1796 * current IXL xfer command
1798 wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1801 * create a storevalue descriptor
1802 * (will be used for skip vs jump processing)
1804 hci1394_set_xmit_storevalue_desc(wvp);
1807 * processing now based on opcode:
1808 * IXL1394_OP_SEND_HDR_ONLY or IXL1394_OP_SEND_NO_PKT
1810 if ((wvp->ixl_cur_xfer_stp->ixl_opcode & ~IXL1394_OPF_UPDATE) ==
1811 IXL1394_OP_SEND_HDR_ONLY) {
1813 /* for header only, generate values for the xmit pkt hdrs */
1814 hci1394_set_xmit_pkt_hdr(wvp);
1817 * create an output last immediate (double sized) descriptor
1818 * xfer status enabled
1820 wv_oli_descp = (hci1394_output_last_imm_t *)
1821 &wvp->descriptor_block[wvp->descriptors];
1823 HCI1394_INIT_IT_OLAST_IMM(wv_oli_descp, DESC_HDR_STAT_ENBL,
1824 DESC_INTR_DSABL);
1826 wv_oli_descp->data_addr = 0;
1827 wv_oli_descp->branch = 0;
1828 wv_oli_descp->status = 0;
1829 wv_oli_descp->q1 = wvp->xmit_pkthdr1;
1830 wv_oli_descp->q2 = wvp->xmit_pkthdr2;
1831 wv_oli_descp->q3 = 0;
1832 wv_oli_descp->q4 = 0;
1833 wvp->descriptors += 2;
1834 } else {
1836 * for skip cycle, create a single output last descriptor
1837 * with xfer status enabled
1839 wv_ol_descp = &wvp->descriptor_block[wvp->descriptors];
1840 HCI1394_INIT_IT_OLAST(wv_ol_descp, DESC_HDR_STAT_ENBL,
1841 DESC_INTR_DSABL, 0);
1842 wv_ol_descp->data_addr = 0;
1843 wv_ol_descp->branch = 0;
1844 wv_ol_descp->status = 0;
1845 wvp->descriptors++;
1849 * generate as many contiguous descriptor blocks as repeat count
1850 * indicates
1852 for (ii = 0; ii < repcnt; ii++) {
1854 /* if about to create last descriptor block */
1855 if (ii == (repcnt - 1)) {
1856 /* check and perform any required hci cache flush */
1857 if (hci1394_flush_end_desc_check(wvp, ii) !=
1858 DDI_SUCCESS) {
1859 /* wvp->dma_bld_error is set by above call */
1860 return;
1864 /* allocate and copy descriptor block to dma memory */
1865 if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
1866 &dma_desc_bound) != DDI_SUCCESS) {
1867 /* wvp->dma_bld_error is set by above call */
1868 return;
1872 * set dma addrs into xfer_ctl structure (unbound addr
1873 		 * (kernel virtual) is last component (descriptor))
1875 xctlp->dma[ii].dma_bound = dma_desc_bound;
1876 xctlp->dma[ii].dma_descp = dma_descp + sizeof (hci1394_desc_t);
1877 xctlp->dma[ii].dma_buf = &wvp->dma_currentp->mem;
1882 * hci1394_bld_dma_mem_desc_blk()
1883 * Used to put a given OpenHCI descriptor block into dma bound memory.
1885 static int
1886 hci1394_bld_dma_mem_desc_blk(hci1394_comp_ixl_vars_t *wvp, caddr_t *dma_descpp,
1887 uint32_t *dma_desc_bound)
1889 uint32_t dma_bound;
1891 /* set internal error if no descriptor blocks to build */
1892 if (wvp->descriptors == 0) {
1894 wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
1896 return (DDI_FAILURE);
1899 /* allocate dma memory and move this descriptor block to it */
1900 *dma_descpp = (caddr_t)hci1394_alloc_dma_mem(wvp, wvp->descriptors *
1901 sizeof (hci1394_desc_t), &dma_bound);
1903 if (*dma_descpp == NULL) {
1905 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1907 return (DDI_FAILURE);
1909 #ifdef _KERNEL
1910 ddi_rep_put32(wvp->dma_currentp->mem.bi_handle,
1911 (uint_t *)wvp->descriptor_block, (uint_t *)*dma_descpp,
1912 wvp->descriptors * (sizeof (hci1394_desc_t) >> 2),
1913 DDI_DEV_AUTOINCR);
1914 #else
1915 bcopy(wvp->descriptor_block, *dma_descpp,
1916 wvp->descriptors * sizeof (hci1394_desc_t));
1917 #endif
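	/*
	 * (ddi_rep_put32() counts in 32-bit words, hence the >> 2 above;
	 * the bcopy() branch copies the same bytes for non-kernel builds)
	 */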
1919 * convert allocated block's memory address to bus address space
1920 	 * and include properly set Z bits (descriptor count).
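	 * (descriptor blocks are 16-byte aligned, so the low four bits of the
	 * bound address are free to carry Z; e.g. a 4-descriptor block bound
	 * at 0x1FAB00 is published as 0x1FAB04, assuming DESC_Z_MASK covers
	 * those low four bits per the OpenHCI branch/Z encoding)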
1922 *dma_desc_bound = (dma_bound & ~DESC_Z_MASK) | wvp->descriptors;
1924 return (DDI_SUCCESS);
1928 * hci1394_set_xmit_pkt_hdr()
1929 * Compose the 2 quadlets for the xmit packet header.
1931 static void
1932 hci1394_set_xmit_pkt_hdr(hci1394_comp_ixl_vars_t *wvp)
1934 uint16_t tag;
1935 uint16_t sync;
1938 * choose tag and sync bits for header either from default values or
1939 * from currently active set tag and sync IXL command
1940 * (clear command after use)
1942 if (wvp->ixl_settagsync_cmdp == NULL) {
1943 tag = wvp->default_tag;
1944 sync = wvp->default_sync;
1945 } else {
1946 tag = wvp->ixl_settagsync_cmdp->tag;
1947 sync = wvp->ixl_settagsync_cmdp->sync;
1948 wvp->ixl_settagsync_cmdp = NULL;
1950 tag &= (DESC_PKT_TAG_MASK >> DESC_PKT_TAG_SHIFT);
1951 sync &= (DESC_PKT_SY_MASK >> DESC_PKT_SY_SHIFT);
1954 * build xmit pkt header -
1955 * hdr1 has speed, tag, channel number and sync bits
1956 * hdr2 has the packet length.
1958 wvp->xmit_pkthdr1 = (wvp->ctxtp->isospd << DESC_PKT_SPD_SHIFT) |
1959 (tag << DESC_PKT_TAG_SHIFT) | (wvp->ctxtp->isochan <<
1960 DESC_PKT_CHAN_SHIFT) | (IEEE1394_TCODE_ISOCH <<
1961 DESC_PKT_TCODE_SHIFT) | (sync << DESC_PKT_SY_SHIFT);
1963 wvp->xmit_pkthdr2 = wvp->xfer_pktlen << DESC_PKT_DATALEN_SHIFT;
1964 }
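/*
 * Illustrative note (hypothetical values): for an S400 stream on isochronous
 * channel 5 with tag 1 and sync 0 carrying a 64-byte payload, the code above
 * packs speed, tag, channel, the isochronous tcode and sy into xmit_pkthdr1
 * at their DESC_PKT_* bit positions, and xmit_pkthdr2 is simply
 * 64 << DESC_PKT_DATALEN_SHIFT.
 */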
1966 /*
1967 * hci1394_set_xmit_skip_mode()
1968 * Set the current skip mode from the default or from the currently
1969 * active set skipmode IXL command. If that command's skip mode is
1970 * skip-to-label, find the xfer start IXL command which follows the
1971 * label and store it in the set skipmode command's compiler_privatep.
1972 */
1973 static void
1974 hci1394_set_xmit_skip_mode(hci1394_comp_ixl_vars_t *wvp)
1975 {
1976 int err;
1978 if (wvp->ixl_setskipmode_cmdp == NULL) {
1979 wvp->skipmode = wvp->default_skipmode;
1980 wvp->skiplabelp = wvp->default_skiplabelp;
1981 wvp->skipxferp = wvp->default_skipxferp;
1982 } else {
1983 wvp->skipmode = wvp->ixl_setskipmode_cmdp->skipmode;
1984 wvp->skiplabelp = wvp->ixl_setskipmode_cmdp->label;
1985 wvp->skipxferp = NULL;
1986 if (wvp->skipmode == IXL1394_SKIP_TO_LABEL) {
1987 err = hci1394_ixl_find_next_exec_xfer(wvp->skiplabelp,
1988 NULL, &wvp->skipxferp);
1989 if (err == DDI_FAILURE) {
1990 wvp->skipxferp = NULL;
1991 wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
1992 }
1993 }
1994 wvp->ixl_setskipmode_cmdp->compiler_privatep =
1995 (void *)wvp->skipxferp;
1996 }
1997 }
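/*
 * Illustrative note (hypothetical IXL fragment): given
 *   SET_SKIPMODE(IXL1394_SKIP_TO_LABEL, label = L1); ...; LABEL L1; SEND_BUF;
 * the lookup above resolves L1 to the SEND_BUF command and caches it in the
 * set skipmode command's compiler_privatep, so the skip branch can later be
 * pointed at that xfer's first descriptor block.  If no executable xfer
 * command follows the label, wvp->dma_bld_error is set to
 * IXL1394_ENO_DATA_PKTS.
 */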
1999 /*
2000 * hci1394_set_xmit_storevalue_desc()
2001 * Set up store_value DMA descriptor.
2002 * XMIT_HDRONLY or XMIT_NOPKT xfer states use a store value as first
2003 * descriptor in the descriptor block (to handle skip mode processing)
2004 */
2005 static void
2006 hci1394_set_xmit_storevalue_desc(hci1394_comp_ixl_vars_t *wvp)
2007 {
2008 wvp->descriptors++;
2010 HCI1394_INIT_IT_STORE(&wvp->descriptor_block[wvp->descriptors - 1],
2011 wvp->storevalue_data);
2012 wvp->descriptor_block[wvp->descriptors - 1].data_addr =
2013 wvp->storevalue_bufp;
2014 wvp->descriptor_block[wvp->descriptors - 1].branch = 0;
2015 wvp->descriptor_block[wvp->descriptors - 1].status = 0;
2016 }
2018 /*
2019 * hci1394_set_next_xfer_buf()
2020 * This routine adds the data buffer to the current wvp list.
2021 * Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
2022 * contains the error code.
2023 */
2024 static int
2025 hci1394_set_next_xfer_buf(hci1394_comp_ixl_vars_t *wvp, uint32_t bufp,
2026 uint16_t size)
2027 {
2028 /* error if buffer pointer is null (size may be 0) */
2029 if (bufp == (uintptr_t)NULL) {
2031 wvp->dma_bld_error = IXL1394_ENULL_BUFFER_ADDR;
2033 return (DDI_FAILURE);
2034 }
2036 /* count new xfer buffer */
2037 wvp->xfer_bufcnt++;
2039 /* error if exceeds maximum xfer buffer components allowed */
2040 if (wvp->xfer_bufcnt > HCI1394_DESC_MAX_Z) {
2042 wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
2044 return (DDI_FAILURE);
2045 }
2047 /* save xmit buffer and size */
2048 wvp->xfer_bufp[wvp->xfer_bufcnt - 1] = bufp;
2049 wvp->xfer_size[wvp->xfer_bufcnt - 1] = size;
2051 /* accumulate total packet length */
2052 wvp->xfer_pktlen += size;
2054 return (DDI_SUCCESS);
2055 }
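/*
 * Illustrative sketch (hypothetical helper, not called anywhere): gathering
 * the fragments of one packet.  Each fragment becomes one data descriptor
 * component, so at most HCI1394_DESC_MAX_Z fragments fit into a single
 * descriptor block; beyond that the call above fails with
 * IXL1394_EFRAGMENT_OFLO.
 */
static int
hci1394_example_add_fragments(hci1394_comp_ixl_vars_t *wvp, uint32_t *bufs,
    uint16_t *sizes, int nfrags)
{
	int ii;

	for (ii = 0; ii < nfrags; ii++) {
		if (hci1394_set_next_xfer_buf(wvp, bufs[ii], sizes[ii]) !=
		    DDI_SUCCESS) {
			/* wvp->dma_bld_error holds the reason */
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}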
2057 /*
2058 * hci1394_flush_end_desc_check()
2059 * Check if a flush is required before the last descriptor block of a
2060 * non-unary set generated by an xfer buffer or xmit special command,
2061 * or of a unary set, provided no other flush has already been done.
2062 *
2063 * An hci flush is required if the xfer is finalized by an updateable
2064 * jump command.
2065 *
2066 * Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
2067 * will contain the error code.
2068 */
2069 static int
2070 hci1394_flush_end_desc_check(hci1394_comp_ixl_vars_t *wvp, uint32_t count)
2071 {
2072 if ((count != 0) ||
2073 ((wvp->xfer_hci_flush & (UPDATEABLE_XFER | UPDATEABLE_SET |
2074 INITIATING_LBL)) == 0)) {
2076 if (wvp->xfer_hci_flush & UPDATEABLE_JUMP) {
2077 if (hci1394_flush_hci_cache(wvp) != DDI_SUCCESS) {
2079 /* wvp->dma_bld_error is set by above call */
2080 return (DDI_FAILURE);
2081 }
2082 }
2083 }
2085 return (DDI_SUCCESS);
2086 }
2088 /*
2089 * hci1394_flush_hci_cache()
2090 * Sun hci controller (RIO) implementation specific processing!
2091 *
2092 * Allocate dma memory for 1 hci descriptor block which will be left unused.
2093 * During execution this will cause a break in the contiguous address space
2094 * processing required by Sun's RIO implementation of the ohci controller and
2095 * will require the controller to refetch the next descriptor block from
2096 * host memory.
2097 *
2098 * General rules for a cache flush preceding a descriptor block in dma memory:
2099 * 1. Current IXL Xfer Command Updateable Rule:
2100 * Cache flush of the IXL xfer command is required if it, or any of the
2101 * non-start IXL packet xfer commands associated with it, is flagged
2102 * updateable.
2103 * 2. Next IXL Xfer Command Indeterminate Rule:
2104 * Cache flush of the IXL xfer command is required if an IXL jump command
2105 * which is flagged updateable has finalized the current IXL xfer
2106 * command.
2107 * 3. Updateable IXL Set Command Rule:
2108 * Cache flush of an IXL xfer command is required if any of the IXL
2109 * "Set" commands (IXL1394_OP_SET_*) associated with the IXL xfer
2110 * command (i.e. immediately preceding it) is flagged updateable.
2111 * 4. Label Initiating Xfer Command Rule:
2112 * Cache flush of the IXL xfer command is required if it is initiated by a
2113 * label IXL command. (This is to allow both a flush of the cache and
2114 * an interrupt to be generated easily and in close proximity to each
2115 * other. This can make resetting of descriptor statuses simpler and
2116 * more reliable, especially under circumstances where the cycle of hci
2117 * commands is short and/or there are no callbacks distributed through
2118 * the span of xfers. This is especially important for input, where
2119 * statuses must be reset before execution cycles back again.)
2120 *
2122 * Application of the above rules:
2123 * Packet mode IXL xfer commands:
2124 * If any of the above flush rules apply, the cache flush should be done
2125 * immediately preceding the generation of the dma descriptor block
2126 * for the packet xfer.
2127 * Non-packet mode IXL xfer commands (including IXL1394_OP_*BUF*,
2128 * SEND_HDR_ONLY, and SEND_NO_PKT):
2129 * If Rule #1, #3 or #4 applies, a cache flush should be done
2130 * immediately before the first generated dma descriptor block of the
2131 * non-packet xfer.
2132 * If Rule #2 applies, a cache flush should be done immediately before
2133 * the last generated dma descriptor block of the non-packet xfer.
2134 *
2135 * Note: The cache flush should be done at most once at each location that
2136 * requires one, no matter how many rules apply (i.e. at most once
2137 * before the first descriptor block and/or at most once before the last
2138 * descriptor block generated). If more than one place requires a flush,
2139 * then both flush operations must be performed, taking all rules that
2140 * apply into account.
2141 *
2142 * Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
2143 * will contain the error code.
2144 */
2145 static int
2146 hci1394_flush_hci_cache(hci1394_comp_ixl_vars_t *wvp)
2147 {
2148 uint32_t dma_bound;
2150 if (hci1394_alloc_dma_mem(wvp, sizeof (hci1394_desc_t), &dma_bound) ==
2151 NULL) {
2153 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2155 return (DDI_FAILURE);
2156 }
2158 return (DDI_SUCCESS);
2159 }
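/*
 * Illustrative sketch (hypothetical helper, not called anywhere): how the
 * rules above map onto the xfer_hci_flush flag bits used in this file.
 * Rules #1, #3 and #4 (UPDATEABLE_XFER, UPDATEABLE_SET, INITIATING_LBL)
 * call for a flush before the first descriptor block of an xfer; rule #2
 * (UPDATEABLE_JUMP) calls for a flush before the last one.
 */
static void
hci1394_example_flush_sites(uint32_t xfer_hci_flush, boolean_t *flush_firstp,
    boolean_t *flush_lastp)
{
	*flush_firstp = ((xfer_hci_flush & (UPDATEABLE_XFER | UPDATEABLE_SET |
	    INITIATING_LBL)) != 0);
	*flush_lastp = ((xfer_hci_flush & UPDATEABLE_JUMP) != 0);
}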
2161 /*
2162 * hci1394_alloc_storevalue_dma_mem()
2163 * Allocate dma memory for a 1 hci component descriptor block
2164 * which will be used as the dma memory location that ixl
2165 * compiler generated storevalue descriptor commands will
2166 * specify as location to store their data value.
2167 *
2168 * Returns 32-bit bound address of allocated mem, or NULL.
2169 */
2170 static uint32_t
2171 hci1394_alloc_storevalue_dma_mem(hci1394_comp_ixl_vars_t *wvp)
2172 {
2173 uint32_t dma_bound;
2175 if (hci1394_alloc_dma_mem(wvp, sizeof (hci1394_desc_t),
2176 &dma_bound) == NULL) {
2178 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2180 return ((uintptr_t)NULL);
2181 }
2183 /* return bound address of allocated memory */
2184 return (dma_bound);
2185 }
2188 /*
2189 * hci1394_alloc_xfer_ctl()
2190 * Allocate an xfer_ctl structure.
2191 */
2192 static hci1394_xfer_ctl_t *
2193 hci1394_alloc_xfer_ctl(hci1394_comp_ixl_vars_t *wvp, uint32_t dmacnt)
2194 {
2195 hci1394_xfer_ctl_t *xcsp;
2197 /*
2198 * allocate an xfer_ctl struct which includes dmacnt of
2199 * xfer_ctl_dma structs
2200 */
2201 #ifdef _KERNEL
2202 if ((xcsp = (hci1394_xfer_ctl_t *)kmem_zalloc(
2203 (sizeof (hci1394_xfer_ctl_t) + (dmacnt - 1) *
2204 sizeof (hci1394_xfer_ctl_dma_t)), KM_NOSLEEP)) == NULL) {
2206 return (NULL);
2207 }
2208 #else
2209 /*
2210 * This section makes it possible to easily run and test the compiler in
2211 * user mode.
2212 */
2213 if ((xcsp = (hci1394_xfer_ctl_t *)calloc(1,
2214 sizeof (hci1394_xfer_ctl_t) + (dmacnt - 1) *
2215 sizeof (hci1394_xfer_ctl_dma_t))) == NULL) {
2217 return (NULL);
2218 }
2219 #endif
2220 /*
2221 * set dma structure count into allocated xfer_ctl struct for
2222 * later deletion.
2223 */
2224 xcsp->cnt = dmacnt;
2226 /* link it to previously allocated xfer_ctl structs or set as first */
2227 if (wvp->xcs_firstp == NULL) {
2228 wvp->xcs_firstp = wvp->xcs_currentp = xcsp;
2229 } else {
2230 wvp->xcs_currentp->ctl_nextp = xcsp;
2231 wvp->xcs_currentp = xcsp;
2232 }
2234 /* return allocated xfer_ctl structure */
2235 return (xcsp);
2236 }
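/*
 * Illustrative sketch (hypothetical helper, not called anywhere): the size
 * requested by the allocation above.  hci1394_xfer_ctl_t evidently already
 * contains the first hci1394_xfer_ctl_dma_t entry, so only dmacnt - 1
 * additional entries are tacked on.  E.g. dmacnt == 3 requests
 * sizeof (hci1394_xfer_ctl_t) + 2 * sizeof (hci1394_xfer_ctl_dma_t) bytes.
 */
static size_t
hci1394_example_xfer_ctl_size(uint32_t dmacnt)
{
	return (sizeof (hci1394_xfer_ctl_t) +
	    (dmacnt - 1) * sizeof (hci1394_xfer_ctl_dma_t));
}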
2238 /*
2239 * hci1394_alloc_dma_mem()
2240 * Allocates and binds memory for OpenHCI DMA descriptors as needed.
2241 */
2242 static void *
2243 hci1394_alloc_dma_mem(hci1394_comp_ixl_vars_t *wvp, uint32_t size,
2244 uint32_t *dma_bound)
2245 {
2246 hci1394_idma_desc_mem_t *dma_new;
2247 hci1394_buf_parms_t parms;
2248 hci1394_buf_info_t *memp;
2249 void *dma_mem_ret;
2250 int ret;
2252 /*
2253 * if no dma has been allocated or current request exceeds
2254 * remaining memory
2255 */
2256 if ((wvp->dma_currentp == NULL) ||
2257 (size > (wvp->dma_currentp->mem.bi_cookie.dmac_size -
2258 wvp->dma_currentp->used))) {
2259 #ifdef _KERNEL
2260 /* kernel-mode memory allocation for driver */
2262 /* allocate struct to track more dma descriptor memory */
2263 if ((dma_new = (hci1394_idma_desc_mem_t *)
2264 kmem_zalloc(sizeof (hci1394_idma_desc_mem_t),
2265 KM_NOSLEEP)) == NULL) {
2267 return (NULL);
2268 }
2270 /*
2271 * if more cookies available from the current mem, try to find
2272 * one of suitable size. Cookies that are too small will be
2273 * skipped and unused. Given that cookie size is always at least
2274 * 1 page long and HCI1394_DESC_MAX_Z is much smaller than that,
2275 * it's a small price to pay for code simplicity.
2276 */
2277 if (wvp->dma_currentp != NULL) {
2278 /* new struct is derived from current */
2279 memp = &wvp->dma_currentp->mem;
2280 dma_new->mem = *memp;
2281 dma_new->offset = wvp->dma_currentp->offset +
2282 memp->bi_cookie.dmac_size;
2284 for (; memp->bi_cookie_count > 1;
2285 memp->bi_cookie_count--) {
2286 ddi_dma_nextcookie(memp->bi_dma_handle,
2287 &dma_new->mem.bi_cookie);
2289 if (dma_new->mem.bi_cookie.dmac_size >= size) {
2290 dma_new->mem_handle =
2291 wvp->dma_currentp->mem_handle;
2292 wvp->dma_currentp->mem_handle = NULL;
2293 dma_new->mem.bi_cookie_count--;
2294 break;
2295 }
2296 dma_new->offset +=
2297 dma_new->mem.bi_cookie.dmac_size;
2298 }
2299 }
2301 /* if no luck with current buffer, allocate a new one */
2302 if (dma_new->mem_handle == NULL) {
2303 parms.bp_length = HCI1394_IXL_PAGESIZE;
2304 parms.bp_max_cookies = OHCI_MAX_COOKIE;
2305 parms.bp_alignment = 16;
2306 ret = hci1394_buf_alloc(&wvp->soft_statep->drvinfo,
2307 &parms, &dma_new->mem, &dma_new->mem_handle);
2308 if (ret != DDI_SUCCESS) {
2309 kmem_free(dma_new,
2310 sizeof (hci1394_idma_desc_mem_t));
2312 return (NULL);
2313 }
2315 /* paranoia: this is not supposed to happen */
2316 if (dma_new->mem.bi_cookie.dmac_size < size) {
2317 hci1394_buf_free(&dma_new->mem_handle);
2318 kmem_free(dma_new,
2319 sizeof (hci1394_idma_desc_mem_t));
2321 return (NULL);
2322 }
2323 dma_new->offset = 0;
2324 }
2325 #else
2326 /* user-mode memory allocation for user mode compiler tests */
2327 /* allocate another dma_desc_mem struct */
2328 if ((dma_new = (hci1394_idma_desc_mem_t *)
2329 calloc(1, sizeof (hci1394_idma_desc_mem_t))) == NULL) {
2330 return (NULL);
2331 }
2332 dma_new->mem.bi_dma_handle = NULL;
2333 dma_new->mem.bi_handle = NULL;
2334 if ((dma_new->mem.bi_kaddr = (caddr_t)calloc(1,
2335 HCI1394_IXL_PAGESIZE)) == NULL) {
2336 return (NULL);
2337 }
2338 dma_new->mem.bi_cookie.dmac_address =
2339 (unsigned long)dma_new->mem.bi_kaddr;
2340 dma_new->mem.bi_real_length = HCI1394_IXL_PAGESIZE;
2341 dma_new->mem.bi_cookie_count = 1;
2342 #endif
2344 /* if this is not first dma_desc_mem, link last one to it */
2345 if (wvp->dma_currentp != NULL) {
2346 wvp->dma_currentp->dma_nextp = dma_new;
2347 wvp->dma_currentp = dma_new;
2348 } else {
2349 /* else set it as first one */
2350 wvp->dma_currentp = wvp->dma_firstp = dma_new;
2351 }
2352 }
2354 /* now allocate requested memory from current block */
2355 dma_mem_ret = wvp->dma_currentp->mem.bi_kaddr +
2356 wvp->dma_currentp->offset + wvp->dma_currentp->used;
2357 *dma_bound = wvp->dma_currentp->mem.bi_cookie.dmac_address +
2358 wvp->dma_currentp->used;
2359 wvp->dma_currentp->used += size;
2361 return (dma_mem_ret);
2362 }
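/*
 * Illustrative sketch (hypothetical helper, not called anywhere): every
 * allocation made through hci1394_alloc_dma_mem() yields a kernel virtual
 * address for filling in descriptors and, via *dma_bound, the matching
 * 32-bit bus address the OpenHCI context will fetch them from; both refer
 * to the same carve-out of the current HCI1394_IXL_PAGESIZE chunk.
 */
static caddr_t
hci1394_example_alloc_one_desc(hci1394_comp_ixl_vars_t *wvp,
    uint32_t *dma_bound)
{
	/* a single hci1394_desc_t worth of descriptor memory */
	return ((caddr_t)hci1394_alloc_dma_mem(wvp, sizeof (hci1394_desc_t),
	    dma_bound));
}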
2365 /*
2366 * hci1394_is_opcode_valid()
2367 * Given an ixl opcode, this routine returns B_TRUE if it is a
2368 * recognized opcode and B_FALSE if it is not.
2369 * Note that the FULL 16 bits of the opcode are checked, which includes
2370 * various flags and not just the low order 8 bits of unique code.
2371 */
2372 static boolean_t
2373 hci1394_is_opcode_valid(uint16_t ixlopcode)
2374 {
2375 /* if it's not one we know about, then it's bad */
2376 switch (ixlopcode) {
2377 case IXL1394_OP_LABEL:
2378 case IXL1394_OP_JUMP:
2379 case IXL1394_OP_CALLBACK:
2380 case IXL1394_OP_RECV_PKT:
2381 case IXL1394_OP_RECV_PKT_ST:
2382 case IXL1394_OP_RECV_BUF:
2383 case IXL1394_OP_SEND_PKT:
2384 case IXL1394_OP_SEND_PKT_ST:
2385 case IXL1394_OP_SEND_PKT_WHDR_ST:
2386 case IXL1394_OP_SEND_BUF:
2387 case IXL1394_OP_SEND_HDR_ONLY:
2388 case IXL1394_OP_SEND_NO_PKT:
2389 case IXL1394_OP_STORE_TIMESTAMP:
2390 case IXL1394_OP_SET_TAGSYNC:
2391 case IXL1394_OP_SET_SKIPMODE:
2392 case IXL1394_OP_SET_SYNCWAIT:
2393 case IXL1394_OP_JUMP_U:
2394 case IXL1394_OP_CALLBACK_U:
2395 case IXL1394_OP_RECV_PKT_U:
2396 case IXL1394_OP_RECV_PKT_ST_U:
2397 case IXL1394_OP_RECV_BUF_U:
2398 case IXL1394_OP_SEND_PKT_U:
2399 case IXL1394_OP_SEND_PKT_ST_U:
2400 case IXL1394_OP_SEND_PKT_WHDR_ST_U:
2401 case IXL1394_OP_SEND_BUF_U:
2402 case IXL1394_OP_SET_TAGSYNC_U:
2403 case IXL1394_OP_SET_SKIPMODE_U:
2404 return (B_TRUE);
2405 default:
2406 return (B_FALSE);