/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * hci1394_q.c
 *    This code decouples some of the OpenHCI async descriptor logic/structures
 *    from the async processing.  The goal was to combine as much of the
 *    duplicate code as possible for the different types of async transfers
 *    without going too overboard.
 *
 *    There are two parts to the Q, the descriptor buffer and the data buffer.
 *    For the most part, data to be transmitted and data which is received go
 *    in the data buffers.  The information of where to get the data and put
 *    the data resides in the descriptor buffers.  There are exceptions to
 *    this.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/modctl.h>
#include <sys/stat.h>
#include <sys/sunddi.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/note.h>

#include <sys/1394/adapters/hci1394.h>

static int hci1394_q_reserve(hci1394_q_buf_t *qbuf, uint_t size,
    uint32_t *io_addr);
static void hci1394_q_unreserve(hci1394_q_buf_t *qbuf);
static void hci1394_q_buf_setup(hci1394_q_buf_t *qbuf);
static void hci1394_q_reset(hci1394_q_handle_t q_handle);
static void hci1394_q_next_buf(hci1394_q_buf_t *qbuf);

static void hci1394_q_at_write_OLI(hci1394_q_handle_t q_handle,
    hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr,
    uint_t hdrsize);
static void hci1394_q_at_write_OMI(hci1394_q_handle_t q_handle,
    hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr,
    uint_t hdrsize);
static void hci1394_q_at_write_OL(hci1394_q_handle_t q_handle,
    hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, uint32_t io_addr,
    uint_t datasize);
static void hci1394_q_at_rep_put8(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
    uint8_t *data, uint_t datasize);
static void hci1394_q_at_copy_from_mblk(hci1394_q_buf_t *qbuf,
    hci1394_q_cmd_t *cmd, h1394_mblk_t *mblk);

static void hci1394_q_ar_write_IM(hci1394_q_handle_t q_handle,
    hci1394_q_buf_t *qbuf, uint32_t io_addr, uint_t datasize);

_NOTE(SCHEME_PROTECTS_DATA("unique", msgb))

/*
 * hci1394_q_init()
 *    Initialize a Q. A Q consists of a descriptor buffer and a data buffer and
 *    can be either an AT or AR Q. hci1394_q_init() returns a handle which
 *    should be used for the rest of the hci1394_q_* calls.
 */
int
hci1394_q_init(hci1394_drvinfo_t *drvinfo,
    hci1394_ohci_handle_t ohci_handle, hci1394_q_info_t *qinfo,
    hci1394_q_handle_t *q_handle)
{
	hci1394_q_buf_t *desc;
	hci1394_q_buf_t *data;
	hci1394_buf_parms_t parms;
	hci1394_q_t *q;
	int status;
	int index;


	ASSERT(drvinfo != NULL);
	ASSERT(qinfo != NULL);
	ASSERT(q_handle != NULL);

	/*
	 * allocate the memory to track this Q.  Initialize the internal Q
	 * structure.
	 */
	q = kmem_alloc(sizeof (hci1394_q_t), KM_SLEEP);
	q->q_drvinfo = drvinfo;
	q->q_info = *qinfo;
	q->q_ohci = ohci_handle;
	mutex_init(&q->q_mutex, NULL, MUTEX_DRIVER, drvinfo->di_iblock_cookie);
	desc = &q->q_desc;
	data = &q->q_data;

	/*
	 * Allocate the Descriptor buffer.
	 *
	 * XXX - Only want 1 cookie for now. Change this to OHCI_MAX_COOKIE
	 * after we have tested the multiple cookie code on x86.
	 */
	parms.bp_length = qinfo->qi_desc_size;
	parms.bp_max_cookies = 1;
	parms.bp_alignment = 16;
	status = hci1394_buf_alloc(drvinfo, &parms, &desc->qb_buf,
	    &desc->qb_buf_handle);
	if (status != DDI_SUCCESS) {
		mutex_destroy(&q->q_mutex);
		kmem_free(q, sizeof (hci1394_q_t));
		*q_handle = NULL;
		return (DDI_FAILURE);
	}

	/* Copy in buffer cookies into our local cookie array */
	desc->qb_cookie[0] = desc->qb_buf.bi_cookie;
	for (index = 1; index < desc->qb_buf.bi_cookie_count; index++) {
		ddi_dma_nextcookie(desc->qb_buf.bi_dma_handle,
		    &desc->qb_buf.bi_cookie);
		desc->qb_cookie[index] = desc->qb_buf.bi_cookie;
	}

	/*
	 * Allocate the Data buffer.
	 *
	 * XXX - Only want 1 cookie for now. Change this to OHCI_MAX_COOKIE
	 * after we have tested the multiple cookie code on x86.
	 */
	parms.bp_length = qinfo->qi_data_size;
	parms.bp_max_cookies = 1;
	parms.bp_alignment = 16;
	status = hci1394_buf_alloc(drvinfo, &parms, &data->qb_buf,
	    &data->qb_buf_handle);
	if (status != DDI_SUCCESS) {
		/* Free the allocated Descriptor buffer */
		hci1394_buf_free(&desc->qb_buf_handle);

		mutex_destroy(&q->q_mutex);
		kmem_free(q, sizeof (hci1394_q_t));
		*q_handle = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * We must have at least 2 ARQ data buffers.  If we only have one, we
	 * will artificially create 2. We must have 2 so that we always have a
	 * descriptor with free data space to write AR data to. When one is
	 * empty, it will take us a bit to get a new descriptor back into the
	 * chain.
	 */
	if ((qinfo->qi_mode == HCI1394_ARQ) &&
	    (data->qb_buf.bi_cookie_count == 1)) {
		data->qb_buf.bi_cookie_count = 2;
		data->qb_cookie[0] = data->qb_buf.bi_cookie;
		data->qb_cookie[0].dmac_size /= 2;
		data->qb_cookie[1] = data->qb_cookie[0];
		data->qb_cookie[1].dmac_laddress =
		    data->qb_cookie[0].dmac_laddress +
		    data->qb_cookie[0].dmac_size;
		data->qb_cookie[1].dmac_address =
		    data->qb_cookie[0].dmac_address +
		    data->qb_cookie[0].dmac_size;

	/* We have more than 1 cookie or we are an AT Q */
	} else {
		/* Copy in buffer cookies into our local cookie array */
		data->qb_cookie[0] = data->qb_buf.bi_cookie;
		for (index = 1; index < data->qb_buf.bi_cookie_count; index++) {
			ddi_dma_nextcookie(data->qb_buf.bi_dma_handle,
			    &data->qb_buf.bi_cookie);
			data->qb_cookie[index] = data->qb_buf.bi_cookie;
		}
	}

	/* The top and bottom of the Q are only set once */
	desc->qb_ptrs.qp_top = desc->qb_buf.bi_kaddr;
	desc->qb_ptrs.qp_bottom = desc->qb_buf.bi_kaddr +
	    desc->qb_buf.bi_real_length - 1;
	data->qb_ptrs.qp_top = data->qb_buf.bi_kaddr;
	data->qb_ptrs.qp_bottom = data->qb_buf.bi_kaddr +
	    data->qb_buf.bi_real_length - 1;

	/*
	 * reset the Q pointers to their original settings.  Setup IM
	 * descriptors if this is an AR Q.
	 */
	hci1394_q_reset(q);

	/* if this is an AT Q, create a queued list for the AT descriptors */
	if (qinfo->qi_mode == HCI1394_ATQ) {
		hci1394_tlist_init(drvinfo, NULL, &q->q_queued_list);
	}

	*q_handle = q;

	return (DDI_SUCCESS);
}
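
/*
 * Illustrative caller sketch (not part of the driver): a typical AT Q setup
 * fills in a hci1394_q_info_t and creates the Q.  The sizes and callback
 * names below are hypothetical; only the field names used elsewhere in this
 * file are assumed to exist.
 *
 *	hci1394_q_info_t qinfo;
 *	hci1394_q_handle_t q_handle;
 *
 *	qinfo.qi_mode = HCI1394_ATQ;
 *	qinfo.qi_desc_size = 4096;		(hypothetical size)
 *	qinfo.qi_data_size = 16384;		(hypothetical size)
 *	qinfo.qi_start = my_atreq_start;	(hypothetical callback)
 *	qinfo.qi_wake = my_atreq_wake;		(hypothetical callback)
 *	qinfo.qi_callback_arg = soft_state;
 *	if (hci1394_q_init(drvinfo, ohci_hdl, &qinfo, &q_handle) !=
 *	    DDI_SUCCESS) {
 *		... handle allocation failure ...
 *	}
 *	...
 *	hci1394_q_fini(&q_handle);	(sets q_handle to NULL)
 */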

/*
 * hci1394_q_fini()
 *    Cleanup after a successful hci1394_q_init(). Notice that a pointer to the
 *    handle is used for the parameter.  fini() will set your handle to NULL
 *    before returning.
 */
void
hci1394_q_fini(hci1394_q_handle_t *q_handle)
{
	hci1394_q_t *q;

	ASSERT(q_handle != NULL);

	q = *q_handle;
	if (q->q_info.qi_mode == HCI1394_ATQ) {
		hci1394_tlist_fini(&q->q_queued_list);
	}
	mutex_destroy(&q->q_mutex);
	hci1394_buf_free(&q->q_desc.qb_buf_handle);
	hci1394_buf_free(&q->q_data.qb_buf_handle);
	kmem_free(q, sizeof (hci1394_q_t));
	*q_handle = NULL;
}

/*
 * hci1394_q_buf_setup()
 *    Initialization of buffer pointers which are present in both the
 *    descriptor buffer and data buffer (No reason to duplicate the code)
 */
static void
hci1394_q_buf_setup(hci1394_q_buf_t *qbuf)
{
	ASSERT(qbuf != NULL);

	/* start with the first cookie */
	qbuf->qb_ptrs.qp_current_buf = 0;
	qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_top;
	qbuf->qb_ptrs.qp_end = qbuf->qb_ptrs.qp_begin +
	    qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf].dmac_size - 1;
	qbuf->qb_ptrs.qp_current = qbuf->qb_ptrs.qp_begin;
	qbuf->qb_ptrs.qp_offset = 0;

	/*
	 * The free_buf and free pointer will change every time an ACK (of
	 * some type) is processed.  Free is the last byte in the last cookie.
	 */
	qbuf->qb_ptrs.qp_free_buf = qbuf->qb_buf.bi_cookie_count - 1;
	qbuf->qb_ptrs.qp_free = qbuf->qb_ptrs.qp_bottom;

	/*
	 * Start with no space to write descriptors.  We first need to call
	 * hci1394_q_reserve() before calling hci1394_q_at_write_O*().
	 */
	qbuf->qb_ptrs.qp_resv_size = 0;
}
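
/*
 * Illustrative layout of the qb_ptrs fields right after
 * hci1394_q_buf_setup() on a two cookie buffer (sizes hypothetical):
 *
 *	qp_top/qp_begin/qp_current		      qp_bottom/qp_free
 *	v						      v
 *	+------------- cookie 0 -------------+----- cookie 1 -----+
 *	|                                    |                    |
 *	+------------------------------------+--------------------+
 *	                                    ^
 *	                                    qp_end (end of cookie 0)
 *
 * qp_current_buf starts at cookie 0 and qp_free_buf at the last cookie, so
 * the entire Q is initially available to hci1394_q_reserve().
 */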

/*
 * hci1394_q_reset()
 *    Resets the buffers to an initial state.  This should be called during
 *    attach and resume.
 */
static void
hci1394_q_reset(hci1394_q_handle_t q_handle)
{
	hci1394_q_buf_t *desc;
	hci1394_q_buf_t *data;
	int index;

	ASSERT(q_handle != NULL);

	mutex_enter(&q_handle->q_mutex);
	desc = &q_handle->q_desc;
	data = &q_handle->q_data;

	hci1394_q_buf_setup(desc);
	hci1394_q_buf_setup(data);

	/* DMA starts off stopped, no previous descriptor to link from */
	q_handle->q_dma_running = B_FALSE;
	q_handle->q_block_cnt = 0;
	q_handle->q_previous = NULL;

	/* If this is an AR Q, setup IM's for the data buffers that we have */
	if (q_handle->q_info.qi_mode == HCI1394_ARQ) {
		/*
		 * This points to where to find the first IM descriptor.  Since
		 * we just reset the pointers in hci1394_q_buf_setup(), the
		 * first IM we write below will be found at the top of the Q.
		 */
		q_handle->q_head = desc->qb_ptrs.qp_top;

		for (index = 0; index < data->qb_buf.bi_cookie_count;
		    index++) {
			hci1394_q_ar_write_IM(q_handle, desc,
			    data->qb_cookie[index].dmac_address,
			    data->qb_cookie[index].dmac_size);
		}

		/*
		 * The space left in the current IM is the size of the buffer.
		 * The current buffer is the first buffer added to the AR Q.
		 */
		q_handle->q_space_left = data->qb_cookie[0].dmac_size;
	}

	mutex_exit(&q_handle->q_mutex);
}

/*
 * hci1394_q_resume()
 *    This is called during a resume (after a successful suspend). Currently
 *    we only call reset.  Since this is not a time critical function, we will
 *    leave this as a separate function to increase readability.
 */
void
hci1394_q_resume(hci1394_q_handle_t q_handle)
{
	ASSERT(q_handle != NULL);
	hci1394_q_reset(q_handle);
}

/*
 * hci1394_q_stop()
 *    This call informs us that a DMA engine has been stopped.  It does not
 *    perform the actual stop. We need to know this so that when we add a
 *    new descriptor, we do a start instead of a wake.
 */
void
hci1394_q_stop(hci1394_q_handle_t q_handle)
{
	ASSERT(q_handle != NULL);
	mutex_enter(&q_handle->q_mutex);
	q_handle->q_dma_running = B_FALSE;
	mutex_exit(&q_handle->q_mutex);
}

/*
 * hci1394_q_reserve()
 *    Reserve space in the AT descriptor or data buffer. This ensures that we
 *    can get a contiguous buffer. Descriptors have to be in a contiguous
 *    buffer. Data does not have to be in a contiguous buffer but we do this
 *    to reduce complexity. For systems with small page sizes (e.g. x86), this
 *    could result in inefficient use of the data buffers when sending large
 *    data blocks (this only applies to non-physical block write ATREQs and
 *    block read ATRESPs). Since it looks like most protocols that use large
 *    data blocks (like SBP-2) use physical transfers to do this (due to their
 *    efficiency), this will probably not be a real world problem. If it turns
 *    out to be a problem, the options are to force a single cookie for the
 *    data buffer, allow multiple cookies and have a larger data space, or
 *    change the data code to use an OMI, OM, OL descriptor sequence (instead
 *    of OMI, OL).
 */
static int
hci1394_q_reserve(hci1394_q_buf_t *qbuf, uint_t size, uint32_t *io_addr)
{
	uint_t aligned_size;


	ASSERT(qbuf != NULL);

	/* Save backup of pointers in case we have to unreserve */
	qbuf->qb_backup_ptrs = qbuf->qb_ptrs;

	/*
	 * Make sure all alloc's are quadlet aligned. The data doesn't have to
	 * be, so we will force it to be.
	 */
	aligned_size = HCI1394_ALIGN_QUAD(size);

	/*
	 * if the free pointer is in the current buffer and the free pointer
	 * is below the current pointer (i.e. has not wrapped around)
	 */
	if ((qbuf->qb_ptrs.qp_current_buf == qbuf->qb_ptrs.qp_free_buf) &&
	    (qbuf->qb_ptrs.qp_free >= qbuf->qb_ptrs.qp_current)) {
		/*
		 * The free pointer is in this buffer below the current
		 * pointer.  Check to see if we have enough free space left.
		 */
		if ((qbuf->qb_ptrs.qp_current + aligned_size) <=
		    qbuf->qb_ptrs.qp_free) {
			/* Setup up our reserved size, return the IO address */
			qbuf->qb_ptrs.qp_resv_size = aligned_size;
			*io_addr = (uint32_t)(qbuf->qb_cookie[
			    qbuf->qb_ptrs.qp_current_buf].dmac_address +
			    qbuf->qb_ptrs.qp_offset);

		/*
		 * The free pointer is in this buffer below the current
		 * pointer.  We do not have enough free space for the alloc.
		 * Return failure.
		 */
		} else {
			qbuf->qb_ptrs.qp_resv_size = 0;
			return (DDI_FAILURE);
		}

	/*
	 * If there is not enough room to fit in the current buffer (not
	 * including wrap around), we will go to the next buffer and check
	 * there. If we only have one buffer (i.e. one cookie), we will end up
	 * staying at the current buffer and wrapping the address back to the
	 * top.
	 */
	} else if ((qbuf->qb_ptrs.qp_current + aligned_size) >
	    qbuf->qb_ptrs.qp_end) {
		/* Go to the next buffer (or the top of ours for one cookie) */
		hci1394_q_next_buf(qbuf);

		/* If the free pointer is in the new current buffer */
		if (qbuf->qb_ptrs.qp_current_buf ==
		    qbuf->qb_ptrs.qp_free_buf) {
			/*
			 * The free pointer is in this buffer. If we do not
			 * have enough free space for the alloc, return
			 * failure.
			 */
			if ((qbuf->qb_ptrs.qp_current + aligned_size) >
			    qbuf->qb_ptrs.qp_free) {
				qbuf->qb_ptrs.qp_resv_size = 0;
				return (DDI_FAILURE);

			/*
			 * The free pointer is in this buffer. We have enough
			 * free space left.
			 */
			} else {
				/*
				 * Setup up our reserved size, return the IO
				 * address
				 */
				qbuf->qb_ptrs.qp_resv_size = aligned_size;
				*io_addr = (uint32_t)(qbuf->qb_cookie[
				    qbuf->qb_ptrs.qp_current_buf].dmac_address
				    + qbuf->qb_ptrs.qp_offset);
			}

		/*
		 * We switched buffers and the free pointer is still in another
		 * buffer. We have sufficient space in this buffer for the
		 * alloc after changing buffers.
		 */
		} else {
			/* Setup up our reserved size, return the IO address */
			qbuf->qb_ptrs.qp_resv_size = aligned_size;
			*io_addr = (uint32_t)(qbuf->qb_cookie[
			    qbuf->qb_ptrs.qp_current_buf].dmac_address +
			    qbuf->qb_ptrs.qp_offset);
		}

	/*
	 * The free pointer is in another buffer. We have sufficient space in
	 * this buffer for the alloc.
	 */
	} else {
		/* Setup up our reserved size, return the IO address */
		qbuf->qb_ptrs.qp_resv_size = aligned_size;
		*io_addr = (uint32_t)(qbuf->qb_cookie[
		    qbuf->qb_ptrs.qp_current_buf].dmac_address +
		    qbuf->qb_ptrs.qp_offset);
	}

	return (DDI_SUCCESS);
}
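
/*
 * Worked example (hypothetical numbers): assume a single cookie buffer whose
 * cookie dmac_address is 0x1000000, with qp_free == qp_bottom and qp_current
 * 0x30 bytes below qp_end.  A reserve of size 33 is rounded up to an
 * aligned_size of 36 (0x24) by HCI1394_ALIGN_QUAD().  The free pointer has
 * not wrapped and 0x24 fits below qp_free, so the reserve succeeds and
 * *io_addr = 0x1000000 + qp_offset.  If only 0x20 bytes remained before
 * qp_end, hci1394_q_next_buf() would first wrap qp_current back to the top
 * (one cookie case) and the free space check would be redone there.
 */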

/*
 * hci1394_q_unreserve()
 *    Set the buffer pointers to what they were before hci1394_q_reserve().
 *    This will be called when we encounter errors during hci1394_q_at*().
 */
static void
hci1394_q_unreserve(hci1394_q_buf_t *qbuf)
{
	ASSERT(qbuf != NULL);

	/* Go back to the pointer settings before the reserve */
	qbuf->qb_ptrs = qbuf->qb_backup_ptrs;
}

/*
 * hci1394_q_next_buf()
 *    Set our current buffer to the next cookie.  If we only have one cookie,
 *    we will go back to the top of our buffer.
 */
void
hci1394_q_next_buf(hci1394_q_buf_t *qbuf)
{
	ASSERT(qbuf != NULL);

	/*
	 * go to the next cookie, if we are >= the cookie count, go back to the
	 * first cookie.
	 */
	qbuf->qb_ptrs.qp_current_buf++;
	if (qbuf->qb_ptrs.qp_current_buf >= qbuf->qb_buf.bi_cookie_count) {
		qbuf->qb_ptrs.qp_current_buf = 0;
	}

	/* adjust the begin, end, current, and offset pointers */
	qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_end + 1;
	if (qbuf->qb_ptrs.qp_begin > qbuf->qb_ptrs.qp_bottom) {
		qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_top;
	}
	qbuf->qb_ptrs.qp_end = qbuf->qb_ptrs.qp_begin +
	    qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf].dmac_size - 1;
	qbuf->qb_ptrs.qp_current = qbuf->qb_ptrs.qp_begin;
	qbuf->qb_ptrs.qp_offset = 0;
}

/*
 * hci1394_q_at()
 *    Place an AT command that does NOT need the data buffer into the DMA
 *    chain.  Some examples of this are quadlet read/write, PHY packets, ATREQ
 *    Block Read, and ATRESP block write. result is only valid on failure.
 */
int
hci1394_q_at(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
    hci1394_basic_pkt_t *hdr, uint_t hdrsize, int *result)
{
	int status;
	uint32_t ioaddr;


	ASSERT(q_handle != NULL);
	ASSERT(cmd != NULL);
	ASSERT(hdr != NULL);

	mutex_enter(&q_handle->q_mutex);

	/*
	 * Check the HAL state and generation when the AT Q is locked.  This
	 * will make sure that we get all the commands when we flush the Q's
	 * during a reset or shutdown.
	 */
	if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
	    (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
	    cmd->qc_generation)) {
		*result = H1394_STATUS_INVALID_BUSGEN;
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* save away the argument to pass up when this command completes */
	cmd->qc_node.tln_addr = cmd;

	/* we have not written any 16 byte blocks to the descriptor yet */
	q_handle->q_block_cnt = 0;

	/* Reserve space for an OLI in the descriptor buffer */
	status = hci1394_q_reserve(&q_handle->q_desc,
	    sizeof (hci1394_desc_imm_t), &ioaddr);
	if (status != DDI_SUCCESS) {
		*result = H1394_STATUS_NOMORE_SPACE;
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* write the OLI to the descriptor buffer */
	hci1394_q_at_write_OLI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);

	/* Add the AT command to the queued list */
	hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);

	mutex_exit(&q_handle->q_mutex);

	return (DDI_SUCCESS);
}
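
/*
 * Illustrative caller sketch (not part of the driver): sending a quadlet
 * write ATREQ hands a filled-in basic packet header to hci1394_q_at().  The
 * header quadlet contents are built by the async layer; the assignments
 * below are hypothetical placeholders, not the real header layout.
 *
 *	hci1394_basic_pkt_t hdr;
 *	int result;
 *
 *	cmd->qc_generation = current_bus_generation;	(hypothetical)
 *	hdr.q1 = ...;	(quadlet containing the tcode, among other fields)
 *	hdr.q2 = ...;
 *	hdr.q3 = ...;
 *	hdr.q4 = ...;	(quadlet data to write)
 *	if (hci1394_q_at(q_handle, cmd, &hdr, 16, &result) != DDI_SUCCESS) {
 *		... result holds an H1394_STATUS_* value saying why ...
 *	}
 */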

/*
 * XXX - NOTE: POSSIBLE FUTURE OPTIMIZATION
 *    ATREQ Block read and write's that go through software are not very
 *    efficient (one of the reasons to use physical space). A copy is forced
 *    on all block reads due to the design of OpenHCI. Writes do not have this
 *    same restriction.  This design forces a copy for writes too (we always
 *    copy into a data buffer before sending). There are many reasons for this
 *    including complexity reduction.  There is a data size threshold where a
 *    copy is more expensive than mapping the data buffer address (or worse
 *    case a big enough difference where it pays to do it). However, we move
 *    block data around in mblks which means that our data may be scattered
 *    over many buffers.  This adds to the complexity of mapping and setting
 *    up the OpenHCI descriptors.
 *
 *    If someone really needs a speedup on block write ATREQs, my
 *    recommendation would be to add an additional command type at the target
 *    interface for a fast block write.  The target driver would pass a mapped
 *    io addr to use. A function like "hci1394_q_at_with_ioaddr()" could be
 *    created which would be almost an exact copy of hci1394_q_at_with_data()
 *    without the hci1394_q_reserve() and hci1394_q_at_rep_put8() for the
 *    data buffer; see the sketch below.
 */
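
/*
 * A minimal sketch of the proposed hci1394_q_at_with_ioaddr(), assuming the
 * same locking, state/generation check, and descriptor reserve logic as
 * hci1394_q_at_with_data().  This is not implemented; it only illustrates
 * the optimization described above:
 *
 *	(check HAL state/generation under q_mutex as in hci1394_q_at())
 *	status = hci1394_q_reserve(&q_handle->q_desc,
 *	    (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
 *	    &desc_ioaddr);
 *	(on failure: *result = H1394_STATUS_NOMORE_SPACE and fail)
 *	hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr,
 *	    hdrsize);
 *	hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd,
 *	    caller_io_addr, datasize);		(no data copy step)
 *	hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
 */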

/*
 * hci1394_q_at_with_data()
 *    Place an AT command that does need the data buffer into the DMA chain.
 *    The data is passed as a pointer to a kernel virtual address. An example
 *    of this is the lock operations. result is only valid on failure.
 */
int
hci1394_q_at_with_data(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
    hci1394_basic_pkt_t *hdr, uint_t hdrsize, uint8_t *data, uint_t datasize,
    int *result)
{
	uint32_t desc_ioaddr;
	uint32_t data_ioaddr;
	int status;


	ASSERT(q_handle != NULL);
	ASSERT(cmd != NULL);
	ASSERT(hdr != NULL);
	ASSERT(data != NULL);

	mutex_enter(&q_handle->q_mutex);

	/*
	 * Check the HAL state and generation when the AT Q is locked.  This
	 * will make sure that we get all the commands when we flush the Q's
	 * during a reset or shutdown.
	 */
	if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
	    (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
	    cmd->qc_generation)) {
		*result = H1394_STATUS_INVALID_BUSGEN;
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* save away the argument to pass up when this command completes */
	cmd->qc_node.tln_addr = cmd;

	/* we have not written any 16 byte blocks to the descriptor yet */
	q_handle->q_block_cnt = 0;

	/* Reserve space for an OMI and OL in the descriptor buffer */
	status = hci1394_q_reserve(&q_handle->q_desc,
	    (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
	    &desc_ioaddr);
	if (status != DDI_SUCCESS) {
		*result = H1394_STATUS_NOMORE_SPACE;
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* allocate space for data in the data buffer */
	status = hci1394_q_reserve(&q_handle->q_data, datasize, &data_ioaddr);
	if (status != DDI_SUCCESS) {
		*result = H1394_STATUS_NOMORE_SPACE;
		hci1394_q_unreserve(&q_handle->q_desc);
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* Copy data into data buffer */
	hci1394_q_at_rep_put8(&q_handle->q_data, cmd, data, datasize);

	/* write the OMI to the descriptor buffer */
	hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);

	/* write the OL to the descriptor buffer */
	hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd, data_ioaddr,
	    datasize);

	/* Add the AT command to the queued list */
	hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);

	mutex_exit(&q_handle->q_mutex);

	return (DDI_SUCCESS);
}
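
/*
 * Illustrative caller sketch (hypothetical payload layout): a 32-bit
 * compare-swap lock request could pass its 8 bytes of lock payload through
 * the data parameter:
 *
 *	uint32_t payload[2];	(e.g. arg_value and data_value)
 *
 *	status = hci1394_q_at_with_data(q_handle, cmd, &hdr, 16,
 *	    (uint8_t *)payload, sizeof (payload), &result);
 */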

/*
 * hci1394_q_at_with_mblk()
 *    Place an AT command that does need the data buffer into the DMA chain.
 *    The data is passed in mblk_t(s). Examples of this are a block write
 *    ATREQ and a block read ATRESP. The services layer and the hal use a
 *    private structure (h1394_mblk_t) to keep track of how much of the mblk
 *    to send since we may have to break the transfer up into smaller blocks
 *    (i.e. a 1MByte block write would go out in 2KByte chunks). result is
 *    only valid on failure.
 */
int
hci1394_q_at_with_mblk(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
    hci1394_basic_pkt_t *hdr, uint_t hdrsize, h1394_mblk_t *mblk, int *result)
{
	uint32_t desc_ioaddr;
	uint32_t data_ioaddr;
	int status;


	ASSERT(q_handle != NULL);
	ASSERT(cmd != NULL);
	ASSERT(hdr != NULL);
	ASSERT(mblk != NULL);

	mutex_enter(&q_handle->q_mutex);

	/*
	 * Check the HAL state and generation when the AT Q is locked.  This
	 * will make sure that we get all the commands when we flush the Q's
	 * during a reset or shutdown.
	 */
	if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
	    (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
	    cmd->qc_generation)) {
		*result = H1394_STATUS_INVALID_BUSGEN;
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* save away the argument to pass up when this command completes */
	cmd->qc_node.tln_addr = cmd;

	/* we have not written any 16 byte blocks to the descriptor yet */
	q_handle->q_block_cnt = 0;

	/* Reserve space for an OMI and OL in the descriptor buffer */
	status = hci1394_q_reserve(&q_handle->q_desc,
	    (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
	    &desc_ioaddr);
	if (status != DDI_SUCCESS) {
		*result = H1394_STATUS_NOMORE_SPACE;
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* Reserve space for data in the data buffer */
	status = hci1394_q_reserve(&q_handle->q_data, mblk->length,
	    &data_ioaddr);
	if (status != DDI_SUCCESS) {
		*result = H1394_STATUS_NOMORE_SPACE;
		hci1394_q_unreserve(&q_handle->q_desc);
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* Copy mblk data into data buffer */
	hci1394_q_at_copy_from_mblk(&q_handle->q_data, cmd, mblk);

	/* write the OMI to the descriptor buffer */
	hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);

	/* write the OL to the descriptor buffer */
	hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd, data_ioaddr,
	    mblk->length);

	/* Add the AT command to the queued list */
	hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);

	mutex_exit(&q_handle->q_mutex);

	return (DDI_SUCCESS);
}

/*
 * hci1394_q_at_next()
 *    Return the next completed AT command in cmd.  If flush_q is true, we
 *    will return the command regardless if it finished or not.  We will
 *    flush during bus reset processing, shutdown, and detach.
 */
void
hci1394_q_at_next(hci1394_q_handle_t q_handle, boolean_t flush_q,
    hci1394_q_cmd_t **cmd)
{
	hci1394_q_buf_t *desc;
	hci1394_q_buf_t *data;
	hci1394_tlist_node_t *node;
	uint32_t cmd_status;


	ASSERT(q_handle != NULL);
	ASSERT(cmd != NULL);

	mutex_enter(&q_handle->q_mutex);

	desc = &q_handle->q_desc;
	data = &q_handle->q_data;

	/* Sync descriptor buffer */
	(void) ddi_dma_sync(desc->qb_buf.bi_dma_handle, 0,
	    desc->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);

	/* Look at the top cmd on the queued list (without removing it) */
	hci1394_tlist_peek(q_handle->q_queued_list, &node);
	if (node == NULL) {
		/* There are no more commands left on the queued list */
		*cmd = NULL;
		mutex_exit(&q_handle->q_mutex);
		return;
	}

	/*
	 * There is a command on the list, read its status and timestamp when
	 * it was sent
	 */
	*cmd = (hci1394_q_cmd_t *)node->tln_addr;
	cmd_status = ddi_get32(desc->qb_buf.bi_handle, (*cmd)->qc_status_addr);
	(*cmd)->qc_timestamp = cmd_status & DESC_ST_TIMESTAMP_MASK;
	cmd_status = HCI1394_DESC_EVT_GET(cmd_status);

	/*
	 * If we are flushing the q (e.g. due to a bus reset), we will return
	 * the command regardless of its completion status.  If we are not
	 * flushing the Q and we do not have status on the command (e.g.
	 * status = 0), we are done with this Q for now.
	 */
	if (flush_q == B_FALSE) {
		if (cmd_status == 0) {
			*cmd = NULL;
			mutex_exit(&q_handle->q_mutex);
			return;
		}
	}

	/*
	 * The command completed, remove it from the queued list. There is not
	 * a race condition to delete the node in the list here.  This is the
	 * only place the node will be deleted so we do not need to check the
	 * return status.
	 */
	(void) hci1394_tlist_delete(q_handle->q_queued_list, node);

	/*
	 * Free the space used by the command in the descriptor and data
	 * buffers.
	 */
	desc->qb_ptrs.qp_free_buf = (*cmd)->qc_descriptor_buf;
	desc->qb_ptrs.qp_free = (*cmd)->qc_descriptor_end;
	if ((*cmd)->qc_data_used == B_TRUE) {
		data->qb_ptrs.qp_free_buf = (*cmd)->qc_data_buf;
		data->qb_ptrs.qp_free = (*cmd)->qc_data_end;
	}

	/* return command status */
	(*cmd)->qc_status = cmd_status;

	mutex_exit(&q_handle->q_mutex);
}
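
/*
 * Illustrative sketch of how a completion handler might drain the Q (the
 * real processing lives in the async layer; the loop shape is the only
 * assumption here):
 *
 *	hci1394_q_cmd_t *cmd;
 *
 *	for (;;) {
 *		hci1394_q_at_next(q_handle, B_FALSE, &cmd);
 *		if (cmd == NULL) {
 *			break;	(no more completed commands)
 *		}
 *		... complete cmd based on cmd->qc_status ...
 *	}
 */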

/*
 * hci1394_q_at_write_OMI()
 *    Write an OMI descriptor into the AT descriptor buffer passed in as qbuf.
 *    Buffer state information is stored in cmd.  Use the hdr and hdr size for
 *    the additional information attached to an immediate descriptor.
 */
void
hci1394_q_at_write_OMI(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
    hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr, uint_t hdrsize)
{
	hci1394_desc_imm_t *desc;
	uint32_t data;


	ASSERT(qbuf != NULL);
	ASSERT(cmd != NULL);
	ASSERT(hdr != NULL);
	ASSERT(MUTEX_HELD(&q_handle->q_mutex));

	/* The only valid "header" sizes for an OMI are 8 bytes or 16 bytes */
	ASSERT((hdrsize == 8) || (hdrsize == 16));

	/* Make sure enough room for OMI */
	ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_imm_t));

	/* Store the offset of the top of this descriptor block */
	qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
	    qbuf->qb_ptrs.qp_begin);

	/* Setup OpenHCI OMI Header */
	desc = (hci1394_desc_imm_t *)qbuf->qb_ptrs.qp_current;
	data = DESC_AT_OMI | (hdrsize & DESC_HDR_REQCOUNT_MASK);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, 0);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, cmd->qc_timestamp);

	/*
	 * Copy in 1394 header. Size is in bytes, convert it to a 32-bit word
	 * count.
	 */
	ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1,
	    hdrsize >> 2, DDI_DEV_AUTOINCR);

	/*
	 * We wrote 2 16 byte blocks in the descriptor buffer, update the count
	 * accordingly.  Update the reserved size and current pointer.
	 */
	q_handle->q_block_cnt += 2;
	qbuf->qb_ptrs.qp_resv_size -= sizeof (hci1394_desc_imm_t);
	qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_imm_t);
}

/*
 * hci1394_q_at_write_OLI()
 *    Write an OLI descriptor into the AT descriptor buffer passed in as qbuf.
 *    Buffer state information is stored in cmd.  Use the hdr and hdr size for
 *    the additional information attached to an immediate descriptor.
 */
void
hci1394_q_at_write_OLI(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
    hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr, uint_t hdrsize)
{
	hci1394_desc_imm_t *desc;
	uint32_t data;
	uint32_t command_ptr;
	uint32_t tcode;


	ASSERT(qbuf != NULL);
	ASSERT(cmd != NULL);
	ASSERT(hdr != NULL);
	ASSERT(MUTEX_HELD(&q_handle->q_mutex));

	/* The only valid "header" sizes for an OLI are 8, 12, or 16 bytes */
	ASSERT((hdrsize == 8) || (hdrsize == 12) || (hdrsize == 16));

	/* make sure enough room for 1 OLI */
	ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_imm_t));

	/* Store the offset of the top of this descriptor block */
	qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
	    qbuf->qb_ptrs.qp_begin);

	/* Setup OpenHCI OLI Header */
	desc = (hci1394_desc_imm_t *)qbuf->qb_ptrs.qp_current;
	data = DESC_AT_OLI | (hdrsize & DESC_HDR_REQCOUNT_MASK);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, 0);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, cmd->qc_timestamp);

	/* Setup 1394 Header */
	tcode = (hdr->q1 & DESC_PKT_TCODE_MASK) >> DESC_PKT_TCODE_SHIFT;
	if ((tcode == IEEE1394_TCODE_WRITE_QUADLET) ||
	    (tcode == IEEE1394_TCODE_READ_QUADLET_RESP)) {
		/*
		 * if the tcode = a quadlet write, move the last quadlet as
		 * 8-bit data.  All data is treated as 8-bit data (even quadlet
		 * reads and writes). Therefore, target drivers MUST take that
		 * into consideration when accessing device registers.
		 */
		ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1, 3,
		    DDI_DEV_AUTOINCR);
		ddi_rep_put8(qbuf->qb_buf.bi_handle, (uint8_t *)&hdr->q4,
		    (uint8_t *)&desc->q4, 4, DDI_DEV_AUTOINCR);
	} else {
		ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1,
		    hdrsize >> 2, DDI_DEV_AUTOINCR);
	}

	/*
	 * We wrote 2 16 byte blocks in the descriptor buffer, update the count
	 * accordingly.
	 */
	q_handle->q_block_cnt += 2;

	/*
	 * Sync buffer in case DMA engine currently running. This must be done
	 * before writing the command pointer in the previous descriptor.
	 */
	(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
	    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);

	/* save away the status address for quick access in at_next() */
	cmd->qc_status_addr = &desc->status;

	/*
	 * Setup the command pointer.  This tells the HW where to get the
	 * descriptor we just setup.  This includes the IO address along with
	 * a 4 bit 16 byte block count.
	 */
	command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
	    ].dmac_address + qbuf->qb_ptrs.qp_offset) | (q_handle->q_block_cnt &
	    DESC_Z_MASK));

	/*
	 * if we previously setup a descriptor, add this new descriptor into
	 * the previous descriptor's "next" pointer.
	 */
	if (q_handle->q_previous != NULL) {
		ddi_put32(qbuf->qb_buf.bi_handle, &q_handle->q_previous->branch,
		    command_ptr);
		/* Sync buffer again, this gets the command pointer */
		(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
		    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * this is now the previous descriptor.  Update the current pointer,
	 * clear the block count and reserved size since this is the end of
	 * this command.
	 */
	q_handle->q_previous = (hci1394_desc_t *)desc;
	qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_imm_t);
	q_handle->q_block_cnt = 0;
	qbuf->qb_ptrs.qp_resv_size = 0;

	/* save away cleanup info when we are done with the command */
	cmd->qc_descriptor_buf = qbuf->qb_ptrs.qp_current_buf;
	cmd->qc_descriptor_end = qbuf->qb_ptrs.qp_current - 1;

	/* If the DMA is not running, start it */
	if (q_handle->q_dma_running == B_FALSE) {
		q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
		    command_ptr);
		q_handle->q_dma_running = B_TRUE;
	/* the DMA is running, wake it up */
	} else {
		q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
	}
}
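
/*
 * Worked example of the command pointer encoding (hypothetical addresses):
 * if the descriptor block starts at IO address 0x12340020 (qp_offset 0x20
 * into a cookie at 0x12340000) and the block holds one OLI (2 16-byte
 * blocks), then command_ptr = 0x12340020 | 2 = 0x12340022.  The low 4 bits
 * (Z) tell the HW how many 16 byte blocks make up the descriptor block, so
 * the block address must be 16 byte aligned, which the buffer alignment
 * requested in hci1394_q_init() guarantees.
 */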

/*
 * hci1394_q_at_write_OL()
 *    Write an OL descriptor into the AT descriptor buffer passed in as qbuf.
 *    Buffer state information is stored in cmd.  The IO address of the data
 *    buffer is passed in io_addr.  Size is the size of the data to be
 *    transferred.
 */
void
hci1394_q_at_write_OL(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
    hci1394_q_cmd_t *cmd, uint32_t io_addr, uint_t size)
{
	hci1394_desc_t *desc;
	uint32_t data;
	uint32_t command_ptr;


	ASSERT(q_handle != NULL);
	ASSERT(qbuf != NULL);
	ASSERT(cmd != NULL);
	ASSERT(MUTEX_HELD(&q_handle->q_mutex));

	/* make sure enough room for OL */
	ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_t));

	/* Setup OpenHCI OL Header */
	desc = (hci1394_desc_t *)qbuf->qb_ptrs.qp_current;
	data = DESC_AT_OL | (size & DESC_HDR_REQCOUNT_MASK);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, io_addr);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, 0);

	/*
	 * We wrote 1 16 byte block in the descriptor buffer, update the count
	 * accordingly.
	 */
	q_handle->q_block_cnt++;

	/*
	 * Sync buffer in case DMA engine currently running. This must be done
	 * before writing the command pointer in the previous descriptor.
	 */
	(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
	    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);

	/* save away the status address for quick access in at_next() */
	cmd->qc_status_addr = &desc->status;

	/*
	 * Setup the command pointer.  This tells the HW where to get the
	 * descriptor we just setup.  This includes the IO address along with
	 * a 4 bit 16 byte block count.
	 */
	command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
	    ].dmac_address + qbuf->qb_ptrs.qp_offset) | (q_handle->q_block_cnt &
	    DESC_Z_MASK));

	/*
	 * if we previously setup a descriptor, add this new descriptor into
	 * the previous descriptor's "next" pointer.
	 */
	if (q_handle->q_previous != NULL) {
		ddi_put32(qbuf->qb_buf.bi_handle, &q_handle->q_previous->branch,
		    command_ptr);
		/* Sync buffer again, this gets the command pointer */
		(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
		    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * this is now the previous descriptor.  Update the current pointer,
	 * clear the block count and reserved size since this is the end of
	 * this command.
	 */
	q_handle->q_previous = desc;
	qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_t);
	q_handle->q_block_cnt = 0;
	qbuf->qb_ptrs.qp_resv_size = 0;

	/* save away cleanup info when we are done with the command */
	cmd->qc_descriptor_buf = qbuf->qb_ptrs.qp_current_buf;
	cmd->qc_descriptor_end = qbuf->qb_ptrs.qp_current - 1;

	/* If the DMA is not running, start it */
	if (q_handle->q_dma_running == B_FALSE) {
		q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
		    command_ptr);
		q_handle->q_dma_running = B_TRUE;
	/* the DMA is running, wake it up */
	} else {
		q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
	}
}

/*
 * hci1394_q_at_rep_put8()
 *    Copy a byte stream from a kernel virtual address (data) to an IO mapped
 *    data buffer (qbuf).  Copy datasize bytes.  State information for the
 *    data buffer is kept in cmd.
 */
void
hci1394_q_at_rep_put8(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
    uint8_t *data, uint_t datasize)
{
	ASSERT(qbuf != NULL);
	ASSERT(cmd != NULL);
	ASSERT(data != NULL);

	/* Make sure enough room for data */
	ASSERT(qbuf->qb_ptrs.qp_resv_size >= datasize);

	/* Copy in data into the data buffer */
	ddi_rep_put8(qbuf->qb_buf.bi_handle, data,
	    (uint8_t *)qbuf->qb_ptrs.qp_current, datasize, DDI_DEV_AUTOINCR);

	/* Update the current pointer, offset, and reserved size */
	qbuf->qb_ptrs.qp_current += datasize;
	qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
	    qbuf->qb_ptrs.qp_begin);
	qbuf->qb_ptrs.qp_resv_size -= datasize;

	/* save away cleanup info when we are done with the command */
	cmd->qc_data_used = B_TRUE;
	cmd->qc_data_buf = qbuf->qb_ptrs.qp_current_buf;
	cmd->qc_data_end = qbuf->qb_ptrs.qp_current - 1;

	/* Sync data buffer */
	(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
	    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
}

/*
 * hci1394_q_at_copy_from_mblk()
 *    Copy a byte stream from a mblk(s) to an IO mapped data buffer (qbuf).
 *    Copy mblk->length bytes. The services layer and the hal use a private
 *    structure (h1394_mblk_t) to keep track of how much of the mblk to send
 *    since we may have to break the transfer up into smaller blocks (i.e. a
 *    1MByte block write would go out in 2KByte chunks). State information
 *    for the data buffer is kept in cmd.
 */
static void
hci1394_q_at_copy_from_mblk(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
    h1394_mblk_t *mblk)
{
	uint_t bytes_left;
	uint_t length;


	ASSERT(qbuf != NULL);
	ASSERT(cmd != NULL);
	ASSERT(mblk != NULL);

	/* We return these variables to the Services Layer when we are done */
	mblk->next_offset = mblk->curr_offset;
	mblk->next_mblk = mblk->curr_mblk;
	bytes_left = mblk->length;

	/* do while there are bytes left to copy */
	do {
		/*
		 * If the entire data portion of the current block transfer is
		 * contained within a single mblk.
		 */
		if ((mblk->next_offset + bytes_left) <=
		    (mblk->next_mblk->b_wptr)) {
			/* Copy the data into the data Q */
			hci1394_q_at_rep_put8(qbuf, cmd,
			    (uint8_t *)mblk->next_offset, bytes_left);

			/* increment the mblk offset */
			mblk->next_offset += bytes_left;

			/* we have no more bytes to put into the buffer */
			bytes_left = 0;

			/*
			 * If our offset is at the end of data in this mblk, go
			 * to the next mblk.
			 */
			if (mblk->next_offset >= mblk->next_mblk->b_wptr) {
				mblk->next_mblk = mblk->next_mblk->b_cont;
				if (mblk->next_mblk != NULL) {
					mblk->next_offset =
					    mblk->next_mblk->b_rptr;
				}
			}

		/*
		 * The data portion of the current block transfer is spread
		 * across two or more mblk's
		 */
		} else {
			/* Figure out how much data is in this mblk */
			length = mblk->next_mblk->b_wptr - mblk->next_offset;

			/* Copy the data into the atreq data Q */
			hci1394_q_at_rep_put8(qbuf, cmd,
			    (uint8_t *)mblk->next_offset, length);

			/* update the bytes left count, go to the next mblk */
			bytes_left = bytes_left - length;
			mblk->next_mblk = mblk->next_mblk->b_cont;
			ASSERT(mblk->next_mblk != NULL);
			mblk->next_offset = mblk->next_mblk->b_rptr;
		}
	} while (bytes_left > 0);
}

/*
 * hci1394_q_ar_next()
 *    Return an address to the next received AR packet.  If there are no more
 *    AR packets in the buffer, q_addr will be set to NULL.
 */
void
hci1394_q_ar_next(hci1394_q_handle_t q_handle, uint32_t **q_addr)
{
	hci1394_desc_t *desc;
	hci1394_q_buf_t *descb;
	hci1394_q_buf_t *datab;
	uint32_t residual_count;


	ASSERT(q_handle != NULL);
	ASSERT(q_addr != NULL);

	descb = &q_handle->q_desc;
	datab = &q_handle->q_data;

	/* Sync Descriptor buffer */
	(void) ddi_dma_sync(descb->qb_buf.bi_dma_handle, 0,
	    descb->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Check residual in current IM count vs q_space_left to see if we have
	 * received any more responses
	 */
	desc = (hci1394_desc_t *)q_handle->q_head;
	residual_count = ddi_get32(descb->qb_buf.bi_handle, &desc->status);
	residual_count &= DESC_ST_RESCOUNT_MASK;
	if (residual_count >= q_handle->q_space_left) {
		/* No new packets received */
		*q_addr = NULL;
		return;
	}

	/* Sync Data Q */
	(void) ddi_dma_sync(datab->qb_buf.bi_dma_handle, 0,
	    datab->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * We have a new packet, return the address of the start of the
	 * packet.
	 */
	*q_addr = (uint32_t *)datab->qb_ptrs.qp_current;
}
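
/*
 * Illustrative sketch of the AR consumption pattern (the real parsing lives
 * in the async layer; packet size determination is sketched, not shown):
 *
 *	uint32_t *pkt;
 *
 *	for (;;) {
 *		hci1394_q_ar_next(q_handle, &pkt);
 *		if (pkt == NULL) {
 *			break;	(no more received packets)
 *		}
 *		q1 = hci1394_q_ar_get32(q_handle, pkt);
 *		... determine the packet size from its header ...
 *		hci1394_q_ar_free(q_handle, packet_size);
 *	}
 */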

/*
 * hci1394_q_ar_free()
 *    Free the space used by the AR packet at the top of the data buffer. AR
 *    packets are processed in the order that they are received.  This will
 *    free the oldest received packet which has not yet been freed.  size is
 *    how much space the packet takes up.
 */
void
hci1394_q_ar_free(hci1394_q_handle_t q_handle, uint_t size)
{
	hci1394_q_buf_t *descb;
	hci1394_q_buf_t *datab;


	ASSERT(q_handle != NULL);

	descb = &q_handle->q_desc;
	datab = &q_handle->q_data;

	/*
	 * Packet is in multiple buffers. Theoretically a packet could span
	 * more than two buffers for an ARRESP.  Since the buffers should be
	 * in at least 4K increments this will not happen since the max packet
	 * size is 2KBytes.
	 */
	if ((datab->qb_ptrs.qp_current + size) > datab->qb_ptrs.qp_end) {
		/* Add IM descriptor for used buffer back into Q */
		hci1394_q_ar_write_IM(q_handle, descb,
		    datab->qb_cookie[datab->qb_ptrs.qp_current_buf
		    ].dmac_address,
		    datab->qb_cookie[datab->qb_ptrs.qp_current_buf].dmac_size);

		/* Go to the next buffer */
		hci1394_q_next_buf(datab);

		/* Update the next buffer's pointers for the partial packet */
		size -= q_handle->q_space_left;
		datab->qb_ptrs.qp_current += size;
		q_handle->q_space_left =
		    datab->qb_cookie[datab->qb_ptrs.qp_current_buf].dmac_size -
		    size;

		/* Change the head pointer to the next IM descriptor */
		q_handle->q_head += sizeof (hci1394_desc_t);
		if ((q_handle->q_head + sizeof (hci1394_desc_t)) >
		    (descb->qb_ptrs.qp_bottom + 1)) {
			q_handle->q_head = descb->qb_ptrs.qp_top;
		}

	/* Packet is only in one buffer */
	} else {
		q_handle->q_space_left -= size;
		datab->qb_ptrs.qp_current += size;
	}
}

/*
 * hci1394_q_ar_get32()
 *    Read a quadlet of data regardless if it is in the current buffer or has
 *    wrapped to the top buffer.  If the address passed to this routine is
 *    past the bottom of the data buffer, this routine will automatically
 *    wrap back to the top of the Q and look in the correct offset from the
 *    top. Copy the data into the kernel virtual address provided.
 */
uint32_t
hci1394_q_ar_get32(hci1394_q_handle_t q_handle, uint32_t *addr)
{
	hci1394_q_buf_t *data;
	uintptr_t new_addr;
	uint32_t data32;


	ASSERT(q_handle != NULL);
	ASSERT(addr != NULL);

	data = &q_handle->q_data;

	/*
	 * if the data has wrapped to the top of the buffer, adjust the
	 * address.
	 */
	if ((uintptr_t)addr > (uintptr_t)data->qb_ptrs.qp_bottom) {
		new_addr = (uintptr_t)data->qb_ptrs.qp_top + ((uintptr_t)addr -
		    ((uintptr_t)data->qb_ptrs.qp_bottom + (uintptr_t)1));
		data32 = ddi_get32(data->qb_buf.bi_handle,
		    (uint32_t *)new_addr);

	/* data is before end of buffer */
	} else {
		data32 = ddi_get32(data->qb_buf.bi_handle, addr);
	}

	return (data32);
}
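
/*
 * Worked example of the wrap adjustment (hypothetical addresses): with
 * qp_top = 0x8000 and qp_bottom = 0x8fff, a caller-supplied addr of 0x9004
 * is past the bottom, so new_addr = 0x8000 + (0x9004 - 0x9000) = 0x8004 and
 * the quadlet is read 4 bytes in from the top of the buffer.
 */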

/*
 * hci1394_q_ar_rep_get8()
 *    Read a byte stream of data regardless if it is contiguous or has
 *    partially or fully wrapped to the top buffer.  If the address passed to
 *    this routine is past the bottom of the data buffer, or address + size
 *    extends past the bottom of the data buffer, this routine will
 *    automatically wrap back to the top of the Q and look in the correct
 *    offset from the top.  Copy the data into the kernel virtual address
 *    provided.
 */
void
hci1394_q_ar_rep_get8(hci1394_q_handle_t q_handle, uint8_t *dest,
    uint8_t *q_addr, uint_t size)
{
	hci1394_q_buf_t *data;
	uintptr_t new_addr;
	uint_t new_size;
	uintptr_t new_dest;


	ASSERT(q_handle != NULL);
	ASSERT(dest != NULL);
	ASSERT(q_addr != NULL);

	data = &q_handle->q_data;

	/*
	 * There are three cases:
	 *   1) All of the data has wrapped.
	 *   2) Some of the data has not wrapped and some has wrapped.
	 *   3) None of the data has wrapped.
	 */

	/* All of the data has wrapped, just adjust the starting address */
	if ((uintptr_t)q_addr > (uintptr_t)data->qb_ptrs.qp_bottom) {
		new_addr = (uintptr_t)data->qb_ptrs.qp_top +
		    ((uintptr_t)q_addr - ((uintptr_t)data->qb_ptrs.qp_bottom +
		    (uintptr_t)1));
		ddi_rep_get8(data->qb_buf.bi_handle, dest, (uint8_t *)new_addr,
		    size, DDI_DEV_AUTOINCR);

	/*
	 * Some of the data has wrapped. Copy the data that hasn't wrapped,
	 * adjust the address, then copy the rest.
	 */
	} else if (((uintptr_t)q_addr + (uintptr_t)size) >
	    ((uintptr_t)data->qb_ptrs.qp_bottom + (uintptr_t)1)) {
		/* Copy first half */
		new_size = (uint_t)(((uintptr_t)data->qb_ptrs.qp_bottom +
		    (uintptr_t)1) - (uintptr_t)q_addr);
		ddi_rep_get8(data->qb_buf.bi_handle, dest, q_addr, new_size,
		    DDI_DEV_AUTOINCR);

		/* copy second half */
		new_dest = (uintptr_t)dest + (uintptr_t)new_size;
		new_size = size - new_size;
		new_addr = (uintptr_t)data->qb_ptrs.qp_top;
		ddi_rep_get8(data->qb_buf.bi_handle, (uint8_t *)new_dest,
		    (uint8_t *)new_addr, new_size, DDI_DEV_AUTOINCR);

	/* None of the data has wrapped */
	} else {
		ddi_rep_get8(data->qb_buf.bi_handle, dest, q_addr, size,
		    DDI_DEV_AUTOINCR);
	}
}

/*
 * hci1394_q_ar_copy_to_mblk()
 *    Read a byte stream of data regardless if it is contiguous or has
 *    partially or fully wrapped to the top buffer.  If the address passed to
 *    this routine is past the bottom of the data buffer, or address + size
 *    extends past the bottom of the data buffer, this routine will
 *    automatically wrap back to the top of the Q and look in the correct
 *    offset from the top.  Copy the data into the mblk provided. The
 *    services layer and the hal use a private structure (h1394_mblk_t) to
 *    keep track of how much of the mblk to receive into since we may have
 *    to break the transfer up into smaller blocks (i.e. a 1MByte block read
 *    would go out in 2KByte requests).
 */
void
hci1394_q_ar_copy_to_mblk(hci1394_q_handle_t q_handle, uint8_t *addr,
    h1394_mblk_t *mblk)
{
	uint8_t *new_addr;
	uint_t bytes_left;
	uint_t length;


	ASSERT(q_handle != NULL);
	ASSERT(addr != NULL);
	ASSERT(mblk != NULL);

	/* We return these variables to the Services Layer when we are done */
	mblk->next_offset = mblk->curr_offset;
	mblk->next_mblk = mblk->curr_mblk;
	bytes_left = mblk->length;

	/* the address we copy from will change as we change mblks */
	new_addr = addr;

	/* do while there are bytes left to copy */
	do {
		/*
		 * If the entire data portion of the current block transfer is
		 * contained within a single mblk.
		 */
		if ((mblk->next_offset + bytes_left) <=
		    (mblk->next_mblk->b_datap->db_lim)) {
			/* Copy the data into the mblk */
			hci1394_q_ar_rep_get8(q_handle,
			    (uint8_t *)mblk->next_offset, new_addr,
			    bytes_left);

			/* increment the offset */
			mblk->next_offset += bytes_left;
			mblk->next_mblk->b_wptr = mblk->next_offset;

			/* we have no more bytes to put into the buffer */
			bytes_left = 0;

			/*
			 * If our offset is at the end of data in this mblk, go
			 * to the next mblk.
			 */
			if (mblk->next_offset >=
			    mblk->next_mblk->b_datap->db_lim) {
				mblk->next_mblk = mblk->next_mblk->b_cont;
				if (mblk->next_mblk != NULL) {
					mblk->next_offset =
					    mblk->next_mblk->b_wptr;
				}
			}

		/*
		 * The data portion of the current block transfer is spread
		 * across two or more mblk's
		 */
		} else {
			/* Figure out how much data is in this mblk */
			length = mblk->next_mblk->b_datap->db_lim -
			    mblk->next_offset;

			/* Copy the data into the mblk */
			hci1394_q_ar_rep_get8(q_handle,
			    (uint8_t *)mblk->next_offset, new_addr, length);
			mblk->next_mblk->b_wptr =
			    mblk->next_mblk->b_datap->db_lim;

			/*
			 * update the bytes left and address to copy from, go
			 * to the next mblk.
			 */
			bytes_left = bytes_left - length;
			new_addr = (uint8_t *)((uintptr_t)new_addr +
			    (uintptr_t)length);
			mblk->next_mblk = mblk->next_mblk->b_cont;
			ASSERT(mblk->next_mblk != NULL);
			mblk->next_offset = mblk->next_mblk->b_wptr;
		}
	} while (bytes_left > 0);
}

/*
 * hci1394_q_ar_write_IM()
 *    Write an IM descriptor into the AR descriptor buffer passed in as qbuf.
 *    The IO address of the data buffer is passed in io_addr.  datasize is
 *    the size of the data buffer to receive into.
 */
void
hci1394_q_ar_write_IM(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
    uint32_t io_addr, uint_t datasize)
{
	hci1394_desc_t *desc;
	uint32_t data;
	uint32_t command_ptr;


	ASSERT(q_handle != NULL);
	ASSERT(qbuf != NULL);

	/* Make sure enough room for IM */
	if ((qbuf->qb_ptrs.qp_current + sizeof (hci1394_desc_t)) >
	    (qbuf->qb_ptrs.qp_bottom + 1)) {
		hci1394_q_next_buf(qbuf);
	} else {
		/* Store the offset of the top of this descriptor block */
		qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
		    qbuf->qb_ptrs.qp_begin);
	}

	/* Setup OpenHCI IM Header */
	desc = (hci1394_desc_t *)qbuf->qb_ptrs.qp_current;
	data = DESC_AR_IM | (datasize & DESC_HDR_REQCOUNT_MASK);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, io_addr);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, datasize &
	    DESC_ST_RESCOUNT_MASK);

	/*
	 * Sync buffer in case DMA engine currently running. This must be done
	 * before writing the command pointer in the previous descriptor.
	 */
	(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
	    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);

	/*
	 * Setup the command pointer.  This tells the HW where to get the
	 * descriptor we just setup.  This includes the IO address along with
	 * a 4 bit 16 byte block count.  We only wrote 1 16 byte block.
	 */
	command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
	    ].dmac_address + qbuf->qb_ptrs.qp_offset) | 1);

	/*
	 * if we previously setup a descriptor, add this new descriptor into
	 * the previous descriptor's "next" pointer.
	 */
	if (q_handle->q_previous != NULL) {
		ddi_put32(qbuf->qb_buf.bi_handle,
		    &q_handle->q_previous->branch, command_ptr);
		/* Sync buffer again, this gets the command pointer */
		(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
		    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
	}

	/* this is the new previous descriptor.  Update the current pointer */
	q_handle->q_previous = desc;
	qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_t);

	/* If the DMA is not running, start it */
	if (q_handle->q_dma_running == B_FALSE) {
		q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
		    command_ptr);
		q_handle->q_dma_running = B_TRUE;
	/* the DMA is running, wake it up */
	} else {
		q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
	}
}