/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <asm/unaligned.h>

#define CALC_TXRX_PADDED_LEN(dev, len)  (__ALIGN_MASK((len), (dev)->block_mask))
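/*
 * Worked example (assuming a 128-byte mbox block size, i.e. block_mask of
 * 0x7f): a 1540-byte transfer is rounded up to 1664 bytes so every read or
 * write ends exactly on a block boundary.
 */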
static void htc_prep_send_pkt(struct htc_packet *packet, u8 flags, int ctrl0,
                              int ctrl1)
{
        struct htc_frame_hdr *hdr;

        packet->buf -= HTC_HDR_LENGTH;
        hdr = (struct htc_frame_hdr *)packet->buf;

        /* payld_len may be unaligned, use the safe accessor */
        put_unaligned((u16)packet->act_len, &hdr->payld_len);
        hdr->flags = flags;
        hdr->eid = packet->endpoint;
        hdr->ctrl[0] = ctrl0;
        hdr->ctrl[1] = ctrl1;
}
static void htc_reclaim_txctrl_buf(struct htc_target *target,
                                   struct htc_packet *pkt)
{
        spin_lock_bh(&target->htc_lock);
        list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
        spin_unlock_bh(&target->htc_lock);
}
static struct htc_packet *htc_get_control_buf(struct htc_target *target,
                                              bool tx)
{
        struct htc_packet *packet = NULL;
        struct list_head *buf_list;

        buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;

        spin_lock_bh(&target->htc_lock);

        if (list_empty(buf_list)) {
                spin_unlock_bh(&target->htc_lock);
                return NULL;
        }

        packet = list_first_entry(buf_list, struct htc_packet, list);
        list_del(&packet->list);
        spin_unlock_bh(&target->htc_lock);

        if (tx)
                packet->buf = packet->buf_start + HTC_HDR_LENGTH;

        return packet;
}
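/*
 * Per-packet TX completion bookkeeping: the HTC header that was prepended
 * for the send is stripped again and, if the send failed, the credits the
 * packet consumed are handed back to the credit distribution code.
 */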
static void htc_tx_comp_update(struct htc_target *target,
                               struct htc_endpoint *endpoint,
                               struct htc_packet *packet)
{
        packet->completion = NULL;
        packet->buf += HTC_HDR_LENGTH;

        if (!packet->status)
                return;

        ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
                   packet->status, packet->endpoint, packet->act_len,
                   packet->info.tx.cred_used);

        /* on failure to submit, reclaim credits for this packet */
        spin_lock_bh(&target->tx_lock);
        endpoint->cred_dist.cred_to_dist +=
                                packet->info.tx.cred_used;
        endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
                   target->cred_dist_cntxt, &target->cred_dist_list);

        ath6k_credit_distribute(target->cred_dist_cntxt,
                                &target->cred_dist_list,
                                HTC_CREDIT_DIST_SEND_COMPLETE);

        spin_unlock_bh(&target->tx_lock);
}
static void htc_tx_complete(struct htc_endpoint *endpoint,
                            struct list_head *txq)
{
        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                   "send complete ep %d, (%d pkts)\n",
                   endpoint->eid, get_queue_depth(txq));

        ath6kl_tx_complete(endpoint->target->dev->ar, txq);
}
static void htc_tx_comp_handler(struct htc_target *target,
                                struct htc_packet *packet)
{
        struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
        struct list_head container;

        htc_tx_comp_update(target, endpoint, packet);
        INIT_LIST_HEAD(&container);
        list_add_tail(&packet->list, &container);

        htc_tx_complete(endpoint, &container);
}
static void htc_async_tx_scat_complete(struct htc_target *target,
                                       struct hif_scatter_req *scat_req)
{
        struct htc_endpoint *endpoint;
        struct htc_packet *packet;
        struct list_head tx_compq;
        int i;

        INIT_LIST_HEAD(&tx_compq);

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                   "htc_async_tx_scat_complete total len: %d entries: %d\n",
                   scat_req->len, scat_req->scat_entries);

        if (scat_req->status)
                ath6kl_err("send scatter req failed: %d\n", scat_req->status);

        packet = scat_req->scat_list[0].packet;
        endpoint = &target->endpoint[packet->endpoint];

        /* walk through the scatter list and process */
        for (i = 0; i < scat_req->scat_entries; i++) {
                packet = scat_req->scat_list[i].packet;
                packet->status = scat_req->status;
                htc_tx_comp_update(target, endpoint, packet);
                list_add_tail(&packet->list, &tx_compq);
        }

        /* free scatter request */
        hif_scatter_req_add(target->dev->ar, scat_req);

        /* complete all packets */
        htc_tx_complete(endpoint, &tx_compq);
}
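/*
 * Push a single, already-prepared packet to the target over the mailbox.
 * A packet without a completion callback is written synchronously;
 * otherwise the HIF layer is asked for an asynchronous block write.
 */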
static int htc_issue_send(struct htc_target *target, struct htc_packet *packet)
{
        int status;
        bool sync = false;
        u32 padded_len, send_len;

        if (!packet->completion)
                sync = true;

        send_len = packet->act_len + HTC_HDR_LENGTH;

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
                   __func__, send_len, sync ? "sync" : "async");

        padded_len = CALC_TXRX_PADDED_LEN(target, send_len);

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                   "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
                   padded_len,
                   target->dev->ar->mbox_info.htc_addr,
                   sync ? "sync" : "async");

        if (sync) {
                status = hif_read_write_sync(target->dev->ar,
                                target->dev->ar->mbox_info.htc_addr,
                                packet->buf, padded_len,
                                HIF_WR_SYNC_BLOCK_INC);

                packet->status = status;
                packet->buf += HTC_HDR_LENGTH;
        } else
                status = hif_write_async(target->dev->ar,
                                target->dev->ar->mbox_info.htc_addr,
                                packet->buf, padded_len,
                                HIF_WR_ASYNC_BLOCK_INC, packet);

        return status;
}
static int htc_check_credits(struct htc_target *target,
                             struct htc_endpoint *ep, u8 *flags,
                             enum htc_endpoint_id eid, unsigned int len,
                             int *req_cred)
{
        *req_cred = (len > target->tgt_cred_sz) ?
                    DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
                   *req_cred, ep->cred_dist.credits);

        if (ep->cred_dist.credits < *req_cred) {
                if (eid == ENDPOINT_0)
                        return -EINVAL;

                /* Seek more credits */
                ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
                           target->cred_dist_cntxt, &ep->cred_dist);

                ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);

                ep->cred_dist.seek_cred = 0;

                if (ep->cred_dist.credits < *req_cred) {
                        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                                   "not enough credits for ep %d - leaving packet in queue\n",
                                   eid);
                        return -EINVAL;
                }
        }

        ep->cred_dist.credits -= *req_cred;
        ep->ep_st.cred_cosumd += *req_cred;

        /* When we are getting low on credits, ask for more */
        if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
                ep->cred_dist.seek_cred =
                        ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
                           target->cred_dist_cntxt, &ep->cred_dist);

                ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);

                /* see if we were successful in getting more */
                if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
                        /* tell the target we need credits ASAP! */
                        *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
                        ep->ep_st.cred_low_indicate += 1;
                        ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
                }
        }

        return 0;
}
static void htc_tx_pkts_get(struct htc_target *target,
                            struct htc_endpoint *endpoint,
                            struct list_head *queue)
{
        int req_cred;
        u8 flags;
        struct htc_packet *packet;
        unsigned int len;

        while (true) {
                flags = 0;

                if (list_empty(&endpoint->txq))
                        break;
                packet = list_first_entry(&endpoint->txq, struct htc_packet,
                                          list);

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                           "got head pkt:0x%p , queue depth: %d\n",
                           packet, get_queue_depth(&endpoint->txq));

                len = CALC_TXRX_PADDED_LEN(target,
                                           packet->act_len + HTC_HDR_LENGTH);

                if (htc_check_credits(target, endpoint, &flags,
                                      packet->endpoint, len, &req_cred))
                        break;

                /* now we can fully move onto caller's queue */
                packet = list_first_entry(&endpoint->txq, struct htc_packet,
                                          list);
                list_move_tail(&packet->list, queue);

                /* save the number of credits this packet consumed */
                packet->info.tx.cred_used = req_cred;

                /* all TX packets are handled asynchronously */
                packet->completion = htc_tx_comp_handler;
                packet->context = target;
                endpoint->ep_st.tx_issued += 1;

                /* save send flags */
                packet->info.tx.flags = flags;
                packet->info.tx.seqno = endpoint->seqno;
                endpoint->seqno++;
        }
}
/* See if the padded tx length falls on a credit boundary */
static int htc_get_credit_padding(unsigned int cred_sz, int *len,
                                  struct htc_endpoint *ep)
{
        int rem_cred, cred_pad;

        rem_cred = *len % cred_sz;

        /* No padding needed */
        if (!rem_cred)
                return 0;

        if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
                return -1;

        /*
         * The transfer consumes a "partial" credit, this
         * packet cannot be bundled unless we add
         * additional "dummy" padding (max 255 bytes) to
         * consume the entire credit.
         */
        cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;

        if ((cred_pad > 0) && (cred_pad <= 255))
                *len += cred_pad;
        else
                /* The amount of padding is too large, send as non-bundled */
                return -1;

        return cred_pad;
}
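/*
 * Fill a HIF scatter request from the pending TX queue. Packets are added
 * until the scatter space is exhausted or a packet would leave a partial
 * credit; too-small bundles are rolled back onto the queue.
 */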
static int htc_setup_send_scat_list(struct htc_target *target,
                                    struct htc_endpoint *endpoint,
                                    struct hif_scatter_req *scat_req,
                                    int n_scat,
                                    struct list_head *queue)
{
        struct htc_packet *packet;
        int i, len, rem_scat, cred_pad;
        int status = 0;

        rem_scat = target->max_tx_bndl_sz;

        for (i = 0; i < n_scat; i++) {
                scat_req->scat_list[i].packet = NULL;

                if (list_empty(queue))
                        break;

                packet = list_first_entry(queue, struct htc_packet, list);
                len = CALC_TXRX_PADDED_LEN(target,
                                           packet->act_len + HTC_HDR_LENGTH);

                cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
                                                  &len, endpoint);
                if (cred_pad < 0) {
                        status = -EINVAL;
                        break;
                }

                if (rem_scat < len) {
                        /* exceeds what we can transfer */
                        status = -ENOSPC;
                        break;
                }

                rem_scat -= len;
                /* now remove it from the queue */
                packet = list_first_entry(queue, struct htc_packet, list);
                list_del(&packet->list);

                scat_req->scat_list[i].packet = packet;
                /* prepare packet and flag message as part of a send bundle */
                htc_prep_send_pkt(packet,
                                  packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
                                  cred_pad, packet->info.tx.seqno);
                scat_req->scat_list[i].buf = packet->buf;
                scat_req->scat_list[i].len = len;

                scat_req->len += len;
                scat_req->scat_entries++;
                ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                           "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
                           i, packet, len, rem_scat);
        }

        /* Roll back scatter setup in case of any failure */
        if (status || (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
                for (i = scat_req->scat_entries - 1; i >= 0; i--) {
                        packet = scat_req->scat_list[i].packet;
                        if (packet) {
                                packet->buf += HTC_HDR_LENGTH;
                                list_add(&packet->list, queue);
                        }
                }
                return -EAGAIN;
        }

        return status;
}
/*
 * htc_issue_send_bundle: drain a queue and send as bundles
 * this function may return without fully draining the queue
 * when
 *
 *    1. scatter resources are exhausted
 *    2. a message that will consume a partial credit will stop the
 *       bundling process early
 *    3. we drop below the minimum number of messages for a bundle
 */
static void htc_issue_send_bundle(struct htc_endpoint *endpoint,
                                  struct list_head *queue,
                                  int *sent_bundle, int *n_bundle_pkts)
{
        struct htc_target *target = endpoint->target;
        struct hif_scatter_req *scat_req = NULL;
        int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;

        while (true) {
                n_scat = get_queue_depth(queue);
                n_scat = min(n_scat, target->msg_per_bndl_max);

                if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
                        /* not enough to bundle */
                        break;

                scat_req = hif_scatter_req_get(target->dev->ar);

                if (!scat_req) {
                        /* no scatter resources */
                        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                                   "no more scatter resources\n");
                        break;
                }

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
                           n_scat);

                scat_req->len = 0;
                scat_req->scat_entries = 0;

                if (htc_setup_send_scat_list(target, endpoint, scat_req,
                                             n_scat, queue)) {
                        hif_scatter_req_add(target->dev->ar, scat_req);
                        break;
                }

                /* send path is always asynchronous */
                scat_req->complete = htc_async_tx_scat_complete;
                n_sent_bundle++;
                tot_pkts_bundle += scat_req->scat_entries;

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                           "send scatter total bytes: %d , entries: %d\n",
                           scat_req->len, scat_req->scat_entries);
                ath6kldev_submit_scat_req(target->dev, scat_req, false);
        }

        *sent_bundle = n_sent_bundle;
        *n_bundle_pkts = tot_pkts_bundle;
        ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_issue_send_bundle (sent:%d)\n",
                   n_sent_bundle);
}
static void htc_tx_from_ep_txq(struct htc_target *target,
                               struct htc_endpoint *endpoint)
{
        struct list_head txq;
        struct htc_packet *packet;
        int bundle_sent;
        int n_pkts_bundle;

        spin_lock_bh(&target->tx_lock);

        endpoint->tx_proc_cnt++;
        if (endpoint->tx_proc_cnt > 1) {
                endpoint->tx_proc_cnt--;
                spin_unlock_bh(&target->tx_lock);
                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
                return;
        }

        /*
         * drain the endpoint TX queue for transmission as long
         * as we have enough credits.
         */
        INIT_LIST_HEAD(&txq);

        while (true) {

                if (list_empty(&endpoint->txq))
                        break;

                htc_tx_pkts_get(target, endpoint, &txq);

                if (list_empty(&txq))
                        break;

                spin_unlock_bh(&target->tx_lock);

                bundle_sent = 0;
                n_pkts_bundle = 0;

                while (true) {
                        /* try to send a bundle on each pass */
                        if ((target->tx_bndl_enable) &&
                            (get_queue_depth(&txq) >=
                             HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
                                int temp1 = 0, temp2 = 0;

                                htc_issue_send_bundle(endpoint, &txq,
                                                      &temp1, &temp2);
                                bundle_sent += temp1;
                                n_pkts_bundle += temp2;
                        }

                        if (list_empty(&txq))
                                break;

                        packet = list_first_entry(&txq, struct htc_packet,
                                                  list);
                        list_del(&packet->list);

                        htc_prep_send_pkt(packet, packet->info.tx.flags,
                                          0, packet->info.tx.seqno);
                        htc_issue_send(target, packet);
                }

                spin_lock_bh(&target->tx_lock);

                endpoint->ep_st.tx_bundles += bundle_sent;
                endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
        }

        endpoint->tx_proc_cnt = 0;
        spin_unlock_bh(&target->tx_lock);
}
static bool htc_try_send(struct htc_target *target,
                         struct htc_endpoint *endpoint,
                         struct htc_packet *tx_pkt)
{
        struct htc_ep_callbacks ep_cb;
        int txq_depth;
        bool overflow = false;

        ep_cb = endpoint->ep_cb;

        spin_lock_bh(&target->tx_lock);
        txq_depth = get_queue_depth(&endpoint->txq);
        spin_unlock_bh(&target->tx_lock);

        if (txq_depth >= endpoint->max_txq_depth)
                overflow = true;

        if (overflow)
                ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                           "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
                           endpoint->eid, overflow, txq_depth,
                           endpoint->max_txq_depth);

        if (overflow && ep_cb.tx_full) {
                ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                           "indicating overflowed tx packet: 0x%p\n", tx_pkt);

                if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
                    HTC_SEND_FULL_DROP) {
                        endpoint->ep_st.tx_dropped += 1;
                        return false;
                }
        }

        spin_lock_bh(&target->tx_lock);
        list_add_tail(&tx_pkt->list, &endpoint->txq);
        spin_unlock_bh(&target->tx_lock);

        htc_tx_from_ep_txq(target, endpoint);

        return true;
}
static void htc_chk_ep_txq(struct htc_target *target)
{
        struct htc_endpoint *endpoint;
        struct htc_endpoint_credit_dist *cred_dist;

        /*
         * Run through the credit distribution list to see if there are
         * packets queued. NOTE: no locks need to be taken since the
         * distribution list is not dynamic (cannot be re-ordered) and we
         * are not modifying any state.
         */
        list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
                endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;

                spin_lock_bh(&target->tx_lock);
                if (!list_empty(&endpoint->txq)) {
                        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                                   "ep %d has %d credits and %d packets in tx queue\n",
                                   cred_dist->endpoint,
                                   endpoint->cred_dist.credits,
                                   get_queue_depth(&endpoint->txq));
                        spin_unlock_bh(&target->tx_lock);
                        /*
                         * Try to start the stalled queue, this list is
                         * ordered by priority. If there are credits
                         * available the highest priority queue will get a
                         * chance to reclaim credits from lower priority
                         * ones.
                         */
                        htc_tx_from_ep_txq(target, endpoint);
                        spin_lock_bh(&target->tx_lock);
                }
                spin_unlock_bh(&target->tx_lock);
        }
}
static int htc_setup_tx_complete(struct htc_target *target)
{
        struct htc_packet *send_pkt = NULL;
        int status;

        send_pkt = htc_get_control_buf(target, true);

        if (!send_pkt)
                return -ENOMEM;

        if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
                struct htc_setup_comp_ext_msg *setup_comp_ext;
                u32 flags = 0;

                setup_comp_ext =
                        (struct htc_setup_comp_ext_msg *)send_pkt->buf;
                memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
                setup_comp_ext->msg_id =
                        cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);

                if (target->msg_per_bndl_max > 0) {
                        /* Indicate HTC bundling to the target */
                        flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
                        setup_comp_ext->msg_per_rxbndl =
                                target->msg_per_bndl_max;
                }

                memcpy(&setup_comp_ext->flags, &flags,
                       sizeof(setup_comp_ext->flags));
                set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
                                 sizeof(struct htc_setup_comp_ext_msg),
                                 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
        } else {
                struct htc_setup_comp_msg *setup_comp;
                setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
                memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
                setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
                set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
                                 sizeof(struct htc_setup_comp_msg),
                                 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
        }

        /* we want synchronous operation */
        send_pkt->completion = NULL;
        htc_prep_send_pkt(send_pkt, 0, 0, 0);
        status = htc_issue_send(target, send_pkt);

        if (send_pkt != NULL)
                htc_reclaim_txctrl_buf(target, send_pkt);

        return status;
}
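/*
 * Build the credit distribution list: ENDPOINT_0 always comes first,
 * followed by the connected endpoints in the caller-supplied service
 * priority order.
 */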
void htc_set_credit_dist(struct htc_target *target,
                         struct htc_credit_state_info *cred_dist_cntxt,
                         u16 srvc_pri_order[], int list_len)
{
        struct htc_endpoint *endpoint;
        int i, ep;

        target->cred_dist_cntxt = cred_dist_cntxt;

        list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
                      &target->cred_dist_list);

        for (i = 0; i < list_len; i++) {
                for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
                        endpoint = &target->endpoint[ep];
                        if (endpoint->svc_id == srvc_pri_order[i]) {
                                list_add_tail(&endpoint->cred_dist.list,
                                              &target->cred_dist_list);
                                break;
                        }
                }

                if (ep >= ENDPOINT_MAX) {
                        WARN_ON(1);
                        return;
                }
        }
}
int htc_tx(struct htc_target *target, struct htc_packet *packet)
{
        struct htc_endpoint *endpoint;
        struct list_head queue;

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                   "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
                   packet->endpoint, packet->buf, packet->act_len);

        if (packet->endpoint >= ENDPOINT_MAX) {
                WARN_ON(1);
                return -EINVAL;
        }

        endpoint = &target->endpoint[packet->endpoint];

        if (!htc_try_send(target, endpoint, packet)) {
                packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
                                 -ECANCELED : -ENOSPC;
                INIT_LIST_HEAD(&queue);
                list_add(&packet->list, &queue);
                htc_tx_complete(endpoint, &queue);
        }

        return 0;
}
/* flush endpoint TX queue */
void htc_flush_txep(struct htc_target *target,
                    enum htc_endpoint_id eid, u16 tag)
{
        struct htc_packet *packet, *tmp_pkt;
        struct list_head discard_q, container;
        struct htc_endpoint *endpoint = &target->endpoint[eid];

        if (!endpoint->svc_id) {
                WARN_ON(1);
                return;
        }

        /* initialize the discard queue */
        INIT_LIST_HEAD(&discard_q);

        spin_lock_bh(&target->tx_lock);

        list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
                if ((tag == HTC_TX_PACKET_TAG_ALL) ||
                    (tag == packet->info.tx.tag))
                        list_move_tail(&packet->list, &discard_q);
        }

        spin_unlock_bh(&target->tx_lock);

        list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
                packet->status = -ECANCELED;
                list_del(&packet->list);
                ath6kl_dbg(ATH6KL_DBG_TRC,
                           "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
                           packet, packet->act_len,
                           packet->endpoint, packet->info.tx.tag);

                INIT_LIST_HEAD(&container);
                list_add_tail(&packet->list, &container);
                htc_tx_complete(endpoint, &container);
        }
}
static void htc_flush_txep_all(struct htc_target *target)
{
        struct htc_endpoint *endpoint;
        int i;

        dump_cred_dist_stats(target);

        for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
                endpoint = &target->endpoint[i];
                if (endpoint->svc_id == 0)
                        /* not in use */
                        continue;
                htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
        }
}
void htc_indicate_activity_change(struct htc_target *target,
                                  enum htc_endpoint_id eid, bool active)
{
        struct htc_endpoint *endpoint = &target->endpoint[eid];
        bool dist = false;

        if (endpoint->svc_id == 0) {
                WARN_ON(1);
                return;
        }

        spin_lock_bh(&target->tx_lock);

        if (active) {
                if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
                        endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
                        dist = true;
                }
        } else {
                if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
                        endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
                        dist = true;
                }
        }

        if (dist) {
                endpoint->cred_dist.txq_depth =
                        get_queue_depth(&endpoint->txq);

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
                           target->cred_dist_cntxt, &target->cred_dist_list);

                ath6k_credit_distribute(target->cred_dist_cntxt,
                                        &target->cred_dist_list,
                                        HTC_CREDIT_DIST_ACTIVITY_CHANGE);
        }

        spin_unlock_bh(&target->tx_lock);

        if (dist && !active)
                htc_chk_ep_txq(target);
}
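/* HTC receive path: look-ahead handling, buffer management and bundling */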
static inline void htc_update_rx_stats(struct htc_endpoint *endpoint,
                                       int n_look_ahds)
{
        endpoint->ep_st.rx_pkts++;
        if (n_look_ahds == 1)
                endpoint->ep_st.rx_lkahds++;
        else if (n_look_ahds > 1)
                endpoint->ep_st.rx_bundle_lkahd++;
}
static inline bool htc_valid_rx_frame_len(struct htc_target *target,
                                          enum htc_endpoint_id eid, int len)
{
        return (eid == target->dev->ar->ctrl_ep) ?
                len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
}
static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
{
        struct list_head queue;

        INIT_LIST_HEAD(&queue);
        list_add_tail(&packet->list, &queue);
        return htc_add_rxbuf_multiple(target, &queue);
}
static void htc_reclaim_rxbuf(struct htc_target *target,
                              struct htc_packet *packet,
                              struct htc_endpoint *ep)
{
        if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
                htc_rxpkt_reset(packet);
                packet->status = -ECANCELED;
                ep->ep_cb.rx(ep->target, packet);
        } else {
                htc_rxpkt_reset(packet);
                htc_add_rxbuf((void *)(target), packet);
        }
}
static void reclaim_rx_ctrl_buf(struct htc_target *target,
                                struct htc_packet *packet)
{
        spin_lock_bh(&target->htc_lock);
        list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
        spin_unlock_bh(&target->htc_lock);
}
static int dev_rx_pkt(struct htc_target *target, struct htc_packet *packet,
                      u32 rx_len)
{
        struct ath6kl_device *dev = target->dev;
        u32 padded_len;
        int status;

        padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);

        if (padded_len > packet->buf_len) {
                ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
                           padded_len, rx_len, packet->buf_len);
                return -ENOMEM;
        }

        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                   "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
                   packet, packet->info.rx.exp_hdr,
                   padded_len, dev->ar->mbox_info.htc_addr, "sync");

        status = hif_read_write_sync(dev->ar,
                                     dev->ar->mbox_info.htc_addr,
                                     packet->buf, padded_len,
                                     HIF_RD_SYNC_BLOCK_FIX);

        packet->status = status;

        return status;
}
/*
 * optimization for recv packets, we can indicate a
 * "hint" that there are more single-packets to fetch.
 */
static void set_rxpkt_indication_flag(u32 lk_ahd,
                                      struct htc_endpoint *endpoint,
                                      struct htc_packet *packet)
{
        struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;

        if (htc_hdr->eid == packet->endpoint) {
                if (!list_empty(&endpoint->rx_bufq))
                        packet->info.rx.indicat_flags |=
                                HTC_RX_FLAGS_INDICATE_MORE_PKTS;
        }
}
static void chk_rx_water_mark(struct htc_endpoint *endpoint)
{
        struct htc_ep_callbacks ep_cb = endpoint->ep_cb;

        if (ep_cb.rx_refill_thresh > 0) {
                spin_lock_bh(&endpoint->target->rx_lock);
                if (get_queue_depth(&endpoint->rx_bufq)
                    < ep_cb.rx_refill_thresh) {
                        spin_unlock_bh(&endpoint->target->rx_lock);
                        ep_cb.rx_refill(endpoint->target, endpoint->eid);
                        return;
                }
                spin_unlock_bh(&endpoint->target->rx_lock);
        }
}
/* This function is called with rx_lock held */
static int htc_setup_rxpkts(struct htc_target *target, struct htc_endpoint *ep,
                            u32 *lk_ahds, struct list_head *queue, int n_msg)
{
        struct htc_packet *packet;
        /* FIXME: type of lk_ahds can't be right */
        struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
        struct htc_ep_callbacks ep_cb;
        int status = 0, j, full_len;
        bool no_recycle;

        full_len = CALC_TXRX_PADDED_LEN(target,
                                        le16_to_cpu(htc_hdr->payld_len) +
                                        HTC_HDR_LENGTH);

        if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
                ath6kl_warn("Rx buffer requested with invalid length\n");
                return -EINVAL;
        }

        ep_cb = ep->ep_cb;
        for (j = 0; j < n_msg; j++) {

                /*
                 * Reset flag, any packets allocated using the
                 * rx_alloc() API cannot be recycled on
                 * cleanup, they must be explicitly returned.
                 */
                no_recycle = false;

                if (ep_cb.rx_allocthresh &&
                    (full_len > ep_cb.rx_alloc_thresh)) {
                        ep->ep_st.rx_alloc_thresh_hit += 1;
                        ep->ep_st.rxalloc_thresh_byte +=
                                le16_to_cpu(htc_hdr->payld_len);

                        spin_unlock_bh(&target->rx_lock);
                        no_recycle = true;

                        packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
                                                      full_len);
                        spin_lock_bh(&target->rx_lock);
                } else {
                        /* refill handler is being used */
                        if (list_empty(&ep->rx_bufq)) {
                                if (ep_cb.rx_refill) {
                                        spin_unlock_bh(&target->rx_lock);
                                        ep_cb.rx_refill(ep->target, ep->eid);
                                        spin_lock_bh(&target->rx_lock);
                                }
                        }

                        if (list_empty(&ep->rx_bufq))
                                packet = NULL;
                        else {
                                packet = list_first_entry(&ep->rx_bufq,
                                                struct htc_packet, list);
                                list_del(&packet->list);
                        }
                }

                if (!packet) {
                        target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
                        target->ep_waiting = ep->eid;
                        return -ENOSPC;
                }

                /* clear flags */
                packet->info.rx.rx_flags = 0;
                packet->info.rx.indicat_flags = 0;
                packet->status = 0;

                if (no_recycle)
                        /*
                         * flag that these packets cannot be
                         * recycled, they have to be returned to
                         * the user
                         */
                        packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

                /* Caller needs to free this upon any failure */
                list_add_tail(&packet->list, queue);

                if (target->htc_flags & HTC_OP_STATE_STOPPING) {
                        status = -ECANCELED;
                        break;
                }

                if (j) {
                        packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
                        packet->info.rx.exp_hdr = 0xFFFFFFFF;
                } else
                        /* set expected look ahead */
                        packet->info.rx.exp_hdr = *lk_ahds;

                packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
                        HTC_HDR_LENGTH;
        }

        return status;
}
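/*
 * Validate each pending look-ahead and allocate receive buffers for the
 * messages it describes, expanding bundle counts advertised in the HTC
 * header into multiple packets.
 */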
static int alloc_and_prep_rxpkts(struct htc_target *target,
                                 u32 lk_ahds[], int msg,
                                 struct htc_endpoint *endpoint,
                                 struct list_head *queue)
{
        int status = 0;
        struct htc_packet *packet, *tmp_pkt;
        struct htc_frame_hdr *htc_hdr;
        int i, n_msg;

        spin_lock_bh(&target->rx_lock);

        for (i = 0; i < msg; i++) {

                htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

                if (htc_hdr->eid >= ENDPOINT_MAX) {
                        ath6kl_err("invalid ep in look-ahead: %d\n",
                                   htc_hdr->eid);
                        status = -ENOMEM;
                        break;
                }

                if (htc_hdr->eid != endpoint->eid) {
                        ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
                                   htc_hdr->eid, endpoint->eid, i);
                        status = -ENOMEM;
                        break;
                }

                if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
                        ath6kl_err("payload len %d exceeds max htc : %d !\n",
                                   htc_hdr->payld_len,
                                   (u32) HTC_MAX_PAYLOAD_LENGTH);
                        status = -ENOMEM;
                        break;
                }

                if (endpoint->svc_id == 0) {
                        ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
                        status = -ENOMEM;
                        break;
                }

                if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
                        /*
                         * HTC header indicates that every packet to follow
                         * has the same padded length so that it can be
                         * optimally fetched as a full bundle.
                         */
                        n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
                                HTC_FLG_RX_BNDL_CNT_S;

                        /* the count doesn't include the starter frame */
                        n_msg++;
                        if (n_msg > target->msg_per_bndl_max) {
                                status = -ENOMEM;
                                break;
                        }

                        endpoint->ep_st.rx_bundle_from_hdr += 1;
                        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                                   "htc hdr indicates :%d msg can be fetched as a bundle\n",
                                   n_msg);
                } else
                        /* HTC header only indicates 1 message to fetch */
                        n_msg = 1;

                /* Setup packet buffers for each message */
                status = htc_setup_rxpkts(target, endpoint, &lk_ahds[i], queue,
                                          n_msg);

                /*
                 * This is due to unavailabilty of buffers to rx entire data.
                 * Return no error so that free buffers from queue can be used
                 * to receive partial data.
                 */
                if (status == -ENOSPC) {
                        spin_unlock_bh(&target->rx_lock);
                        return 0;
                }

                if (status)
                        break;
        }

        spin_unlock_bh(&target->rx_lock);

        if (status) {
                list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
                        list_del(&packet->list);
                        htc_reclaim_rxbuf(target, packet,
                                          &target->endpoint[packet->endpoint]);
                }
        }

        return status;
}
static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
        if (packets->endpoint != ENDPOINT_0) {
                WARN_ON(1);
                return;
        }

        if (packets->status == -ECANCELED) {
                reclaim_rx_ctrl_buf(context, packets);
                return;
        }

        if (packets->act_len > 0) {
                ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
                           packets->act_len + HTC_HDR_LENGTH);

                ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
                                "Unexpected ENDPOINT 0 Message",
                                packets->buf - HTC_HDR_LENGTH,
                                packets->act_len + HTC_HDR_LENGTH);
        }

        htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}
static void htc_proc_cred_rpt(struct htc_target *target,
                              struct htc_credit_report *rpt,
                              int n_entries,
                              enum htc_endpoint_id from_ep)
{
        struct htc_endpoint *endpoint;
        int tot_credits = 0, i;
        bool dist = false;

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                   "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);

        spin_lock_bh(&target->tx_lock);

        for (i = 0; i < n_entries; i++, rpt++) {
                if (rpt->eid >= ENDPOINT_MAX) {
                        WARN_ON(1);
                        spin_unlock_bh(&target->tx_lock);
                        return;
                }

                endpoint = &target->endpoint[rpt->eid];

                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
                           rpt->eid, rpt->credits);

                endpoint->ep_st.tx_cred_rpt += 1;
                endpoint->ep_st.cred_retnd += rpt->credits;

                if (from_ep == rpt->eid) {
                        /*
                         * This credit report arrived on the same endpoint
                         * indicating it arrived in an RX packet.
                         */
                        endpoint->ep_st.cred_from_rx += rpt->credits;
                        endpoint->ep_st.cred_rpt_from_rx += 1;
                } else if (from_ep == ENDPOINT_0) {
                        /* credit arrived on endpoint 0 as a NULL message */
                        endpoint->ep_st.cred_from_ep0 += rpt->credits;
                        endpoint->ep_st.cred_rpt_ep0 += 1;
                } else {
                        endpoint->ep_st.cred_from_other += rpt->credits;
                        endpoint->ep_st.cred_rpt_from_other += 1;
                }

                if (rpt->eid == ENDPOINT_0)
                        /* always give endpoint 0 credits back */
                        endpoint->cred_dist.credits += rpt->credits;
                else {
                        endpoint->cred_dist.cred_to_dist += rpt->credits;
                        dist = true;
                }

                /*
                 * Refresh tx depth for distribution function that will
                 * recover these credits NOTE: this is only valid when
                 * there are credits to recover!
                 */
                endpoint->cred_dist.txq_depth =
                        get_queue_depth(&endpoint->txq);

                tot_credits += rpt->credits;
        }

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                   "report indicated %d credits to distribute\n",
                   tot_credits);

        if (dist) {
                /*
                 * This was a credit return based on a completed send
                 * operations note, this is done with the lock held
                 */
                ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
                           target->cred_dist_cntxt, &target->cred_dist_list);

                ath6k_credit_distribute(target->cred_dist_cntxt,
                                        &target->cred_dist_list,
                                        HTC_CREDIT_DIST_SEND_COMPLETE);
        }

        spin_unlock_bh(&target->tx_lock);

        if (tot_credits)
                htc_chk_ep_txq(target);
}
static int htc_parse_trailer(struct htc_target *target,
                             struct htc_record_hdr *record,
                             u8 *record_buf, u32 *next_lk_ahds,
                             enum htc_endpoint_id endpoint,
                             int *n_lk_ahds)
{
        struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
        struct htc_lookahead_report *lk_ahd;
        int len;

        switch (record->rec_id) {
        case HTC_RECORD_CREDITS:
                len = record->len / sizeof(struct htc_credit_report);
                if (!len) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                htc_proc_cred_rpt(target,
                                  (struct htc_credit_report *) record_buf,
                                  len, endpoint);
                break;
        case HTC_RECORD_LOOKAHEAD:
                len = record->len / sizeof(*lk_ahd);
                if (!len) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                lk_ahd = (struct htc_lookahead_report *) record_buf;
                if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
                    && next_lk_ahds) {

                        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                                   "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
                                   lk_ahd->pre_valid, lk_ahd->post_valid);

                        /* look ahead bytes are valid, copy them over */
                        memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

                        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
                                        (u8 *)next_lk_ahds, 4);

                        *n_lk_ahds = 1;
                }
                break;
        case HTC_RECORD_LOOKAHEAD_BUNDLE:
                len = record->len / sizeof(*bundle_lkahd_rpt);
                if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                if (next_lk_ahds) {
                        int i;

                        bundle_lkahd_rpt =
                                (struct htc_bundle_lkahd_rpt *) record_buf;

                        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
                                        record_buf, record->len);

                        for (i = 0; i < len; i++) {
                                memcpy((u8 *)&next_lk_ahds[i],
                                       bundle_lkahd_rpt->lk_ahd, 4);
                                bundle_lkahd_rpt++;
                        }

                        *n_lk_ahds = i;
                }
                break;
        default:
                ath6kl_err("unhandled record: id:%d len:%d\n",
                           record->rec_id, record->len);
                break;
        }

        return 0;
}
static int htc_proc_trailer(struct htc_target *target,
                            u8 *buf, int len, u32 *next_lk_ahds,
                            int *n_lk_ahds, enum htc_endpoint_id endpoint)
{
        struct htc_record_hdr *record;
        int orig_len;
        int status;
        u8 *record_buf;
        u8 *orig_buf;

        ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);

        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", buf, len);

        orig_buf = buf;
        orig_len = len;
        status = 0;

        while (len > 0) {

                if (len < sizeof(struct htc_record_hdr)) {
                        status = -ENOMEM;
                        break;
                }
                /* these are byte aligned structs */
                record = (struct htc_record_hdr *) buf;
                len -= sizeof(struct htc_record_hdr);
                buf += sizeof(struct htc_record_hdr);

                if (record->len > len) {
                        ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
                                   record->len, record->rec_id, len);
                        status = -ENOMEM;
                        break;
                }
                record_buf = buf;

                status = htc_parse_trailer(target, record, record_buf,
                                           next_lk_ahds, endpoint, n_lk_ahds);

                if (status)
                        break;

                /* advance buffer past this record for next time around */
                buf += record->len;
                len -= record->len;
        }

        if (status)
                ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
                                orig_buf, orig_len);

        return status;
}
static int htc_proc_rxhdr(struct htc_target *target,
                          struct htc_packet *packet,
                          u32 *next_lkahds, int *n_lkahds)
{
        int status = 0;
        u16 payload_len;
        u32 lk_ahd;
        struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

        if (n_lkahds != NULL)
                *n_lkahds = 0;

        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", packet->buf,
                        packet->act_len);

        /*
         * NOTE: we cannot assume the alignment of buf, so we use the safe
         * macros to retrieve 16 bit fields.
         */
        payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

        memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

        if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
                /*
                 * Refresh the expected header and the actual length as it
                 * was unknown when this packet was grabbed as part of the
                 * bundle.
                 */
                packet->info.rx.exp_hdr = lk_ahd;
                packet->act_len = payload_len + HTC_HDR_LENGTH;

                /* validate the actual header that was refreshed */
                if (packet->act_len > packet->buf_len) {
                        ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
                                   payload_len, lk_ahd);
                        /*
                         * Limit this to max buffer just to print out some
                         * of the buffer.
                         */
                        packet->act_len = min(packet->act_len, packet->buf_len);
                        status = -ENOMEM;
                        goto fail_rx;
                }

                if (packet->endpoint != htc_hdr->eid) {
                        ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
                                   htc_hdr->eid, packet->endpoint);
                        status = -ENOMEM;
                        goto fail_rx;
                }
        }

        if (lk_ahd != packet->info.rx.exp_hdr) {
                ath6kl_err("htc_proc_rxhdr, lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
                           packet, packet->info.rx.rx_flags);
                ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
                                &packet->info.rx.exp_hdr, 4);
                ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
                                (u8 *)&lk_ahd, sizeof(lk_ahd));
                status = -ENOMEM;
                goto fail_rx;
        }

        if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
                if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
                    htc_hdr->ctrl[0] > payload_len) {
                        ath6kl_err("htc_proc_rxhdr, invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
                                   payload_len, htc_hdr->ctrl[0]);
                        status = -ENOMEM;
                        goto fail_rx;
                }

                if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
                        next_lkahds = NULL;
                        n_lkahds = NULL;
                }

                status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
                                          + payload_len - htc_hdr->ctrl[0],
                                          htc_hdr->ctrl[0], next_lkahds,
                                          n_lkahds, packet->endpoint);

                if (status)
                        goto fail_rx;

                packet->act_len -= htc_hdr->ctrl[0];
        }

        packet->buf += HTC_HDR_LENGTH;
        packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
        if (status)
                ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
                                packet->buf,
                                packet->act_len < 256 ? packet->act_len : 256);
        else {
                if (packet->act_len > 0)
                        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
                                        "HTC - Application Msg",
                                        packet->buf, packet->act_len);
        }

        return status;
}
static void do_rx_completion(struct htc_endpoint *endpoint,
                             struct htc_packet *packet)
{
        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                   "htc calling ep %d recv callback on packet 0x%p\n",
                   endpoint->eid, packet);
        endpoint->ep_cb.rx(endpoint->target, packet);
}
static int htc_issue_rxpkt_bundle(struct htc_target *target,
                                  struct list_head *rxq,
                                  struct list_head *sync_compq,
                                  int *n_pkt_fetched, bool part_bundle)
{
        struct hif_scatter_req *scat_req;
        struct htc_packet *packet;
        int rem_space = target->max_rx_bndl_sz;
        int n_scat_pkt, status = 0, i, len;

        n_scat_pkt = get_queue_depth(rxq);
        n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

        if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
                /*
                 * We were forced to split this bundle receive operation
                 * all packets in this partial bundle must have their
                 * lookaheads ignored.
                 */
                part_bundle = true;

                /*
                 * This would only happen if the target ignored our max
                 * bundle limit.
                 */
                ath6kl_warn("htc_issue_rxpkt_bundle : partial bundle detected num:%d , %d\n",
                            get_queue_depth(rxq), n_scat_pkt);
        }

        len = 0;

        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                   "htc_issue_rxpkt_bundle (numpackets: %d , actual : %d)\n",
                   get_queue_depth(rxq), n_scat_pkt);

        scat_req = hif_scatter_req_get(target->dev->ar);

        if (scat_req == NULL)
                goto fail_rx_pkt;

        for (i = 0; i < n_scat_pkt; i++) {
                int pad_len;

                packet = list_first_entry(rxq, struct htc_packet, list);
                list_del(&packet->list);

                pad_len = CALC_TXRX_PADDED_LEN(target,
                                               packet->act_len);

                if ((rem_space - pad_len) < 0) {
                        list_add(&packet->list, rxq);
                        break;
                }

                rem_space -= pad_len;

                if (part_bundle || (i < (n_scat_pkt - 1)))
                        /*
                         * Packet 0..n-1 cannot be checked for look-aheads
                         * since we are fetching a bundle the last packet
                         * however can have it's lookahead used
                         */
                        packet->info.rx.rx_flags |=
                                HTC_RX_PKT_IGNORE_LOOKAHEAD;

                /* NOTE: 1 HTC packet per scatter entry */
                scat_req->scat_list[i].buf = packet->buf;
                scat_req->scat_list[i].len = pad_len;

                packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

                list_add_tail(&packet->list, sync_compq);

                WARN_ON(!scat_req->scat_list[i].len);
                len += scat_req->scat_list[i].len;
        }

        scat_req->len = len;
        scat_req->scat_entries = i;

        status = ath6kldev_submit_scat_req(target->dev, scat_req, true);

        if (!status)
                *n_pkt_fetched = i;

        /* free scatter request */
        hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:
        return status;
}
static int htc_proc_fetched_rxpkts(struct htc_target *target,
                                   struct list_head *comp_pktq, u32 lk_ahds[],
                                   int *n_lk_ahd)
{
        struct htc_packet *packet, *tmp_pkt;
        struct htc_endpoint *ep;
        int status = 0;

        list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
                list_del(&packet->list);
                ep = &target->endpoint[packet->endpoint];

                /* process header for each of the recv packet */
                status = htc_proc_rxhdr(target, packet, lk_ahds, n_lk_ahd);
                if (status)
                        return status;

                if (list_empty(comp_pktq)) {
                        /*
                         * Last packet's more packet flag is set
                         * based on the lookahead.
                         */
                        if (*n_lk_ahd > 0)
                                set_rxpkt_indication_flag(lk_ahds[0],
                                                          ep, packet);
                } else
                        /*
                         * Packets in a bundle automatically have
                         * this flag set.
                         */
                        packet->info.rx.indicat_flags |=
                                HTC_RX_FLAGS_INDICATE_MORE_PKTS;

                htc_update_rx_stats(ep, *n_lk_ahd);

                if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
                        ep->ep_st.rx_bundl += 1;

                do_rx_completion(ep, packet);
        }

        return status;
}
static int htc_fetch_rxpkts(struct htc_target *target,
                            struct list_head *rx_pktq,
                            struct list_head *comp_pktq)
{
        int fetched_pkts;
        bool part_bundle = false;
        int status = 0;

        /* now go fetch the list of HTC packets */
        while (!list_empty(rx_pktq)) {
                fetched_pkts = 0;

                if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
                        /*
                         * There are enough packets to attempt a
                         * bundle transfer and recv bundling is
                         * allowed.
                         */
                        status = htc_issue_rxpkt_bundle(target, rx_pktq,
                                                        comp_pktq,
                                                        &fetched_pkts,
                                                        part_bundle);
                        if (status)
                                return status;

                        if (!list_empty(rx_pktq))
                                part_bundle = true;
                }

                if (!fetched_pkts) {
                        struct htc_packet *packet;

                        packet = list_first_entry(rx_pktq, struct htc_packet,
                                                  list);

                        list_del(&packet->list);

                        /* fully synchronous */
                        packet->completion = NULL;

                        if (!list_empty(rx_pktq))
                                /*
                                 * look_aheads in all packet
                                 * except the last one in the
                                 * bundle must be ignored
                                 */
                                packet->info.rx.rx_flags |=
                                        HTC_RX_PKT_IGNORE_LOOKAHEAD;

                        /* go fetch the packet */
                        status = dev_rx_pkt(target, packet, packet->act_len);
                        if (status)
                                return status;

                        list_add_tail(&packet->list, comp_pktq);
                }
        }

        return status;
}
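/*
 * Main receive loop driven from IRQ processing: allocate buffers for the
 * look-aheads, fetch the packets (bundled when possible), process their
 * headers and deliver them, repeating while new look-aheads are reported.
 */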
int htc_rxmsg_pending_handler(struct htc_target *target, u32 msg_look_ahead[],
                              int *num_pkts)
{
        struct htc_packet *packets, *tmp_pkt;
        struct htc_endpoint *endpoint;
        struct list_head rx_pktq, comp_pktq;
        int status = 0;
        u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
        int num_look_ahead = 1;
        enum htc_endpoint_id id;
        int n_fetched = 0;

        *num_pkts = 0;

        /*
         * On first entry copy the look_aheads into our temp array for
         * processing
         */
        memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));

        while (true) {

                /*
                 * First lookahead sets the expected endpoint IDs for all
                 * packets in a bundle.
                 */
                id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
                endpoint = &target->endpoint[id];

                if (id >= ENDPOINT_MAX) {
                        ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
                                   id);
                        status = -ENOMEM;
                        break;
                }

                INIT_LIST_HEAD(&rx_pktq);
                INIT_LIST_HEAD(&comp_pktq);

                /*
                 * Try to allocate as many HTC RX packets indicated by the
                 * look_aheads.
                 */
                status = alloc_and_prep_rxpkts(target, look_aheads,
                                               num_look_ahead, endpoint,
                                               &rx_pktq);
                if (status)
                        break;

                if (get_queue_depth(&rx_pktq) >= 2)
                        /*
                         * A recv bundle was detected, force IRQ status
                         * re-check again
                         */
                        target->chk_irq_status_cnt = 1;

                n_fetched += get_queue_depth(&rx_pktq);

                num_look_ahead = 0;

                status = htc_fetch_rxpkts(target, &rx_pktq, &comp_pktq);

                if (!status)
                        chk_rx_water_mark(endpoint);

                /* Process fetched packets */
                status = htc_proc_fetched_rxpkts(target, &comp_pktq,
                                                 look_aheads, &num_look_ahead);

                if (!num_look_ahead || status)
                        break;

                /*
                 * For SYNCH processing, if we get here, we are running
                 * through the loop again due to a detected lookahead. Set
                 * flag that we should re-check IRQ status registers again
                 * before leaving IRQ processing, this can net better
                 * performance in high throughput situations.
                 */
                target->chk_irq_status_cnt = 1;
        }

        if (status) {
                ath6kl_err("failed to get pending recv messages: %d\n",
                           status);
                /*
                 * Cleanup any packets we allocated but didn't use to
                 * actually fetch any packets.
                 */
                list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
                        list_del(&packets->list);
                        htc_reclaim_rxbuf(target, packets,
                                          &target->endpoint[packets->endpoint]);
                }

                /* cleanup any packets in sync completion queue */
                list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
                        list_del(&packets->list);
                        htc_reclaim_rxbuf(target, packets,
                                          &target->endpoint[packets->endpoint]);
                }

                if (target->htc_flags & HTC_OP_STATE_STOPPING) {
                        ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
                        ath6kldev_rx_control(target->dev, false);
                }
        }

        /*
         * Before leaving, check to see if host ran out of buffers and
         * needs to stop the receiver.
         */
        if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
                ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
                ath6kldev_rx_control(target->dev, false);
        }

        *num_pkts = n_fetched;

        return status;
}
/*
 * Synchronously wait for a control message from the target,
 * This function is used at initialization time ONLY. At init messages
 * on ENDPOINT 0 are expected.
 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
        struct htc_packet *packet = NULL;
        struct htc_frame_hdr *htc_hdr;
        u32 look_ahead;

        if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
                                      HTC_TARGET_RESPONSE_TIMEOUT))
                return NULL;

        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                   "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead);

        htc_hdr = (struct htc_frame_hdr *)&look_ahead;

        if (htc_hdr->eid != ENDPOINT_0)
                return NULL;

        packet = htc_get_control_buf(target, false);

        if (!packet)
                return NULL;

        packet->info.rx.rx_flags = 0;
        packet->info.rx.exp_hdr = look_ahead;
        packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

        if (packet->act_len > packet->buf_len)
                goto fail_ctrl_rx;

        /* we want synchronous operation */
        packet->completion = NULL;

        /* get the message from the device, this will block */
        if (dev_rx_pkt(target, packet, packet->act_len))
                goto fail_ctrl_rx;

        /* process receive header */
        packet->status = htc_proc_rxhdr(target, packet, NULL, NULL);

        if (packet->status) {
                ath6kl_err("htc_wait_for_ctrl_msg, htc_proc_rxhdr failed (status = %d)\n",
                           packet->status);
                goto fail_ctrl_rx;
        }

        return packet;

fail_ctrl_rx:
        if (packet != NULL) {
                htc_rxpkt_reset(packet);
                reclaim_rx_ctrl_buf(target, packet);
        }

        return NULL;
}
int htc_add_rxbuf_multiple(struct htc_target *target,
                           struct list_head *pkt_queue)
{
        struct htc_endpoint *endpoint;
        struct htc_packet *first_pkt;
        bool rx_unblock = false;
        int status = 0, depth;

        if (list_empty(pkt_queue))
                return -ENOMEM;

        first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);

        if (first_pkt->endpoint >= ENDPOINT_MAX)
                return status;

        depth = get_queue_depth(pkt_queue);

        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                   "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
                   first_pkt->endpoint, depth, first_pkt->buf_len);

        endpoint = &target->endpoint[first_pkt->endpoint];

        if (target->htc_flags & HTC_OP_STATE_STOPPING) {
                struct htc_packet *packet, *tmp_pkt;

                /* walk through queue and mark each one canceled */
                list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
                        packet->status = -ECANCELED;
                        list_del(&packet->list);
                        do_rx_completion(endpoint, packet);
                }

                return status;
        }

        spin_lock_bh(&target->rx_lock);

        list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);

        /* check if we are blocked waiting for a new buffer */
        if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
                if (target->ep_waiting == first_pkt->endpoint) {
                        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                                   "receiver was blocked on ep:%d, unblocking.\n",
                                   target->ep_waiting);
                        target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
                        target->ep_waiting = ENDPOINT_MAX;
                        rx_unblock = true;
                }
        }

        spin_unlock_bh(&target->rx_lock);

        if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
                /* TODO : implement a buffer threshold count? */
                ath6kldev_rx_control(target->dev, true);

        return status;
}
void htc_flush_rx_buf(struct htc_target *target)
{
        struct htc_endpoint *endpoint;
        struct htc_packet *packet, *tmp_pkt;
        int i;

        for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
                endpoint = &target->endpoint[i];
                if (!endpoint->svc_id)
                        /* not in use */
                        continue;

                spin_lock_bh(&target->rx_lock);
                list_for_each_entry_safe(packet, tmp_pkt,
                                         &endpoint->rx_bufq, list) {
                        list_del(&packet->list);
                        spin_unlock_bh(&target->rx_lock);
                        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                                   "flushing rx pkt:0x%p, len:%d, ep:%d\n",
                                   packet, packet->buf_len,
                                   packet->endpoint);
                        dev_kfree_skb(packet->pkt_cntxt);
                        spin_lock_bh(&target->rx_lock);
                }
                spin_unlock_bh(&target->rx_lock);
        }
}
int htc_conn_service(struct htc_target *target,
                     struct htc_service_connect_req *conn_req,
                     struct htc_service_connect_resp *conn_resp)
{
        struct htc_packet *rx_pkt = NULL;
        struct htc_packet *tx_pkt = NULL;
        struct htc_conn_service_resp *resp_msg;
        struct htc_conn_service_msg *conn_msg;
        struct htc_endpoint *endpoint;
        enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
        unsigned int max_msg_sz = 0;
        int status = 0;

        ath6kl_dbg(ATH6KL_DBG_TRC,
                   "htc_conn_service, target:0x%p service id:0x%X\n",
                   target, conn_req->svc_id);

        if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
                /* special case for pseudo control service */
                assigned_ep = ENDPOINT_0;
                max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
        } else {
                /* allocate a packet to send to the target */
                tx_pkt = htc_get_control_buf(target, true);

                if (!tx_pkt)
                        return -ENOMEM;

                conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
                memset(conn_msg, 0, sizeof(*conn_msg));
                conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
                conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
                conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);

                set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
                                 sizeof(*conn_msg) + conn_msg->svc_meta_len,
                                 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

                /* we want synchronous operation */
                tx_pkt->completion = NULL;
                htc_prep_send_pkt(tx_pkt, 0, 0, 0);
                status = htc_issue_send(target, tx_pkt);

                if (status)
                        goto fail_tx;

                /* wait for response */
                rx_pkt = htc_wait_for_ctrl_msg(target);

                if (!rx_pkt) {
                        status = -ENOMEM;
                        goto fail_tx;
                }

                resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;

                if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
                    || (rx_pkt->act_len < sizeof(*resp_msg))) {
                        status = -ENOMEM;
                        goto fail_tx;
                }

                conn_resp->resp_code = resp_msg->status;
                /* check response status */
                if (resp_msg->status != HTC_SERVICE_SUCCESS) {
                        ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
                                   resp_msg->svc_id, resp_msg->status);
                        status = -ENOMEM;
                        goto fail_tx;
                }

                assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
                max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
        }

        if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
                status = -ENOMEM;
                goto fail_tx;
        }

        endpoint = &target->endpoint[assigned_ep];
        endpoint->eid = assigned_ep;
        if (endpoint->svc_id) {
                status = -ENOMEM;
                goto fail_tx;
        }

        /* return assigned endpoint to caller */
        conn_resp->endpoint = assigned_ep;
        conn_resp->len_max = max_msg_sz;

        /* setup the endpoint */

        /* this marks the endpoint in use */
        endpoint->svc_id = conn_req->svc_id;

        endpoint->max_txq_depth = conn_req->max_txq_depth;
        endpoint->len_max = max_msg_sz;
        endpoint->ep_cb = conn_req->ep_cb;
        endpoint->cred_dist.svc_id = conn_req->svc_id;
        endpoint->cred_dist.htc_rsvd = endpoint;
        endpoint->cred_dist.endpoint = assigned_ep;
        endpoint->cred_dist.cred_sz = target->tgt_cred_sz;

        if (conn_req->max_rxmsg_sz) {
                /*
                 * Override cred_per_msg calculation, this optimizes
                 * the credit-low indications since the host will actually
                 * issue smaller messages in the Send path.
                 */
                if (conn_req->max_rxmsg_sz > max_msg_sz) {
                        status = -ENOMEM;
                        goto fail_tx;
                }
                endpoint->cred_dist.cred_per_msg =
                        conn_req->max_rxmsg_sz / target->tgt_cred_sz;
        } else
                endpoint->cred_dist.cred_per_msg =
                        max_msg_sz / target->tgt_cred_sz;

        if (!endpoint->cred_dist.cred_per_msg)
                endpoint->cred_dist.cred_per_msg = 1;

        /* save local connection flags */
        endpoint->conn_flags = conn_req->flags;

fail_tx:
        if (tx_pkt)
                htc_reclaim_txctrl_buf(target, tx_pkt);

        if (rx_pkt) {
                htc_rxpkt_reset(rx_pkt);
                reclaim_rx_ctrl_buf(target, rx_pkt);
        }

        return status;
}
static void reset_ep_state(struct htc_target *target)
{
        struct htc_endpoint *endpoint;
        int i;

        for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
                endpoint = &target->endpoint[i];
                memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
                endpoint->svc_id = 0;
                endpoint->len_max = 0;
                endpoint->max_txq_depth = 0;
                memset(&endpoint->ep_st, 0,
                       sizeof(endpoint->ep_st));
                INIT_LIST_HEAD(&endpoint->rx_bufq);
                INIT_LIST_HEAD(&endpoint->txq);
                endpoint->target = target;
        }

        /* reset distribution list */
        INIT_LIST_HEAD(&target->cred_dist_list);
}
int htc_get_rxbuf_num(struct htc_target *target, enum htc_endpoint_id endpoint)
{
        int num;

        spin_lock_bh(&target->rx_lock);
        num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
        spin_unlock_bh(&target->rx_lock);

        return num;
}
static void htc_setup_msg_bndl(struct htc_target *target)
{
        /* limit what HTC can handle */
        target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
                                       target->msg_per_bndl_max);

        if (ath6kl_hif_enable_scatter(target->dev->ar)) {
                target->msg_per_bndl_max = 0;
                return;
        }

        /* limit bundle what the device layer can handle */
        target->msg_per_bndl_max = min(target->max_scat_entries,
                                       target->msg_per_bndl_max);

        ath6kl_dbg(ATH6KL_DBG_TRC,
                   "htc bundling allowed. max msg per htc bundle: %d\n",
                   target->msg_per_bndl_max);

        /* Max rx bundle size is limited by the max tx bundle size */
        target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
        /* Max tx bundle size is limited by the extended mbox address range */
        target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
                                     target->max_xfer_szper_scatreq);

        ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
                   target->max_rx_bndl_sz, target->max_tx_bndl_sz);

        if (target->max_tx_bndl_sz)
                target->tx_bndl_enable = true;

        if (target->max_rx_bndl_sz)
                target->rx_bndl_enable = true;

        if ((target->tgt_cred_sz % target->block_sz) != 0) {
                ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
                            target->tgt_cred_sz);

                /*
                 * Disallow send bundling since the credit size is
                 * not aligned to a block size; the I/O block
                 * padding will spill into the next credit buffer.
                 */
                target->tx_bndl_enable = false;
        }
}
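/*
 * Wait for the target's "ready" control message, record the credit pool
 * and HTC protocol version it advertises, then connect the pseudo control
 * service on ENDPOINT_0.
 */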
int htc_wait_target(struct htc_target *target)
{
        struct htc_packet *packet = NULL;
        struct htc_ready_ext_msg *rdy_msg;
        struct htc_service_connect_req connect;
        struct htc_service_connect_resp resp;
        int status;

        /* we should be getting 1 control message that the target is ready */
        packet = htc_wait_for_ctrl_msg(target);

        if (!packet)
                return -ENOMEM;

        /* we controlled the buffer creation so it's properly aligned */
        rdy_msg = (struct htc_ready_ext_msg *)packet->buf;

        if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
            (packet->act_len < sizeof(struct htc_ready_msg))) {
                status = -ENOMEM;
                goto fail_wait_target;
        }

        if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
                status = -ENOMEM;
                goto fail_wait_target;
        }

        target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
        target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);

        ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
                   "target ready: credits: %d credit size: %d\n",
                   target->tgt_creds, target->tgt_cred_sz);

        /* check if this is an extended ready message */
        if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
                /* this is an extended message */
                target->htc_tgt_ver = rdy_msg->htc_ver;
                target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
        } else {
                target->htc_tgt_ver = HTC_VERSION_2P0;
                target->msg_per_bndl_max = 0;
        }

        ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
                   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
                   target->htc_tgt_ver);

        if (target->msg_per_bndl_max > 0)
                htc_setup_msg_bndl(target);

        /* setup our pseudo HTC control endpoint connection */
        memset(&connect, 0, sizeof(connect));
        memset(&resp, 0, sizeof(resp));
        connect.ep_cb.rx = htc_ctrl_rx;
        connect.ep_cb.rx_refill = NULL;
        connect.ep_cb.tx_full = NULL;
        connect.max_txq_depth = NUM_CONTROL_BUFFERS;
        connect.svc_id = HTC_CTRL_RSVD_SVC;

        /* connect fake service */
        status = htc_conn_service((void *)target, &connect, &resp);

        if (status)
                ath6kl_hif_cleanup_scatter(target->dev->ar);

fail_wait_target:
        if (packet) {
                htc_rxpkt_reset(packet);
                reclaim_rx_ctrl_buf(target, packet);
        }

        return status;
}
/*
 * Start HTC, enable interrupts and let the target know
 * host has finished setup.
 */
int htc_start(struct htc_target *target)
{
        struct htc_packet *packet;
        int status;

        /* Disable interrupts at the chip level */
        ath6kldev_disable_intrs(target->dev);

        target->htc_flags = 0;
        target->rx_st_flags = 0;

        /* Push control receive buffers into htc control endpoint */
        while ((packet = htc_get_control_buf(target, false)) != NULL) {
                status = htc_add_rxbuf(target, packet);
                if (status)
                        return status;
        }

        /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
        ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
                          target->tgt_creds);

        dump_cred_dist_stats(target);

        /* Indicate to the target of the setup completion */
        status = htc_setup_tx_complete(target);

        if (status)
                return status;

        /* unmask interrupts */
        status = ath6kldev_unmask_intrs(target->dev);

        if (status)
                htc_stop(target);

        return status;
}
/* htc_stop: stop interrupt reception, and flush all queued buffers */
void htc_stop(struct htc_target *target)
{
        spin_lock_bh(&target->htc_lock);
        target->htc_flags |= HTC_OP_STATE_STOPPING;
        spin_unlock_bh(&target->htc_lock);

        /*
         * Masking interrupts is a synchronous operation, when this
         * function returns all pending HIF I/O has completed, we can
         * safely flush the queues.
         */
        ath6kldev_mask_intrs(target->dev);

        htc_flush_txep_all(target);

        htc_flush_rx_buf(target);

        reset_ep_state(target);
}
void *htc_create(struct ath6kl *ar)
{
        struct htc_target *target = NULL;
        struct htc_packet *packet;
        int status = 0, i = 0;
        u32 block_size, ctrl_bufsz;

        target = kzalloc(sizeof(*target), GFP_KERNEL);
        if (!target) {
                ath6kl_err("unable to allocate memory\n");
                return NULL;
        }

        target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
        if (!target->dev) {
                ath6kl_err("unable to allocate memory\n");
                status = -ENOMEM;
                goto fail_create_htc;
        }

        spin_lock_init(&target->htc_lock);
        spin_lock_init(&target->rx_lock);
        spin_lock_init(&target->tx_lock);

        INIT_LIST_HEAD(&target->free_ctrl_txbuf);
        INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
        INIT_LIST_HEAD(&target->cred_dist_list);

        target->dev->ar = ar;
        target->dev->htc_cnxt = target;
        target->ep_waiting = ENDPOINT_MAX;

        reset_ep_state(target);

        status = ath6kldev_setup(target->dev);

        if (status)
                goto fail_create_htc;

        block_size = ar->mbox_info.block_size;

        ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
                     (block_size + HTC_HDR_LENGTH) :
                     (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);

        for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
                packet = kzalloc(sizeof(*packet), GFP_KERNEL);
                if (!packet)
                        break;

                packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
                if (!packet->buf_start) {
                        kfree(packet);
                        break;
                }

                packet->buf_len = ctrl_bufsz;
                if (i < NUM_CONTROL_RX_BUFFERS) {
                        packet->act_len = 0;
                        packet->buf = packet->buf_start;
                        packet->endpoint = ENDPOINT_0;
                        list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
                } else
                        list_add_tail(&packet->list, &target->free_ctrl_txbuf);
        }

fail_create_htc:
        if (i != NUM_CONTROL_BUFFERS || status) {
                if (target) {
                        htc_cleanup(target);
                        target = NULL;
                }
        }

        return target;
}
2434 void htc_cleanup(struct htc_target
*target
)
2436 struct htc_packet
*packet
, *tmp_packet
;
2438 ath6kl_hif_cleanup_scatter(target
->dev
->ar
);
2440 list_for_each_entry_safe(packet
, tmp_packet
,
2441 &target
->free_ctrl_txbuf
, list
) {
2442 list_del(&packet
->list
);
2443 kfree(packet
->buf_start
);
2447 list_for_each_entry_safe(packet
, tmp_packet
,
2448 &target
->free_ctrl_rxbuf
, list
) {
2449 list_del(&packet
->list
);
2450 kfree(packet
->buf_start
);