drivers/net/wireless/ath/ath6kl/htc.c
/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif.h"
#include "debug.h"
#include "hif-ops.h"
#include <asm/unaligned.h>

#define CALC_TXRX_PADDED_LEN(dev, len)  (__ALIGN_MASK((len), (dev)->block_mask))
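/*
 * Example (illustrative values, not taken from the driver): with a
 * 128-byte mbox block size, block_mask is 0x7f, so a 70-byte transfer
 * is padded up to 128 bytes and a 300-byte transfer up to 384 bytes:
 *
 *      __ALIGN_MASK(70, 0x7f)  == 128
 *      __ALIGN_MASK(300, 0x7f) == 384
 */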
/* Functions for Tx credit handling */
static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
                                  struct htc_endpoint_credit_dist *ep_dist,
                                  int credits)
{
        ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
                   ep_dist->endpoint, credits);

        ep_dist->credits += credits;
        ep_dist->cred_assngd += credits;
        cred_info->cur_free_credits -= credits;
}
38 static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
39 struct list_head *ep_list,
40 int tot_credits)
42 struct htc_endpoint_credit_dist *cur_ep_dist;
43 int count;
45 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);
47 cred_info->cur_free_credits = tot_credits;
48 cred_info->total_avail_credits = tot_credits;
50 list_for_each_entry(cur_ep_dist, ep_list, list) {
51 if (cur_ep_dist->endpoint == ENDPOINT_0)
52 continue;
54 cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
56 if (tot_credits > 4) {
57 if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
58 (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
59 ath6kl_credit_deposit(cred_info,
60 cur_ep_dist,
61 cur_ep_dist->cred_min);
62 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
66 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
67 ath6kl_credit_deposit(cred_info, cur_ep_dist,
68 cur_ep_dist->cred_min);
70 * Control service is always marked active, it
71 * never goes inactive EVER.
73 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
74 } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
75 /* this is the lowest priority data endpoint */
76 /* FIXME: this looks fishy, check */
77 cred_info->lowestpri_ep_dist = cur_ep_dist->list;
80 * Streams have to be created (explicit | implicit) for all
81 * kinds of traffic. BE endpoints are also inactive in the
82 * beginning. When BE traffic starts it creates implicit
83 * streams that redistributes credits.
85 * Note: all other endpoints have minimums set but are
86 * initially given NO credits. credits will be distributed
87 * as traffic activity demands
91 WARN_ON(cred_info->cur_free_credits <= 0);
93 list_for_each_entry(cur_ep_dist, ep_list, list) {
94 if (cur_ep_dist->endpoint == ENDPOINT_0)
95 continue;
97 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
98 cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
                else {
                        /*
                         * For the remaining data endpoints, we assume that
                         * each cred_per_msg are the same. We use a simple
                         * calculation here, we take the remaining credits
                         * and determine how many max messages this can
                         * cover and then set each endpoint's normal value
                         * equal to 3/4 this amount.
                         */
                        count = (cred_info->cur_free_credits /
                                 cur_ep_dist->cred_per_msg)
                                * cur_ep_dist->cred_per_msg;
                        count = (count * 3) >> 2;
                        count = max(count, cur_ep_dist->cred_per_msg);
                        cur_ep_dist->cred_norm = count;
                }
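                /*
                 * Worked example with illustrative numbers: if 26 credits
                 * remain free and cred_per_msg is 6, the whole messages
                 * covered are (26 / 6) * 6 = 24 credits; 3/4 of that is
                 * (24 * 3) >> 2 = 18, which is above the per-message
                 * minimum, so cred_norm becomes 18.
                 */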
117 ath6kl_dbg(ATH6KL_DBG_CREDIT,
118 "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
119 cur_ep_dist->endpoint,
120 cur_ep_dist->svc_id,
121 cur_ep_dist->credits,
122 cur_ep_dist->cred_per_msg,
123 cur_ep_dist->cred_norm,
124 cur_ep_dist->cred_min);
128 /* initialize and setup credit distribution */
129 int ath6kl_credit_setup(void *htc_handle,
130 struct ath6kl_htc_credit_info *cred_info)
132 u16 servicepriority[5];
134 memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));
136 servicepriority[0] = WMI_CONTROL_SVC; /* highest */
137 servicepriority[1] = WMI_DATA_VO_SVC;
138 servicepriority[2] = WMI_DATA_VI_SVC;
139 servicepriority[3] = WMI_DATA_BE_SVC;
140 servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
142 /* set priority list */
143 ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
145 return 0;
148 /* reduce an ep's credits back to a set limit */
149 static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
150 struct htc_endpoint_credit_dist *ep_dist,
151 int limit)
153 int credits;
155 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
156 ep_dist->endpoint, limit);
158 ep_dist->cred_assngd = limit;
160 if (ep_dist->credits <= limit)
161 return;
163 credits = ep_dist->credits - limit;
164 ep_dist->credits -= credits;
165 cred_info->cur_free_credits += credits;
168 static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
169 struct list_head *epdist_list)
171 struct htc_endpoint_credit_dist *cur_dist_list;
173 list_for_each_entry(cur_dist_list, epdist_list, list) {
174 if (cur_dist_list->endpoint == ENDPOINT_0)
175 continue;
177 if (cur_dist_list->cred_to_dist > 0) {
178 cur_dist_list->credits +=
179 cur_dist_list->cred_to_dist;
180 cur_dist_list->cred_to_dist = 0;
181 if (cur_dist_list->credits >
182 cur_dist_list->cred_assngd)
183 ath6kl_credit_reduce(cred_info,
184 cur_dist_list,
185 cur_dist_list->cred_assngd);
187 if (cur_dist_list->credits >
188 cur_dist_list->cred_norm)
189 ath6kl_credit_reduce(cred_info, cur_dist_list,
190 cur_dist_list->cred_norm);
192 if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
193 if (cur_dist_list->txq_depth == 0)
194 ath6kl_credit_reduce(cred_info,
195 cur_dist_list, 0);
202 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
203 * question.
205 static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
206 struct htc_endpoint_credit_dist *ep_dist)
208 struct htc_endpoint_credit_dist *curdist_list;
209 int credits = 0;
210 int need;
212 if (ep_dist->svc_id == WMI_CONTROL_SVC)
213 goto out;
215 if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
216 (ep_dist->svc_id == WMI_DATA_VO_SVC))
217 if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
218 goto out;
        /*
         * For all other services, we follow a simple algorithm of:
         *
         * 1. checking the free pool for credits
         * 2. checking lower priority endpoints for credits to take
         */

        credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

        if (credits >= ep_dist->seek_cred)
                goto out;

        /*
         * We don't have enough in the free pool, try taking away from
         * lower priority services. The rule for taking away credits:
         *
         * 1. Only take from lower priority endpoints
         * 2. Only take what is allocated above the minimum (never
         *    starve an endpoint completely)
         * 3. Only take what you need.
         */
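        /*
         * Illustrative example (hypothetical numbers): if this endpoint
         * seeks 5 credits but only 2 are free, the shortfall is 3.  A
         * lower priority endpoint holding 10 assigned credits with a
         * minimum of 4 can be reduced to 7, returning 3 credits to the
         * free pool and satisfying the request without starving that
         * endpoint.
         */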
242 list_for_each_entry_reverse(curdist_list,
243 &cred_info->lowestpri_ep_dist,
244 list) {
245 if (curdist_list == ep_dist)
246 break;
248 need = ep_dist->seek_cred - cred_info->cur_free_credits;
250 if ((curdist_list->cred_assngd - need) >=
251 curdist_list->cred_min) {
253 * The current one has been allocated more than
254 * it's minimum and it has enough credits assigned
255 * above it's minimum to fulfill our need try to
256 * take away just enough to fulfill our need.
258 ath6kl_credit_reduce(cred_info, curdist_list,
259 curdist_list->cred_assngd - need);
261 if (cred_info->cur_free_credits >=
262 ep_dist->seek_cred)
263 break;
266 if (curdist_list->endpoint == ENDPOINT_0)
267 break;
270 credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
272 out:
273 /* did we find some credits? */
274 if (credits)
275 ath6kl_credit_deposit(cred_info, ep_dist, credits);
277 ep_dist->seek_cred = 0;
280 /* redistribute credits based on activity change */
281 static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
282 struct list_head *ep_dist_list)
284 struct htc_endpoint_credit_dist *curdist_list;
286 list_for_each_entry(curdist_list, ep_dist_list, list) {
287 if (curdist_list->endpoint == ENDPOINT_0)
288 continue;
290 if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
291 (curdist_list->svc_id == WMI_DATA_BE_SVC))
292 curdist_list->dist_flags |= HTC_EP_ACTIVE;
294 if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
295 !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
296 if (curdist_list->txq_depth == 0)
297 ath6kl_credit_reduce(info, curdist_list, 0);
298 else
299 ath6kl_credit_reduce(info,
300 curdist_list,
301 curdist_list->cred_min);
308 * This function is invoked whenever endpoints require credit
309 * distributions. A lock is held while this function is invoked, this
310 * function shall NOT block. The ep_dist_list is a list of distribution
311 * structures in prioritized order as defined by the call to the
312 * htc_set_credit_dist() api.
314 static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
315 struct list_head *ep_dist_list,
316 enum htc_credit_dist_reason reason)
318 switch (reason) {
319 case HTC_CREDIT_DIST_SEND_COMPLETE:
320 ath6kl_credit_update(cred_info, ep_dist_list);
321 break;
322 case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
323 ath6kl_credit_redistribute(cred_info, ep_dist_list);
324 break;
325 default:
326 break;
329 WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
330 WARN_ON(cred_info->cur_free_credits < 0);
static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
{
        u8 *align_addr;

        if (!IS_ALIGNED((unsigned long) *buf, 4)) {
                align_addr = PTR_ALIGN(*buf - 4, 4);
                memmove(align_addr, *buf, len);
                *buf = align_addr;
        }
}
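/*
 * Example (illustrative addresses): a payload starting at 0x1003 is moved
 * down to 0x1000 (PTR_ALIGN(0x1003 - 4, 4) == 0x1000) so the HTC header can
 * later be prepended on a 4-byte boundary; callers therefore need a few
 * bytes of headroom below the original buffer address.
 */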
static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
                                   int ctrl0, int ctrl1)
{
        struct htc_frame_hdr *hdr;

        packet->buf -= HTC_HDR_LENGTH;
        hdr = (struct htc_frame_hdr *)packet->buf;

        /* Endianness? */
        put_unaligned((u16)packet->act_len, &hdr->payld_len);
        hdr->flags = flags;
        hdr->eid = packet->endpoint;
        hdr->ctrl[0] = ctrl0;
        hdr->ctrl[1] = ctrl1;
}
360 static void htc_reclaim_txctrl_buf(struct htc_target *target,
361 struct htc_packet *pkt)
363 spin_lock_bh(&target->htc_lock);
364 list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
365 spin_unlock_bh(&target->htc_lock);
368 static struct htc_packet *htc_get_control_buf(struct htc_target *target,
369 bool tx)
371 struct htc_packet *packet = NULL;
372 struct list_head *buf_list;
374 buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
376 spin_lock_bh(&target->htc_lock);
378 if (list_empty(buf_list)) {
379 spin_unlock_bh(&target->htc_lock);
380 return NULL;
383 packet = list_first_entry(buf_list, struct htc_packet, list);
384 list_del(&packet->list);
385 spin_unlock_bh(&target->htc_lock);
387 if (tx)
388 packet->buf = packet->buf_start + HTC_HDR_LENGTH;
390 return packet;
393 static void htc_tx_comp_update(struct htc_target *target,
394 struct htc_endpoint *endpoint,
395 struct htc_packet *packet)
397 packet->completion = NULL;
398 packet->buf += HTC_HDR_LENGTH;
400 if (!packet->status)
401 return;
403 ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
404 packet->status, packet->endpoint, packet->act_len,
405 packet->info.tx.cred_used);
407 /* on failure to submit, reclaim credits for this packet */
408 spin_lock_bh(&target->tx_lock);
409 endpoint->cred_dist.cred_to_dist +=
410 packet->info.tx.cred_used;
411 endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
413 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
414 target->credit_info, &target->cred_dist_list);
416 ath6kl_credit_distribute(target->credit_info,
417 &target->cred_dist_list,
418 HTC_CREDIT_DIST_SEND_COMPLETE);
420 spin_unlock_bh(&target->tx_lock);
423 static void htc_tx_complete(struct htc_endpoint *endpoint,
424 struct list_head *txq)
426 if (list_empty(txq))
427 return;
429 ath6kl_dbg(ATH6KL_DBG_HTC,
430 "htc tx complete ep %d pkts %d\n",
431 endpoint->eid, get_queue_depth(txq));
433 ath6kl_tx_complete(endpoint->target->dev->ar, txq);
436 static void htc_tx_comp_handler(struct htc_target *target,
437 struct htc_packet *packet)
439 struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
440 struct list_head container;
442 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
443 packet->info.tx.seqno);
445 htc_tx_comp_update(target, endpoint, packet);
446 INIT_LIST_HEAD(&container);
447 list_add_tail(&packet->list, &container);
448 /* do completion */
449 htc_tx_complete(endpoint, &container);
452 static void htc_async_tx_scat_complete(struct htc_target *target,
453 struct hif_scatter_req *scat_req)
455 struct htc_endpoint *endpoint;
456 struct htc_packet *packet;
457 struct list_head tx_compq;
458 int i;
460 INIT_LIST_HEAD(&tx_compq);
462 ath6kl_dbg(ATH6KL_DBG_HTC,
463 "htc tx scat complete len %d entries %d\n",
464 scat_req->len, scat_req->scat_entries);
466 if (scat_req->status)
467 ath6kl_err("send scatter req failed: %d\n", scat_req->status);
469 packet = scat_req->scat_list[0].packet;
470 endpoint = &target->endpoint[packet->endpoint];
472 /* walk through the scatter list and process */
473 for (i = 0; i < scat_req->scat_entries; i++) {
474 packet = scat_req->scat_list[i].packet;
475 if (!packet) {
476 WARN_ON(1);
477 return;
480 packet->status = scat_req->status;
481 htc_tx_comp_update(target, endpoint, packet);
482 list_add_tail(&packet->list, &tx_compq);
485 /* free scatter request */
486 hif_scatter_req_add(target->dev->ar, scat_req);
488 /* complete all packets */
489 htc_tx_complete(endpoint, &tx_compq);
492 static int ath6kl_htc_tx_issue(struct htc_target *target,
493 struct htc_packet *packet)
495 int status;
496 bool sync = false;
497 u32 padded_len, send_len;
499 if (!packet->completion)
500 sync = true;
502 send_len = packet->act_len + HTC_HDR_LENGTH;
504 padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
506 ath6kl_dbg(ATH6KL_DBG_HTC,
507 "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
508 send_len, packet->info.tx.seqno, padded_len,
509 target->dev->ar->mbox_info.htc_addr,
510 sync ? "sync" : "async");
512 if (sync) {
513 status = hif_read_write_sync(target->dev->ar,
514 target->dev->ar->mbox_info.htc_addr,
515 packet->buf, padded_len,
516 HIF_WR_SYNC_BLOCK_INC);
518 packet->status = status;
519 packet->buf += HTC_HDR_LENGTH;
520 } else
521 status = hif_write_async(target->dev->ar,
522 target->dev->ar->mbox_info.htc_addr,
523 packet->buf, padded_len,
524 HIF_WR_ASYNC_BLOCK_INC, packet);
526 return status;
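/*
 * Check whether the endpoint has enough TX credits for a message of the
 * given length; if not, try to seek more from the credit distributor.
 * On success the consumed credits are deducted and 0 is returned; -EINVAL
 * is returned when the credits cannot be obtained.
 */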
529 static int htc_check_credits(struct htc_target *target,
530 struct htc_endpoint *ep, u8 *flags,
531 enum htc_endpoint_id eid, unsigned int len,
532 int *req_cred)
535 *req_cred = (len > target->tgt_cred_sz) ?
536 DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
538 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
539 *req_cred, ep->cred_dist.credits);
541 if (ep->cred_dist.credits < *req_cred) {
542 if (eid == ENDPOINT_0)
543 return -EINVAL;
545 /* Seek more credits */
546 ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
548 ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
550 ep->cred_dist.seek_cred = 0;
552 if (ep->cred_dist.credits < *req_cred) {
553 ath6kl_dbg(ATH6KL_DBG_CREDIT,
554 "credit not found for ep %d\n",
555 eid);
556 return -EINVAL;
560 ep->cred_dist.credits -= *req_cred;
561 ep->ep_st.cred_cosumd += *req_cred;
563 /* When we are getting low on credits, ask for more */
564 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
565 ep->cred_dist.seek_cred =
566 ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
568 ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
570 /* see if we were successful in getting more */
571 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
572 /* tell the target we need credits ASAP! */
573 *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
574 ep->ep_st.cred_low_indicate += 1;
575 ath6kl_dbg(ATH6KL_DBG_CREDIT,
576 "credit we need credits asap\n");
580 return 0;
583 static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
584 struct htc_endpoint *endpoint,
585 struct list_head *queue)
587 int req_cred;
588 u8 flags;
589 struct htc_packet *packet;
590 unsigned int len;
592 while (true) {
594 flags = 0;
596 if (list_empty(&endpoint->txq))
597 break;
598 packet = list_first_entry(&endpoint->txq, struct htc_packet,
599 list);
601 ath6kl_dbg(ATH6KL_DBG_HTC,
602 "htc tx got packet 0x%p queue depth %d\n",
603 packet, get_queue_depth(&endpoint->txq));
605 len = CALC_TXRX_PADDED_LEN(target,
606 packet->act_len + HTC_HDR_LENGTH);
608 if (htc_check_credits(target, endpoint, &flags,
609 packet->endpoint, len, &req_cred))
610 break;
612 /* now we can fully move onto caller's queue */
613 packet = list_first_entry(&endpoint->txq, struct htc_packet,
614 list);
615 list_move_tail(&packet->list, queue);
617 /* save the number of credits this packet consumed */
618 packet->info.tx.cred_used = req_cred;
620 /* all TX packets are handled asynchronously */
621 packet->completion = htc_tx_comp_handler;
622 packet->context = target;
623 endpoint->ep_st.tx_issued += 1;
625 /* save send flags */
626 packet->info.tx.flags = flags;
627 packet->info.tx.seqno = endpoint->seqno;
628 endpoint->seqno++;
632 /* See if the padded tx length falls on a credit boundary */
633 static int htc_get_credit_padding(unsigned int cred_sz, int *len,
634 struct htc_endpoint *ep)
636 int rem_cred, cred_pad;
638 rem_cred = *len % cred_sz;
640 /* No padding needed */
641 if (!rem_cred)
642 return 0;
644 if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
645 return -1;
        /*
         * The transfer consumes a "partial" credit, this
         * packet cannot be bundled unless we add
         * additional "dummy" padding (max 255 bytes) to
         * consume the entire credit.
         */
        cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
655 if ((cred_pad > 0) && (cred_pad <= 255))
656 *len += cred_pad;
657 else
658 /* The amount of padding is too large, send as non-bundled */
659 return -1;
661 return cred_pad;
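/*
 * Illustrative example: with a 128-byte target credit size, a 70-byte
 * padded frame consumes only a partial credit; 58 bytes of dummy padding
 * are added so the transfer consumes exactly one credit and can still be
 * bundled.  If the required padding exceeded 255 bytes the frame would be
 * sent non-bundled instead.
 */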
664 static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
665 struct htc_endpoint *endpoint,
666 struct hif_scatter_req *scat_req,
667 int n_scat,
668 struct list_head *queue)
670 struct htc_packet *packet;
671 int i, len, rem_scat, cred_pad;
672 int status = 0;
674 rem_scat = target->max_tx_bndl_sz;
676 for (i = 0; i < n_scat; i++) {
677 scat_req->scat_list[i].packet = NULL;
679 if (list_empty(queue))
680 break;
682 packet = list_first_entry(queue, struct htc_packet, list);
683 len = CALC_TXRX_PADDED_LEN(target,
684 packet->act_len + HTC_HDR_LENGTH);
686 cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
687 &len, endpoint);
688 if (cred_pad < 0 || rem_scat < len) {
689 status = -ENOSPC;
690 break;
693 rem_scat -= len;
694 /* now remove it from the queue */
695 list_del(&packet->list);
697 scat_req->scat_list[i].packet = packet;
698 /* prepare packet and flag message as part of a send bundle */
699 ath6kl_htc_tx_prep_pkt(packet,
700 packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
701 cred_pad, packet->info.tx.seqno);
702 /* Make sure the buffer is 4-byte aligned */
703 ath6kl_htc_tx_buf_align(&packet->buf,
704 packet->act_len + HTC_HDR_LENGTH);
705 scat_req->scat_list[i].buf = packet->buf;
706 scat_req->scat_list[i].len = len;
708 scat_req->len += len;
709 scat_req->scat_entries++;
710 ath6kl_dbg(ATH6KL_DBG_HTC,
711 "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
712 i, packet, packet->info.tx.seqno, len, rem_scat);
715 /* Roll back scatter setup in case of any failure */
716 if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
717 for (i = scat_req->scat_entries - 1; i >= 0; i--) {
718 packet = scat_req->scat_list[i].packet;
719 if (packet) {
720 packet->buf += HTC_HDR_LENGTH;
721 list_add(&packet->list, queue);
724 return -EAGAIN;
727 return status;
/*
 * Drain a queue and send as bundles; this function may return without
 * fully draining the queue when:
 *
 * 1. scatter resources are exhausted
 * 2. a message that will consume a partial credit will stop the
 *    bundling process early
 * 3. we drop below the minimum number of messages for a bundle
 */
739 static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
740 struct list_head *queue,
741 int *sent_bundle, int *n_bundle_pkts)
743 struct htc_target *target = endpoint->target;
744 struct hif_scatter_req *scat_req = NULL;
745 int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
746 int status;
748 while (true) {
749 status = 0;
750 n_scat = get_queue_depth(queue);
751 n_scat = min(n_scat, target->msg_per_bndl_max);
753 if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
754 /* not enough to bundle */
755 break;
757 scat_req = hif_scatter_req_get(target->dev->ar);
759 if (!scat_req) {
760 /* no scatter resources */
761 ath6kl_dbg(ATH6KL_DBG_HTC,
762 "htc tx no more scatter resources\n");
763 break;
766 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
767 n_scat);
769 scat_req->len = 0;
770 scat_req->scat_entries = 0;
772 status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
773 scat_req, n_scat,
774 queue);
775 if (status == -EAGAIN) {
776 hif_scatter_req_add(target->dev->ar, scat_req);
777 break;
780 /* send path is always asynchronous */
781 scat_req->complete = htc_async_tx_scat_complete;
782 n_sent_bundle++;
783 tot_pkts_bundle += scat_req->scat_entries;
785 ath6kl_dbg(ATH6KL_DBG_HTC,
786 "htc tx scatter bytes %d entries %d\n",
787 scat_req->len, scat_req->scat_entries);
788 ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
790 if (status)
791 break;
794 *sent_bundle = n_sent_bundle;
795 *n_bundle_pkts = tot_pkts_bundle;
796 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
797 n_sent_bundle);
799 return;
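/*
 * Pull packets off the endpoint TX queue for as long as credits allow and
 * hand them to the HIF layer, bundling them into scatter requests where
 * possible.  Re-entrant calls are detected via tx_proc_cnt and simply
 * return, leaving the first caller to drain the queue.
 */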
802 static void ath6kl_htc_tx_from_queue(struct htc_target *target,
803 struct htc_endpoint *endpoint)
805 struct list_head txq;
806 struct htc_packet *packet;
807 int bundle_sent;
808 int n_pkts_bundle;
810 spin_lock_bh(&target->tx_lock);
812 endpoint->tx_proc_cnt++;
813 if (endpoint->tx_proc_cnt > 1) {
814 endpoint->tx_proc_cnt--;
815 spin_unlock_bh(&target->tx_lock);
816 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
817 return;
821 * drain the endpoint TX queue for transmission as long
822 * as we have enough credits.
824 INIT_LIST_HEAD(&txq);
826 while (true) {
828 if (list_empty(&endpoint->txq))
829 break;
831 ath6kl_htc_tx_pkts_get(target, endpoint, &txq);
833 if (list_empty(&txq))
834 break;
836 spin_unlock_bh(&target->tx_lock);
838 bundle_sent = 0;
839 n_pkts_bundle = 0;
841 while (true) {
842 /* try to send a bundle on each pass */
843 if ((target->tx_bndl_enable) &&
844 (get_queue_depth(&txq) >=
845 HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
846 int temp1 = 0, temp2 = 0;
848 ath6kl_htc_tx_bundle(endpoint, &txq,
849 &temp1, &temp2);
850 bundle_sent += temp1;
851 n_pkts_bundle += temp2;
854 if (list_empty(&txq))
855 break;
857 packet = list_first_entry(&txq, struct htc_packet,
858 list);
859 list_del(&packet->list);
861 ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
862 0, packet->info.tx.seqno);
863 ath6kl_htc_tx_issue(target, packet);
866 spin_lock_bh(&target->tx_lock);
868 endpoint->ep_st.tx_bundles += bundle_sent;
869 endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
872 endpoint->tx_proc_cnt = 0;
873 spin_unlock_bh(&target->tx_lock);
876 static bool ath6kl_htc_tx_try(struct htc_target *target,
877 struct htc_endpoint *endpoint,
878 struct htc_packet *tx_pkt)
880 struct htc_ep_callbacks ep_cb;
881 int txq_depth;
882 bool overflow = false;
884 ep_cb = endpoint->ep_cb;
886 spin_lock_bh(&target->tx_lock);
887 txq_depth = get_queue_depth(&endpoint->txq);
888 spin_unlock_bh(&target->tx_lock);
890 if (txq_depth >= endpoint->max_txq_depth)
891 overflow = true;
893 if (overflow)
894 ath6kl_dbg(ATH6KL_DBG_HTC,
895 "htc tx overflow ep %d depth %d max %d\n",
896 endpoint->eid, txq_depth,
897 endpoint->max_txq_depth);
899 if (overflow && ep_cb.tx_full) {
900 if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
901 HTC_SEND_FULL_DROP) {
902 endpoint->ep_st.tx_dropped += 1;
903 return false;
907 spin_lock_bh(&target->tx_lock);
908 list_add_tail(&tx_pkt->list, &endpoint->txq);
909 spin_unlock_bh(&target->tx_lock);
911 ath6kl_htc_tx_from_queue(target, endpoint);
913 return true;
916 static void htc_chk_ep_txq(struct htc_target *target)
918 struct htc_endpoint *endpoint;
919 struct htc_endpoint_credit_dist *cred_dist;
922 * Run through the credit distribution list to see if there are
923 * packets queued. NOTE: no locks need to be taken since the
924 * distribution list is not dynamic (cannot be re-ordered) and we
925 * are not modifying any state.
927 list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
928 endpoint = cred_dist->htc_ep;
930 spin_lock_bh(&target->tx_lock);
931 if (!list_empty(&endpoint->txq)) {
932 ath6kl_dbg(ATH6KL_DBG_HTC,
933 "htc creds ep %d credits %d pkts %d\n",
934 cred_dist->endpoint,
935 endpoint->cred_dist.credits,
936 get_queue_depth(&endpoint->txq));
937 spin_unlock_bh(&target->tx_lock);
939 * Try to start the stalled queue, this list is
940 * ordered by priority. If there are credits
941 * available the highest priority queue will get a
942 * chance to reclaim credits from lower priority
943 * ones.
945 ath6kl_htc_tx_from_queue(target, endpoint);
946 spin_lock_bh(&target->tx_lock);
948 spin_unlock_bh(&target->tx_lock);
952 static int htc_setup_tx_complete(struct htc_target *target)
954 struct htc_packet *send_pkt = NULL;
955 int status;
957 send_pkt = htc_get_control_buf(target, true);
959 if (!send_pkt)
960 return -ENOMEM;
962 if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
963 struct htc_setup_comp_ext_msg *setup_comp_ext;
964 u32 flags = 0;
966 setup_comp_ext =
967 (struct htc_setup_comp_ext_msg *)send_pkt->buf;
968 memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
969 setup_comp_ext->msg_id =
970 cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
972 if (target->msg_per_bndl_max > 0) {
973 /* Indicate HTC bundling to the target */
974 flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
975 setup_comp_ext->msg_per_rxbndl =
976 target->msg_per_bndl_max;
979 memcpy(&setup_comp_ext->flags, &flags,
980 sizeof(setup_comp_ext->flags));
981 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
982 sizeof(struct htc_setup_comp_ext_msg),
983 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
985 } else {
986 struct htc_setup_comp_msg *setup_comp;
987 setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
988 memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
989 setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
990 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
991 sizeof(struct htc_setup_comp_msg),
992 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
995 /* we want synchronous operation */
996 send_pkt->completion = NULL;
997 ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
998 status = ath6kl_htc_tx_issue(target, send_pkt);
1000 if (send_pkt != NULL)
1001 htc_reclaim_txctrl_buf(target, send_pkt);
1003 return status;
1006 void ath6kl_htc_set_credit_dist(struct htc_target *target,
1007 struct ath6kl_htc_credit_info *credit_info,
1008 u16 srvc_pri_order[], int list_len)
1010 struct htc_endpoint *endpoint;
1011 int i, ep;
1013 target->credit_info = credit_info;
1015 list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
1016 &target->cred_dist_list);
1018 for (i = 0; i < list_len; i++) {
1019 for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
1020 endpoint = &target->endpoint[ep];
1021 if (endpoint->svc_id == srvc_pri_order[i]) {
1022 list_add_tail(&endpoint->cred_dist.list,
1023 &target->cred_dist_list);
1024 break;
1027 if (ep >= ENDPOINT_MAX) {
1028 WARN_ON(1);
1029 return;
1034 int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
1036 struct htc_endpoint *endpoint;
1037 struct list_head queue;
1039 ath6kl_dbg(ATH6KL_DBG_HTC,
1040 "htc tx ep id %d buf 0x%p len %d\n",
1041 packet->endpoint, packet->buf, packet->act_len);
1043 if (packet->endpoint >= ENDPOINT_MAX) {
1044 WARN_ON(1);
1045 return -EINVAL;
1048 endpoint = &target->endpoint[packet->endpoint];
1050 if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
1051 packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
1052 -ECANCELED : -ENOSPC;
1053 INIT_LIST_HEAD(&queue);
1054 list_add(&packet->list, &queue);
1055 htc_tx_complete(endpoint, &queue);
1058 return 0;
1061 /* flush endpoint TX queue */
1062 void ath6kl_htc_flush_txep(struct htc_target *target,
1063 enum htc_endpoint_id eid, u16 tag)
1065 struct htc_packet *packet, *tmp_pkt;
1066 struct list_head discard_q, container;
1067 struct htc_endpoint *endpoint = &target->endpoint[eid];
1069 if (!endpoint->svc_id) {
1070 WARN_ON(1);
1071 return;
1074 /* initialize the discard queue */
1075 INIT_LIST_HEAD(&discard_q);
1077 spin_lock_bh(&target->tx_lock);
1079 list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
1080 if ((tag == HTC_TX_PACKET_TAG_ALL) ||
1081 (tag == packet->info.tx.tag))
1082 list_move_tail(&packet->list, &discard_q);
1085 spin_unlock_bh(&target->tx_lock);
1087 list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
1088 packet->status = -ECANCELED;
1089 list_del(&packet->list);
1090 ath6kl_dbg(ATH6KL_DBG_HTC,
1091 "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
1092 packet, packet->act_len,
1093 packet->endpoint, packet->info.tx.tag);
1095 INIT_LIST_HEAD(&container);
1096 list_add_tail(&packet->list, &container);
1097 htc_tx_complete(endpoint, &container);
1102 static void ath6kl_htc_flush_txep_all(struct htc_target *target)
1104 struct htc_endpoint *endpoint;
1105 int i;
1107 dump_cred_dist_stats(target);
1109 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
1110 endpoint = &target->endpoint[i];
1111 if (endpoint->svc_id == 0)
1112 /* not in use.. */
1113 continue;
1114 ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
1118 void ath6kl_htc_indicate_activity_change(struct htc_target *target,
1119 enum htc_endpoint_id eid, bool active)
1121 struct htc_endpoint *endpoint = &target->endpoint[eid];
1122 bool dist = false;
1124 if (endpoint->svc_id == 0) {
1125 WARN_ON(1);
1126 return;
1129 spin_lock_bh(&target->tx_lock);
1131 if (active) {
1132 if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
1133 endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
1134 dist = true;
1136 } else {
1137 if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
1138 endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
1139 dist = true;
1143 if (dist) {
1144 endpoint->cred_dist.txq_depth =
1145 get_queue_depth(&endpoint->txq);
1147 ath6kl_dbg(ATH6KL_DBG_HTC,
1148 "htc tx activity ctxt 0x%p dist 0x%p\n",
1149 target->credit_info, &target->cred_dist_list);
1151 ath6kl_credit_distribute(target->credit_info,
1152 &target->cred_dist_list,
1153 HTC_CREDIT_DIST_ACTIVITY_CHANGE);
1156 spin_unlock_bh(&target->tx_lock);
1158 if (dist && !active)
1159 htc_chk_ep_txq(target);
1162 /* HTC Rx */
1164 static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
1165 int n_look_ahds)
1167 endpoint->ep_st.rx_pkts++;
1168 if (n_look_ahds == 1)
1169 endpoint->ep_st.rx_lkahds++;
1170 else if (n_look_ahds > 1)
1171 endpoint->ep_st.rx_bundle_lkahd++;
1174 static inline bool htc_valid_rx_frame_len(struct htc_target *target,
1175 enum htc_endpoint_id eid, int len)
1177 return (eid == target->dev->ar->ctrl_ep) ?
1178 len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
1181 static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
1183 struct list_head queue;
1185 INIT_LIST_HEAD(&queue);
1186 list_add_tail(&packet->list, &queue);
1187 return ath6kl_htc_add_rxbuf_multiple(target, &queue);
1190 static void htc_reclaim_rxbuf(struct htc_target *target,
1191 struct htc_packet *packet,
1192 struct htc_endpoint *ep)
1194 if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
1195 htc_rxpkt_reset(packet);
1196 packet->status = -ECANCELED;
1197 ep->ep_cb.rx(ep->target, packet);
1198 } else {
1199 htc_rxpkt_reset(packet);
1200 htc_add_rxbuf((void *)(target), packet);
1204 static void reclaim_rx_ctrl_buf(struct htc_target *target,
1205 struct htc_packet *packet)
1207 spin_lock_bh(&target->htc_lock);
1208 list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
1209 spin_unlock_bh(&target->htc_lock);
1212 static int ath6kl_htc_rx_packet(struct htc_target *target,
1213 struct htc_packet *packet,
1214 u32 rx_len)
1216 struct ath6kl_device *dev = target->dev;
1217 u32 padded_len;
1218 int status;
1220 padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
1222 if (padded_len > packet->buf_len) {
1223 ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
1224 padded_len, rx_len, packet->buf_len);
1225 return -ENOMEM;
1228 ath6kl_dbg(ATH6KL_DBG_HTC,
1229 "htc rx 0x%p hdr x%x len %d mbox 0x%x\n",
1230 packet, packet->info.rx.exp_hdr,
1231 padded_len, dev->ar->mbox_info.htc_addr);
1233 status = hif_read_write_sync(dev->ar,
1234 dev->ar->mbox_info.htc_addr,
1235 packet->buf, padded_len,
1236 HIF_RD_SYNC_BLOCK_FIX);
1238 packet->status = status;
1240 return status;
1244 * optimization for recv packets, we can indicate a
1245 * "hint" that there are more single-packets to fetch
1246 * on this endpoint.
1248 static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
1249 struct htc_endpoint *endpoint,
1250 struct htc_packet *packet)
1252 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;
1254 if (htc_hdr->eid == packet->endpoint) {
1255 if (!list_empty(&endpoint->rx_bufq))
1256 packet->info.rx.indicat_flags |=
1257 HTC_RX_FLAGS_INDICATE_MORE_PKTS;
1261 static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
1263 struct htc_ep_callbacks ep_cb = endpoint->ep_cb;
1265 if (ep_cb.rx_refill_thresh > 0) {
1266 spin_lock_bh(&endpoint->target->rx_lock);
1267 if (get_queue_depth(&endpoint->rx_bufq)
1268 < ep_cb.rx_refill_thresh) {
1269 spin_unlock_bh(&endpoint->target->rx_lock);
1270 ep_cb.rx_refill(endpoint->target, endpoint->eid);
1271 return;
1273 spin_unlock_bh(&endpoint->target->rx_lock);
1277 /* This function is called with rx_lock held */
1278 static int ath6kl_htc_rx_setup(struct htc_target *target,
1279 struct htc_endpoint *ep,
1280 u32 *lk_ahds, struct list_head *queue, int n_msg)
1282 struct htc_packet *packet;
1283 /* FIXME: type of lk_ahds can't be right */
1284 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
1285 struct htc_ep_callbacks ep_cb;
1286 int status = 0, j, full_len;
1287 bool no_recycle;
1289 full_len = CALC_TXRX_PADDED_LEN(target,
1290 le16_to_cpu(htc_hdr->payld_len) +
1291 sizeof(*htc_hdr));
1293 if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
1294 ath6kl_warn("Rx buffer requested with invalid length\n");
1295 return -EINVAL;
1298 ep_cb = ep->ep_cb;
1299 for (j = 0; j < n_msg; j++) {
1302 * Reset flag, any packets allocated using the
1303 * rx_alloc() API cannot be recycled on
1304 * cleanup,they must be explicitly returned.
1306 no_recycle = false;
1308 if (ep_cb.rx_allocthresh &&
1309 (full_len > ep_cb.rx_alloc_thresh)) {
1310 ep->ep_st.rx_alloc_thresh_hit += 1;
1311 ep->ep_st.rxalloc_thresh_byte +=
1312 le16_to_cpu(htc_hdr->payld_len);
1314 spin_unlock_bh(&target->rx_lock);
1315 no_recycle = true;
1317 packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
1318 full_len);
1319 spin_lock_bh(&target->rx_lock);
1320 } else {
1321 /* refill handler is being used */
1322 if (list_empty(&ep->rx_bufq)) {
1323 if (ep_cb.rx_refill) {
1324 spin_unlock_bh(&target->rx_lock);
1325 ep_cb.rx_refill(ep->target, ep->eid);
1326 spin_lock_bh(&target->rx_lock);
1330 if (list_empty(&ep->rx_bufq))
1331 packet = NULL;
1332 else {
1333 packet = list_first_entry(&ep->rx_bufq,
1334 struct htc_packet, list);
1335 list_del(&packet->list);
1339 if (!packet) {
1340 target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
1341 target->ep_waiting = ep->eid;
1342 return -ENOSPC;
1345 /* clear flags */
1346 packet->info.rx.rx_flags = 0;
1347 packet->info.rx.indicat_flags = 0;
1348 packet->status = 0;
1350 if (no_recycle)
1352 * flag that these packets cannot be
1353 * recycled, they have to be returned to
1354 * the user
1356 packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;
1358 /* Caller needs to free this upon any failure */
1359 list_add_tail(&packet->list, queue);
1361 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
1362 status = -ECANCELED;
1363 break;
1366 if (j) {
1367 packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
1368 packet->info.rx.exp_hdr = 0xFFFFFFFF;
1369 } else
1370 /* set expected look ahead */
1371 packet->info.rx.exp_hdr = *lk_ahds;
1373 packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
1374 HTC_HDR_LENGTH;
1377 return status;
1380 static int ath6kl_htc_rx_alloc(struct htc_target *target,
1381 u32 lk_ahds[], int msg,
1382 struct htc_endpoint *endpoint,
1383 struct list_head *queue)
1385 int status = 0;
1386 struct htc_packet *packet, *tmp_pkt;
1387 struct htc_frame_hdr *htc_hdr;
1388 int i, n_msg;
1390 spin_lock_bh(&target->rx_lock);
1392 for (i = 0; i < msg; i++) {
1394 htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
1396 if (htc_hdr->eid >= ENDPOINT_MAX) {
1397 ath6kl_err("invalid ep in look-ahead: %d\n",
1398 htc_hdr->eid);
1399 status = -ENOMEM;
1400 break;
1403 if (htc_hdr->eid != endpoint->eid) {
1404 ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
1405 htc_hdr->eid, endpoint->eid, i);
1406 status = -ENOMEM;
1407 break;
1410 if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
1411 ath6kl_err("payload len %d exceeds max htc : %d !\n",
1412 htc_hdr->payld_len,
1413 (u32) HTC_MAX_PAYLOAD_LENGTH);
1414 status = -ENOMEM;
1415 break;
1418 if (endpoint->svc_id == 0) {
1419 ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
1420 status = -ENOMEM;
1421 break;
1424 if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
1426 * HTC header indicates that every packet to follow
1427 * has the same padded length so that it can be
1428 * optimally fetched as a full bundle.
1430 n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
1431 HTC_FLG_RX_BNDL_CNT_S;
1433 /* the count doesn't include the starter frame */
1434 n_msg++;
1435 if (n_msg > target->msg_per_bndl_max) {
1436 status = -ENOMEM;
1437 break;
1440 endpoint->ep_st.rx_bundle_from_hdr += 1;
1441 ath6kl_dbg(ATH6KL_DBG_HTC,
1442 "htc rx bundle pkts %d\n",
1443 n_msg);
1444 } else
1445 /* HTC header only indicates 1 message to fetch */
1446 n_msg = 1;
1448 /* Setup packet buffers for each message */
1449 status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
1450 queue, n_msg);
                /*
                 * This is due to unavailability of buffers to rx entire data.
                 * Return no error so that free buffers from queue can be used
                 * to receive partial data.
                 */
1457 if (status == -ENOSPC) {
1458 spin_unlock_bh(&target->rx_lock);
1459 return 0;
1462 if (status)
1463 break;
1466 spin_unlock_bh(&target->rx_lock);
1468 if (status) {
1469 list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
1470 list_del(&packet->list);
1471 htc_reclaim_rxbuf(target, packet,
1472 &target->endpoint[packet->endpoint]);
1476 return status;
1479 static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
1481 if (packets->endpoint != ENDPOINT_0) {
1482 WARN_ON(1);
1483 return;
1486 if (packets->status == -ECANCELED) {
1487 reclaim_rx_ctrl_buf(context, packets);
1488 return;
1491 if (packets->act_len > 0) {
1492 ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
1493 packets->act_len + HTC_HDR_LENGTH);
1495 ath6kl_dbg_dump(ATH6KL_DBG_HTC,
1496 "htc rx unexpected endpoint 0 message", "",
1497 packets->buf - HTC_HDR_LENGTH,
1498 packets->act_len + HTC_HDR_LENGTH);
1501 htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
1504 static void htc_proc_cred_rpt(struct htc_target *target,
1505 struct htc_credit_report *rpt,
1506 int n_entries,
1507 enum htc_endpoint_id from_ep)
1509 struct htc_endpoint *endpoint;
1510 int tot_credits = 0, i;
1511 bool dist = false;
1513 spin_lock_bh(&target->tx_lock);
1515 for (i = 0; i < n_entries; i++, rpt++) {
1516 if (rpt->eid >= ENDPOINT_MAX) {
1517 WARN_ON(1);
1518 spin_unlock_bh(&target->tx_lock);
1519 return;
1522 endpoint = &target->endpoint[rpt->eid];
1524 ath6kl_dbg(ATH6KL_DBG_CREDIT,
1525 "credit report ep %d credits %d\n",
1526 rpt->eid, rpt->credits);
1528 endpoint->ep_st.tx_cred_rpt += 1;
1529 endpoint->ep_st.cred_retnd += rpt->credits;
1531 if (from_ep == rpt->eid) {
1533 * This credit report arrived on the same endpoint
1534 * indicating it arrived in an RX packet.
1536 endpoint->ep_st.cred_from_rx += rpt->credits;
1537 endpoint->ep_st.cred_rpt_from_rx += 1;
1538 } else if (from_ep == ENDPOINT_0) {
1539 /* credit arrived on endpoint 0 as a NULL message */
1540 endpoint->ep_st.cred_from_ep0 += rpt->credits;
1541 endpoint->ep_st.cred_rpt_ep0 += 1;
1542 } else {
1543 endpoint->ep_st.cred_from_other += rpt->credits;
1544 endpoint->ep_st.cred_rpt_from_other += 1;
1547 if (rpt->eid == ENDPOINT_0)
1548 /* always give endpoint 0 credits back */
1549 endpoint->cred_dist.credits += rpt->credits;
1550 else {
1551 endpoint->cred_dist.cred_to_dist += rpt->credits;
1552 dist = true;
1556 * Refresh tx depth for distribution function that will
1557 * recover these credits NOTE: this is only valid when
1558 * there are credits to recover!
1560 endpoint->cred_dist.txq_depth =
1561 get_queue_depth(&endpoint->txq);
1563 tot_credits += rpt->credits;
1566 if (dist) {
1568 * This was a credit return based on a completed send
1569 * operations note, this is done with the lock held
1571 ath6kl_credit_distribute(target->credit_info,
1572 &target->cred_dist_list,
1573 HTC_CREDIT_DIST_SEND_COMPLETE);
1576 spin_unlock_bh(&target->tx_lock);
1578 if (tot_credits)
1579 htc_chk_ep_txq(target);
1582 static int htc_parse_trailer(struct htc_target *target,
1583 struct htc_record_hdr *record,
1584 u8 *record_buf, u32 *next_lk_ahds,
1585 enum htc_endpoint_id endpoint,
1586 int *n_lk_ahds)
1588 struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
1589 struct htc_lookahead_report *lk_ahd;
1590 int len;
1592 switch (record->rec_id) {
1593 case HTC_RECORD_CREDITS:
1594 len = record->len / sizeof(struct htc_credit_report);
1595 if (!len) {
1596 WARN_ON(1);
1597 return -EINVAL;
1600 htc_proc_cred_rpt(target,
1601 (struct htc_credit_report *) record_buf,
1602 len, endpoint);
1603 break;
1604 case HTC_RECORD_LOOKAHEAD:
1605 len = record->len / sizeof(*lk_ahd);
1606 if (!len) {
1607 WARN_ON(1);
1608 return -EINVAL;
1611 lk_ahd = (struct htc_lookahead_report *) record_buf;
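                /*
                 * Sanity check: pre_valid must be the bitwise complement of
                 * post_valid (e.g. 0xA5 and 0x5A) for the lookahead bytes to
                 * be trusted.  The example values here are illustrative only.
                 */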
1612 if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
1613 && next_lk_ahds) {
1615 ath6kl_dbg(ATH6KL_DBG_HTC,
1616 "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
1617 lk_ahd->pre_valid, lk_ahd->post_valid);
1619 /* look ahead bytes are valid, copy them over */
1620 memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
1622 ath6kl_dbg_dump(ATH6KL_DBG_HTC,
1623 "htc rx next look ahead",
1624 "", next_lk_ahds, 4);
1626 *n_lk_ahds = 1;
1628 break;
1629 case HTC_RECORD_LOOKAHEAD_BUNDLE:
1630 len = record->len / sizeof(*bundle_lkahd_rpt);
1631 if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
1632 WARN_ON(1);
1633 return -EINVAL;
1636 if (next_lk_ahds) {
1637 int i;
1639 bundle_lkahd_rpt =
1640 (struct htc_bundle_lkahd_rpt *) record_buf;
1642 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
1643 "", record_buf, record->len);
1645 for (i = 0; i < len; i++) {
1646 memcpy((u8 *)&next_lk_ahds[i],
1647 bundle_lkahd_rpt->lk_ahd, 4);
1648 bundle_lkahd_rpt++;
1651 *n_lk_ahds = i;
1653 break;
1654 default:
1655 ath6kl_err("unhandled record: id:%d len:%d\n",
1656 record->rec_id, record->len);
1657 break;
1660 return 0;
1664 static int htc_proc_trailer(struct htc_target *target,
1665 u8 *buf, int len, u32 *next_lk_ahds,
1666 int *n_lk_ahds, enum htc_endpoint_id endpoint)
1668 struct htc_record_hdr *record;
1669 int orig_len;
1670 int status;
1671 u8 *record_buf;
1672 u8 *orig_buf;
1674 ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
1675 ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);
1677 orig_buf = buf;
1678 orig_len = len;
1679 status = 0;
1681 while (len > 0) {
1683 if (len < sizeof(struct htc_record_hdr)) {
1684 status = -ENOMEM;
1685 break;
1687 /* these are byte aligned structs */
1688 record = (struct htc_record_hdr *) buf;
1689 len -= sizeof(struct htc_record_hdr);
1690 buf += sizeof(struct htc_record_hdr);
1692 if (record->len > len) {
1693 ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
1694 record->len, record->rec_id, len);
1695 status = -ENOMEM;
1696 break;
1698 record_buf = buf;
1700 status = htc_parse_trailer(target, record, record_buf,
1701 next_lk_ahds, endpoint, n_lk_ahds);
1703 if (status)
1704 break;
1706 /* advance buffer past this record for next time around */
1707 buf += record->len;
1708 len -= record->len;
1711 if (status)
1712 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
1713 "", orig_buf, orig_len);
1715 return status;
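/*
 * Validate the HTC header of a received packet against the expected
 * lookahead, strip the header, and process any trailer records (credit
 * reports and lookahead reports) appended by the target.
 */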
1718 static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
1719 struct htc_packet *packet,
1720 u32 *next_lkahds, int *n_lkahds)
1722 int status = 0;
1723 u16 payload_len;
1724 u32 lk_ahd;
1725 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;
1727 if (n_lkahds != NULL)
1728 *n_lkahds = 0;
1731 * NOTE: we cannot assume the alignment of buf, so we use the safe
1732 * macros to retrieve 16 bit fields.
1734 payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
1736 memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));
1738 if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
1740 * Refresh the expected header and the actual length as it
1741 * was unknown when this packet was grabbed as part of the
1742 * bundle.
1744 packet->info.rx.exp_hdr = lk_ahd;
1745 packet->act_len = payload_len + HTC_HDR_LENGTH;
1747 /* validate the actual header that was refreshed */
1748 if (packet->act_len > packet->buf_len) {
1749 ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
1750 payload_len, lk_ahd);
1752 * Limit this to max buffer just to print out some
1753 * of the buffer.
1755 packet->act_len = min(packet->act_len, packet->buf_len);
1756 status = -ENOMEM;
1757 goto fail_rx;
1760 if (packet->endpoint != htc_hdr->eid) {
1761 ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
1762 htc_hdr->eid, packet->endpoint);
1763 status = -ENOMEM;
1764 goto fail_rx;
1768 if (lk_ahd != packet->info.rx.exp_hdr) {
1769 ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
1770 __func__, packet, packet->info.rx.rx_flags);
1771 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
1772 "", &packet->info.rx.exp_hdr, 4);
1773 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
1774 "", (u8 *)&lk_ahd, sizeof(lk_ahd));
1775 status = -ENOMEM;
1776 goto fail_rx;
1779 if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
1780 if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
1781 htc_hdr->ctrl[0] > payload_len) {
1782 ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
1783 __func__, payload_len, htc_hdr->ctrl[0]);
1784 status = -ENOMEM;
1785 goto fail_rx;
1788 if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
1789 next_lkahds = NULL;
1790 n_lkahds = NULL;
1793 status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
1794 + payload_len - htc_hdr->ctrl[0],
1795 htc_hdr->ctrl[0], next_lkahds,
1796 n_lkahds, packet->endpoint);
1798 if (status)
1799 goto fail_rx;
1801 packet->act_len -= htc_hdr->ctrl[0];
1804 packet->buf += HTC_HDR_LENGTH;
1805 packet->act_len -= HTC_HDR_LENGTH;
1807 fail_rx:
1808 if (status)
1809 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
1810 "", packet->buf, packet->act_len);
1812 return status;
1815 static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
1816 struct htc_packet *packet)
1818 ath6kl_dbg(ATH6KL_DBG_HTC,
1819 "htc rx complete ep %d packet 0x%p\n",
1820 endpoint->eid, packet);
1821 endpoint->ep_cb.rx(endpoint->target, packet);
1824 static int ath6kl_htc_rx_bundle(struct htc_target *target,
1825 struct list_head *rxq,
1826 struct list_head *sync_compq,
1827 int *n_pkt_fetched, bool part_bundle)
1829 struct hif_scatter_req *scat_req;
1830 struct htc_packet *packet;
1831 int rem_space = target->max_rx_bndl_sz;
1832 int n_scat_pkt, status = 0, i, len;
1834 n_scat_pkt = get_queue_depth(rxq);
1835 n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);
1837 if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
1839 * We were forced to split this bundle receive operation
1840 * all packets in this partial bundle must have their
1841 * lookaheads ignored.
1843 part_bundle = true;
1846 * This would only happen if the target ignored our max
1847 * bundle limit.
1849 ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
1850 __func__, get_queue_depth(rxq), n_scat_pkt);
1853 len = 0;
1855 ath6kl_dbg(ATH6KL_DBG_HTC,
1856 "htc rx bundle depth %d pkts %d\n",
1857 get_queue_depth(rxq), n_scat_pkt);
1859 scat_req = hif_scatter_req_get(target->dev->ar);
1861 if (scat_req == NULL)
1862 goto fail_rx_pkt;
1864 for (i = 0; i < n_scat_pkt; i++) {
1865 int pad_len;
1867 packet = list_first_entry(rxq, struct htc_packet, list);
1868 list_del(&packet->list);
1870 pad_len = CALC_TXRX_PADDED_LEN(target,
1871 packet->act_len);
1873 if ((rem_space - pad_len) < 0) {
1874 list_add(&packet->list, rxq);
1875 break;
1878 rem_space -= pad_len;
1880 if (part_bundle || (i < (n_scat_pkt - 1)))
1882 * Packet 0..n-1 cannot be checked for look-aheads
1883 * since we are fetching a bundle the last packet
1884 * however can have it's lookahead used
1886 packet->info.rx.rx_flags |=
1887 HTC_RX_PKT_IGNORE_LOOKAHEAD;
1889 /* NOTE: 1 HTC packet per scatter entry */
1890 scat_req->scat_list[i].buf = packet->buf;
1891 scat_req->scat_list[i].len = pad_len;
1893 packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;
1895 list_add_tail(&packet->list, sync_compq);
1897 WARN_ON(!scat_req->scat_list[i].len);
1898 len += scat_req->scat_list[i].len;
1901 scat_req->len = len;
1902 scat_req->scat_entries = i;
1904 status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);
1906 if (!status)
1907 *n_pkt_fetched = i;
1909 /* free scatter request */
1910 hif_scatter_req_add(target->dev->ar, scat_req);
1912 fail_rx_pkt:
1914 return status;
1917 static int ath6kl_htc_rx_process_packets(struct htc_target *target,
1918 struct list_head *comp_pktq,
1919 u32 lk_ahds[],
1920 int *n_lk_ahd)
1922 struct htc_packet *packet, *tmp_pkt;
1923 struct htc_endpoint *ep;
1924 int status = 0;
1926 list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
1927 ep = &target->endpoint[packet->endpoint];
1929 /* process header for each of the recv packet */
1930 status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
1931 n_lk_ahd);
1932 if (status)
1933 return status;
1935 list_del(&packet->list);
1937 if (list_empty(comp_pktq)) {
1939 * Last packet's more packet flag is set
1940 * based on the lookahead.
1942 if (*n_lk_ahd > 0)
1943 ath6kl_htc_rx_set_indicate(lk_ahds[0],
1944 ep, packet);
1945 } else
1947 * Packets in a bundle automatically have
1948 * this flag set.
1950 packet->info.rx.indicat_flags |=
1951 HTC_RX_FLAGS_INDICATE_MORE_PKTS;
1953 ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);
1955 if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
1956 ep->ep_st.rx_bundl += 1;
1958 ath6kl_htc_rx_complete(ep, packet);
1961 return status;
1964 static int ath6kl_htc_rx_fetch(struct htc_target *target,
1965 struct list_head *rx_pktq,
1966 struct list_head *comp_pktq)
1968 int fetched_pkts;
1969 bool part_bundle = false;
1970 int status = 0;
1971 struct list_head tmp_rxq;
1972 struct htc_packet *packet, *tmp_pkt;
1974 /* now go fetch the list of HTC packets */
1975 while (!list_empty(rx_pktq)) {
1976 fetched_pkts = 0;
1978 INIT_LIST_HEAD(&tmp_rxq);
1980 if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
1982 * There are enough packets to attempt a
1983 * bundle transfer and recv bundling is
1984 * allowed.
1986 status = ath6kl_htc_rx_bundle(target, rx_pktq,
1987 &tmp_rxq,
1988 &fetched_pkts,
1989 part_bundle);
1990 if (status)
1991 goto fail_rx;
1993 if (!list_empty(rx_pktq))
1994 part_bundle = true;
1996 list_splice_tail_init(&tmp_rxq, comp_pktq);
1999 if (!fetched_pkts) {
2001 packet = list_first_entry(rx_pktq, struct htc_packet,
2002 list);
2004 /* fully synchronous */
2005 packet->completion = NULL;
2007 if (!list_is_singular(rx_pktq))
2009 * look_aheads in all packet
2010 * except the last one in the
2011 * bundle must be ignored
2013 packet->info.rx.rx_flags |=
2014 HTC_RX_PKT_IGNORE_LOOKAHEAD;
2016 /* go fetch the packet */
2017 status = ath6kl_htc_rx_packet(target, packet,
2018 packet->act_len);
2020 list_move_tail(&packet->list, &tmp_rxq);
2022 if (status)
2023 goto fail_rx;
2025 list_splice_tail_init(&tmp_rxq, comp_pktq);
2029 return 0;
2031 fail_rx:
2034 * Cleanup any packets we allocated but didn't use to
2035 * actually fetch any packets.
2038 list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
2039 list_del(&packet->list);
2040 htc_reclaim_rxbuf(target, packet,
2041 &target->endpoint[packet->endpoint]);
2044 list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
2045 list_del(&packet->list);
2046 htc_reclaim_rxbuf(target, packet,
2047 &target->endpoint[packet->endpoint]);
2050 return status;
2053 int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
2054 u32 msg_look_ahead, int *num_pkts)
2056 struct htc_packet *packets, *tmp_pkt;
2057 struct htc_endpoint *endpoint;
2058 struct list_head rx_pktq, comp_pktq;
2059 int status = 0;
2060 u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
2061 int num_look_ahead = 1;
2062 enum htc_endpoint_id id;
2063 int n_fetched = 0;
2065 *num_pkts = 0;
2068 * On first entry copy the look_aheads into our temp array for
2069 * processing
2071 look_aheads[0] = msg_look_ahead;
2073 while (true) {
2076 * First lookahead sets the expected endpoint IDs for all
2077 * packets in a bundle.
2079 id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
2080 endpoint = &target->endpoint[id];
2082 if (id >= ENDPOINT_MAX) {
2083 ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
2084 id);
2085 status = -ENOMEM;
2086 break;
2089 INIT_LIST_HEAD(&rx_pktq);
2090 INIT_LIST_HEAD(&comp_pktq);
2093 * Try to allocate as many HTC RX packets indicated by the
2094 * look_aheads.
2096 status = ath6kl_htc_rx_alloc(target, look_aheads,
2097 num_look_ahead, endpoint,
2098 &rx_pktq);
2099 if (status)
2100 break;
2102 if (get_queue_depth(&rx_pktq) >= 2)
2104 * A recv bundle was detected, force IRQ status
2105 * re-check again
2107 target->chk_irq_status_cnt = 1;
2109 n_fetched += get_queue_depth(&rx_pktq);
2111 num_look_ahead = 0;
2113 status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);
2115 if (!status)
2116 ath6kl_htc_rx_chk_water_mark(endpoint);
2118 /* Process fetched packets */
2119 status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
2120 look_aheads,
2121 &num_look_ahead);
2123 if (!num_look_ahead || status)
2124 break;
2127 * For SYNCH processing, if we get here, we are running
2128 * through the loop again due to a detected lookahead. Set
2129 * flag that we should re-check IRQ status registers again
2130 * before leaving IRQ processing, this can net better
2131 * performance in high throughput situations.
2133 target->chk_irq_status_cnt = 1;
2136 if (status) {
2137 ath6kl_err("failed to get pending recv messages: %d\n",
2138 status);
2140 /* cleanup any packets in sync completion queue */
2141 list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
2142 list_del(&packets->list);
2143 htc_reclaim_rxbuf(target, packets,
2144 &target->endpoint[packets->endpoint]);
2147 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
2148 ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
2149 ath6kl_hif_rx_control(target->dev, false);
2154 * Before leaving, check to see if host ran out of buffers and
2155 * needs to stop the receiver.
2157 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
2158 ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
2159 ath6kl_hif_rx_control(target->dev, false);
2161 *num_pkts = n_fetched;
2163 return status;
/*
 * Synchronously wait for a control message from the target. This function
 * is used at initialization time ONLY; at init, messages on ENDPOINT 0
 * are expected.
 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_frame_hdr *htc_hdr;
	u32 look_ahead;

	if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
				       HTC_TARGET_RESPONSE_TIMEOUT))
		return NULL;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);

	htc_hdr = (struct htc_frame_hdr *)&look_ahead;

	if (htc_hdr->eid != ENDPOINT_0)
		return NULL;

	packet = htc_get_control_buf(target, false);

	if (!packet)
		return NULL;

	packet->info.rx.rx_flags = 0;
	packet->info.rx.exp_hdr = look_ahead;
	packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

	if (packet->act_len > packet->buf_len)
		goto fail_ctrl_rx;

	/* we want synchronous operation */
	packet->completion = NULL;

	/* get the message from the device, this will block */
	if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
		goto fail_ctrl_rx;

	/* process receive header */
	packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);

	if (packet->status) {
		ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
			   packet->status);
		goto fail_ctrl_rx;
	}

	return packet;

fail_ctrl_rx:
	if (packet != NULL) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return NULL;
}
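/*
 * Note on the helper above: the 32-bit lookahead polled from the mbox is
 * reused as the expected HTC frame header (packet->info.rx.exp_hdr), so
 * the blocking read can be validated against it. Within this file it is
 * only used by ath6kl_htc_conn_service() and ath6kl_htc_wait_target() for
 * synchronous control exchanges on ENDPOINT 0.
 */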
int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
				  struct list_head *pkt_queue)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *first_pkt;
	bool rx_unblock = false;
	int status = 0, depth;

	if (list_empty(pkt_queue))
		return -ENOMEM;

	first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);

	if (first_pkt->endpoint >= ENDPOINT_MAX)
		return status;

	depth = get_queue_depth(pkt_queue);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx add multiple ep id %d cnt %d len %d\n",
		   first_pkt->endpoint, depth, first_pkt->buf_len);

	endpoint = &target->endpoint[first_pkt->endpoint];

	if (target->htc_flags & HTC_OP_STATE_STOPPING) {
		struct htc_packet *packet, *tmp_pkt;

		/* walk through queue and mark each one canceled */
		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
			packet->status = -ECANCELED;
			list_del(&packet->list);
			ath6kl_htc_rx_complete(endpoint, packet);
		}

		return status;
	}

	spin_lock_bh(&target->rx_lock);

	list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);

	/* check if we are blocked waiting for a new buffer */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		if (target->ep_waiting == first_pkt->endpoint) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx blocked on ep %d, unblocking\n",
				   target->ep_waiting);
			target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ENDPOINT_MAX;
			rx_unblock = true;
		}
	}

	spin_unlock_bh(&target->rx_lock);

	if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
		/* TODO: implement a buffer threshold count? */
		ath6kl_hif_rx_control(target->dev, true);

	return status;
}
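/*
 * Note on the function above: when the rx side runs out of buffers for an
 * endpoint, that endpoint is recorded in target->ep_waiting and
 * HTC_RECV_WAIT_BUFFERS is set (in the rx allocation path earlier in this
 * file), and the receiver is disabled as seen in
 * ath6kl_htc_rxmsg_pending_handler() above. Refilling the matching
 * endpoint here clears the flag and re-enables the receiver through
 * ath6kl_hif_rx_control(..., true).
 */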
void ath6kl_htc_flush_rx_buf(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet, *tmp_pkt;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (!endpoint->svc_id)
			/* not in use.. */
			continue;

		spin_lock_bh(&target->rx_lock);
		list_for_each_entry_safe(packet, tmp_pkt,
					 &endpoint->rx_bufq, list) {
			list_del(&packet->list);
			spin_unlock_bh(&target->rx_lock);
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx flush pkt 0x%p len %d ep %d\n",
				   packet, packet->buf_len,
				   packet->endpoint);
			dev_kfree_skb(packet->pkt_cntxt);
			spin_lock_bh(&target->rx_lock);
		}
		spin_unlock_bh(&target->rx_lock);
	}
}
int ath6kl_htc_conn_service(struct htc_target *target,
			    struct htc_service_connect_req *conn_req,
			    struct htc_service_connect_resp *conn_resp)
{
	struct htc_packet *rx_pkt = NULL;
	struct htc_packet *tx_pkt = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	struct htc_endpoint *endpoint;
	enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
	unsigned int max_msg_sz = 0;
	int status = 0;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc connect service target 0x%p service id 0x%x\n",
		   target, conn_req->svc_id);

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_ep = ENDPOINT_0;
		max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
	} else {
		/* allocate a packet to send to the target */
		tx_pkt = htc_get_control_buf(target, true);

		if (!tx_pkt)
			return -ENOMEM;

		conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
		memset(conn_msg, 0, sizeof(*conn_msg));
		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);

		set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
				 sizeof(*conn_msg) + conn_msg->svc_meta_len,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		/* we want synchronous operation */
		tx_pkt->completion = NULL;
		ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
		status = ath6kl_htc_tx_issue(target, tx_pkt);

		if (status)
			goto fail_tx;

		/* wait for response */
		rx_pkt = htc_wait_for_ctrl_msg(target);

		if (!rx_pkt) {
			status = -ENOMEM;
			goto fail_tx;
		}

		resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;

		if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID) ||
		    (rx_pkt->act_len < sizeof(*resp_msg))) {
			status = -ENOMEM;
			goto fail_tx;
		}

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -ENOMEM;
			goto fail_tx;
		}

		assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
		max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
	}

	if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
		status = -ENOMEM;
		goto fail_tx;
	}

	endpoint = &target->endpoint[assigned_ep];
	endpoint->eid = assigned_ep;
	if (endpoint->svc_id) {
		status = -ENOMEM;
		goto fail_tx;
	}

	/* return assigned endpoint to caller */
	conn_resp->endpoint = assigned_ep;
	conn_resp->len_max = max_msg_sz;

	/* setup the endpoint */

	/* this marks the endpoint in use */
	endpoint->svc_id = conn_req->svc_id;

	endpoint->max_txq_depth = conn_req->max_txq_depth;
	endpoint->len_max = max_msg_sz;
	endpoint->ep_cb = conn_req->ep_cb;
	endpoint->cred_dist.svc_id = conn_req->svc_id;
	endpoint->cred_dist.htc_ep = endpoint;
	endpoint->cred_dist.endpoint = assigned_ep;
	endpoint->cred_dist.cred_sz = target->tgt_cred_sz;

	if (conn_req->max_rxmsg_sz) {
		/*
		 * Override the cred_per_msg calculation; this optimizes
		 * the credit-low indications since the host will actually
		 * issue smaller messages in the send path.
		 */
		if (conn_req->max_rxmsg_sz > max_msg_sz) {
			status = -ENOMEM;
			goto fail_tx;
		}
		endpoint->cred_dist.cred_per_msg =
			conn_req->max_rxmsg_sz / target->tgt_cred_sz;
	} else
		endpoint->cred_dist.cred_per_msg =
			max_msg_sz / target->tgt_cred_sz;

	if (!endpoint->cred_dist.cred_per_msg)
		endpoint->cred_dist.cred_per_msg = 1;

	/* save local connection flags */
	endpoint->conn_flags = conn_req->flags;

fail_tx:
	if (tx_pkt)
		htc_reclaim_txctrl_buf(target, tx_pkt);

	if (rx_pkt) {
		htc_rxpkt_reset(rx_pkt);
		reclaim_rx_ctrl_buf(target, rx_pkt);
	}

	return status;
}
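/*
 * Minimal usage sketch for the function above, modeled on the pseudo
 * control-service connect done by ath6kl_htc_wait_target() further down;
 * the field values are illustrative, not a fixed recipe:
 *
 *	struct htc_service_connect_req req;
 *	struct htc_service_connect_resp resp;
 *
 *	memset(&req, 0, sizeof(req));
 *	memset(&resp, 0, sizeof(resp));
 *	req.svc_id = HTC_CTRL_RSVD_SVC;
 *	req.ep_cb.rx = htc_ctrl_rx;
 *	req.max_txq_depth = NUM_CONTROL_BUFFERS;
 *
 *	status = ath6kl_htc_conn_service(target, &req, &resp);
 *
 * On success, resp.endpoint holds the assigned endpoint and resp.len_max
 * the maximum message size allowed on it.
 */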
static void reset_ep_state(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
		endpoint->svc_id = 0;
		endpoint->len_max = 0;
		endpoint->max_txq_depth = 0;
		memset(&endpoint->ep_st, 0,
		       sizeof(endpoint->ep_st));
		INIT_LIST_HEAD(&endpoint->rx_bufq);
		INIT_LIST_HEAD(&endpoint->txq);
		endpoint->target = target;
	}

	/* reset distribution list */
	/* FIXME: free existing entries */
	INIT_LIST_HEAD(&target->cred_dist_list);
}
int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
			     enum htc_endpoint_id endpoint)
{
	int num;

	spin_lock_bh(&target->rx_lock);
	num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
	spin_unlock_bh(&target->rx_lock);
	return num;
}
static void htc_setup_msg_bndl(struct htc_target *target)
{
	/* limit what HTC can handle */
	target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
				       target->msg_per_bndl_max);

	if (ath6kl_hif_enable_scatter(target->dev->ar)) {
		target->msg_per_bndl_max = 0;
		return;
	}

	/* limit the bundle to what the device layer can handle */
	target->msg_per_bndl_max = min(target->max_scat_entries,
				       target->msg_per_bndl_max);

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "htc bundling allowed msg_per_bndl_max %d\n",
		   target->msg_per_bndl_max);

	/* Max rx bundle size is limited by the max tx bundle size */
	target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
	/* Max tx bundle size is limited by the extended mbox address range */
	target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
				     target->max_xfer_szper_scatreq);

	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
		   target->max_rx_bndl_sz, target->max_tx_bndl_sz);

	if (target->max_tx_bndl_sz)
		target->tx_bndl_enable = true;

	if (target->max_rx_bndl_sz)
		target->rx_bndl_enable = true;

	if ((target->tgt_cred_sz % target->block_sz) != 0) {
		ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
			    target->tgt_cred_sz);

		/*
		 * Disallow send bundling since the credit size is not
		 * aligned to a block size; the I/O block padding would
		 * spill into the next credit buffer, which is fatal.
		 */
		target->tx_bndl_enable = false;
	}
}
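/*
 * Worked example for the alignment check above (the numbers are purely
 * illustrative): with block_sz = 128 and tgt_cred_sz = 1500,
 * 1500 % 128 = 92, so a credit-sized write padded to the next block
 * boundary (1536 bytes) would spill 36 bytes into the following credit
 * buffer; tx bundling is therefore disabled. With tgt_cred_sz = 1536 the
 * remainder is 0 and send bundling stays enabled.
 */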
int ath6kl_htc_wait_target(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_ready_ext_msg *rdy_msg;
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp resp;
	int status;

	/* we should be getting 1 control message that the target is ready */
	packet = htc_wait_for_ctrl_msg(target);

	if (!packet)
		return -ENOMEM;

	/* we controlled the buffer creation so it's properly aligned */
	rdy_msg = (struct htc_ready_ext_msg *)packet->buf;

	if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
	    (packet->act_len < sizeof(struct htc_ready_msg))) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
	target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "htc target ready credits %d size %d\n",
		   target->tgt_creds, target->tgt_cred_sz);

	/* check if this is an extended ready message */
	if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
		/* this is an extended message */
		target->htc_tgt_ver = rdy_msg->htc_ver;
		target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
	} else {
		/* legacy */
		target->htc_tgt_ver = HTC_VERSION_2P0;
		target->msg_per_bndl_max = 0;
	}

	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
		   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
		   target->htc_tgt_ver);

	if (target->msg_per_bndl_max > 0)
		htc_setup_msg_bndl(target);

	/* setup our pseudo HTC control endpoint connection */
	memset(&connect, 0, sizeof(connect));
	memset(&resp, 0, sizeof(resp));
	connect.ep_cb.rx = htc_ctrl_rx;
	connect.ep_cb.rx_refill = NULL;
	connect.ep_cb.tx_full = NULL;
	connect.max_txq_depth = NUM_CONTROL_BUFFERS;
	connect.svc_id = HTC_CTRL_RSVD_SVC;

	/* connect fake service */
	status = ath6kl_htc_conn_service((void *)target, &connect, &resp);

	if (status)
		/*
		 * FIXME: this call doesn't make sense, the caller should
		 * call ath6kl_htc_cleanup() when it wants to remove HTC.
		 */
		ath6kl_hif_cleanup_scatter(target->dev->ar);

fail_wait_target:
	if (packet) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return status;
}
/*
 * Start HTC, enable interrupts and let the target know the host has
 * finished setup.
 */
int ath6kl_htc_start(struct htc_target *target)
{
	struct htc_packet *packet;
	int status;

	memset(&target->dev->irq_proc_reg, 0,
	       sizeof(target->dev->irq_proc_reg));

	/* Disable interrupts at the chip level */
	ath6kl_hif_disable_intrs(target->dev);

	target->htc_flags = 0;
	target->rx_st_flags = 0;

	/* Push control receive buffers into htc control endpoint */
	while ((packet = htc_get_control_buf(target, false)) != NULL) {
		status = htc_add_rxbuf(target, packet);
		if (status)
			return status;
	}

	/* NOTE: the first entry in the distribution list is ENDPOINT_0 */
	ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
			   target->tgt_creds);

	dump_cred_dist_stats(target);

	/* Indicate to the target that setup is complete */
	status = htc_setup_tx_complete(target);

	if (status)
		return status;

	/* unmask interrupts */
	status = ath6kl_hif_unmask_intrs(target->dev);

	if (status)
		ath6kl_htc_stop(target);

	return status;
}
static int ath6kl_htc_reset(struct htc_target *target)
{
	u32 block_size, ctrl_bufsz;
	struct htc_packet *packet;
	int i;

	reset_ep_state(target);

	block_size = target->dev->ar->mbox_info.block_size;

	ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
		      (block_size + HTC_HDR_LENGTH) :
		      (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);

	for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
		packet = kzalloc(sizeof(*packet), GFP_KERNEL);
		if (!packet)
			return -ENOMEM;

		packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
		if (!packet->buf_start) {
			kfree(packet);
			return -ENOMEM;
		}

		packet->buf_len = ctrl_bufsz;
		if (i < NUM_CONTROL_RX_BUFFERS) {
			packet->act_len = 0;
			packet->buf = packet->buf_start;
			packet->endpoint = ENDPOINT_0;
			list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
		} else
			list_add_tail(&packet->list, &target->free_ctrl_txbuf);
	}

	return 0;
}
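/*
 * Sizing note for the reset path above: each control buffer is sized to
 * hold either a full I/O block or a maximum control message, whichever is
 * larger, plus the HTC header. With a hypothetical 128-byte SDIO block
 * size and the usual case where HTC_MAX_CTRL_MSG_LEN is larger than one
 * block, ctrl_bufsz ends up as HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH. The
 * first NUM_CONTROL_RX_BUFFERS packets feed the rx free list, the rest
 * the tx free list.
 */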
/* htc_stop: stop interrupt reception, and flush all queued buffers */
void ath6kl_htc_stop(struct htc_target *target)
{
	spin_lock_bh(&target->htc_lock);
	target->htc_flags |= HTC_OP_STATE_STOPPING;
	spin_unlock_bh(&target->htc_lock);

	/*
	 * Masking interrupts is a synchronous operation; when this
	 * function returns, all pending HIF I/O has completed and we can
	 * safely flush the queues.
	 */
	ath6kl_hif_mask_intrs(target->dev);

	ath6kl_htc_flush_txep_all(target);

	ath6kl_htc_flush_rx_buf(target);

	ath6kl_htc_reset(target);
}
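/*
 * Rough lifecycle of this layer, as suggested by the entry points in this
 * file (the exact ordering is driven by the rest of the driver):
 * ath6kl_htc_create() -> ath6kl_htc_wait_target() ->
 * ath6kl_htc_conn_service() for each service -> ath6kl_htc_start(), and
 * on teardown ath6kl_htc_stop() followed by ath6kl_htc_cleanup().
 */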
void *ath6kl_htc_create(struct ath6kl *ar)
{
	struct htc_target *target = NULL;
	int status = 0;

	target = kzalloc(sizeof(*target), GFP_KERNEL);
	if (!target) {
		ath6kl_err("unable to allocate memory\n");
		return NULL;
	}

	target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
	if (!target->dev) {
		ath6kl_err("unable to allocate memory\n");
		status = -ENOMEM;
		goto err_htc_cleanup;
	}

	spin_lock_init(&target->htc_lock);
	spin_lock_init(&target->rx_lock);
	spin_lock_init(&target->tx_lock);

	INIT_LIST_HEAD(&target->free_ctrl_txbuf);
	INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
	INIT_LIST_HEAD(&target->cred_dist_list);

	target->dev->ar = ar;
	target->dev->htc_cnxt = target;
	target->ep_waiting = ENDPOINT_MAX;

	status = ath6kl_hif_setup(target->dev);
	if (status)
		goto err_htc_cleanup;

	status = ath6kl_htc_reset(target);
	if (status)
		goto err_htc_cleanup;

	return target;

err_htc_cleanup:
	ath6kl_htc_cleanup(target);

	return NULL;
}
/* cleanup the HTC instance */
void ath6kl_htc_cleanup(struct htc_target *target)
{
	struct htc_packet *packet, *tmp_packet;

	ath6kl_hif_cleanup_scatter(target->dev->ar);

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_txbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	list_for_each_entry_safe(packet, tmp_packet,
				 &target->free_ctrl_rxbuf, list) {
		list_del(&packet->list);
		kfree(packet->buf_start);
		kfree(packet);
	}

	kfree(target->dev);
	kfree(target);
}