2 * Copyright (c) 2007-2011 Atheros Communications Inc.
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 #define HTC_PACKET_CONTAINER_ALLOCATION 32
22 #define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH)
24 static int ath6kl_htc_pipe_tx(struct htc_target
*handle
,
25 struct htc_packet
*packet
);
26 static void ath6kl_htc_pipe_cleanup(struct htc_target
*handle
);
28 /* htc pipe tx path */
29 static inline void restore_tx_packet(struct htc_packet
*packet
)
31 if (packet
->info
.tx
.flags
& HTC_FLAGS_TX_FIXUP_NETBUF
) {
32 skb_pull(packet
->skb
, sizeof(struct htc_frame_hdr
));
33 packet
->info
.tx
.flags
&= ~HTC_FLAGS_TX_FIXUP_NETBUF
;
37 static void do_send_completion(struct htc_endpoint
*ep
,
38 struct list_head
*queue_to_indicate
)
40 struct htc_packet
*packet
;
42 if (list_empty(queue_to_indicate
)) {
43 /* nothing to indicate */
47 if (ep
->ep_cb
.tx_comp_multi
!= NULL
) {
48 ath6kl_dbg(ATH6KL_DBG_HTC
,
49 "%s: calling ep %d, send complete multiple callback (%d pkts)\n",
51 get_queue_depth(queue_to_indicate
));
53 * a multiple send complete handler is being used,
54 * pass the queue to the handler
56 ep
->ep_cb
.tx_comp_multi(ep
->target
, queue_to_indicate
);
58 * all packets are now owned by the callback,
59 * reset queue to be safe
61 INIT_LIST_HEAD(queue_to_indicate
);
63 /* using legacy EpTxComplete */
65 packet
= list_first_entry(queue_to_indicate
,
66 struct htc_packet
, list
);
68 list_del(&packet
->list
);
69 ath6kl_dbg(ATH6KL_DBG_HTC
,
70 "%s: calling ep %d send complete callback on packet 0x%p\n",
71 __func__
, ep
->eid
, packet
);
72 ep
->ep_cb
.tx_complete(ep
->target
, packet
);
73 } while (!list_empty(queue_to_indicate
));
77 static void send_packet_completion(struct htc_target
*target
,
78 struct htc_packet
*packet
)
80 struct htc_endpoint
*ep
= &target
->endpoint
[packet
->endpoint
];
81 struct list_head container
;
83 restore_tx_packet(packet
);
84 INIT_LIST_HEAD(&container
);
85 list_add_tail(&packet
->list
, &container
);
88 do_send_completion(ep
, &container
);
91 static void get_htc_packet_credit_based(struct htc_target
*target
,
92 struct htc_endpoint
*ep
,
93 struct list_head
*queue
)
98 struct htc_packet
*packet
;
99 unsigned int transfer_len
;
101 /* NOTE : the TX lock is held when this function is called */
103 /* loop until we can grab as many packets out of the queue as we can */
106 if (list_empty(&ep
->txq
))
109 /* get packet at head, but don't remove it */
110 packet
= list_first_entry(&ep
->txq
, struct htc_packet
, list
);
112 ath6kl_dbg(ATH6KL_DBG_HTC
,
113 "%s: got head packet:0x%p , queue depth: %d\n",
114 __func__
, packet
, get_queue_depth(&ep
->txq
));
116 transfer_len
= packet
->act_len
+ HTC_HDR_LENGTH
;
118 if (transfer_len
<= target
->tgt_cred_sz
) {
119 credits_required
= 1;
121 /* figure out how many credits this message requires */
122 credits_required
= transfer_len
/ target
->tgt_cred_sz
;
123 remainder
= transfer_len
% target
->tgt_cred_sz
;
129 ath6kl_dbg(ATH6KL_DBG_HTC
, "%s: creds required:%d got:%d\n",
130 __func__
, credits_required
, ep
->cred_dist
.credits
);
132 if (ep
->eid
== ENDPOINT_0
) {
134 * endpoint 0 is special, it always has a credit and
135 * does not require credit based flow control
137 credits_required
= 0;
140 if (ep
->cred_dist
.credits
< credits_required
)
143 ep
->cred_dist
.credits
-= credits_required
;
144 ep
->ep_st
.cred_cosumd
+= credits_required
;
146 /* check if we need credits back from the target */
147 if (ep
->cred_dist
.credits
<
148 ep
->cred_dist
.cred_per_msg
) {
149 /* tell the target we need credits ASAP! */
150 send_flags
|= HTC_FLAGS_NEED_CREDIT_UPDATE
;
151 ep
->ep_st
.cred_low_indicate
+= 1;
152 ath6kl_dbg(ATH6KL_DBG_HTC
,
153 "%s: host needs credits\n",
158 /* now we can fully dequeue */
159 packet
= list_first_entry(&ep
->txq
, struct htc_packet
, list
);
161 list_del(&packet
->list
);
162 /* save the number of credits this packet consumed */
163 packet
->info
.tx
.cred_used
= credits_required
;
164 /* save send flags */
165 packet
->info
.tx
.flags
= send_flags
;
166 packet
->info
.tx
.seqno
= ep
->seqno
;
168 /* queue this packet into the caller's queue */
169 list_add_tail(&packet
->list
, queue
);
173 static void get_htc_packet(struct htc_target
*target
,
174 struct htc_endpoint
*ep
,
175 struct list_head
*queue
, int resources
)
177 struct htc_packet
*packet
;
179 /* NOTE : the TX lock is held when this function is called */
181 /* loop until we can grab as many packets out of the queue as we can */
183 if (list_empty(&ep
->txq
))
186 packet
= list_first_entry(&ep
->txq
, struct htc_packet
, list
);
187 list_del(&packet
->list
);
189 ath6kl_dbg(ATH6KL_DBG_HTC
,
190 "%s: got packet:0x%p , new queue depth: %d\n",
191 __func__
, packet
, get_queue_depth(&ep
->txq
));
192 packet
->info
.tx
.seqno
= ep
->seqno
;
193 packet
->info
.tx
.flags
= 0;
194 packet
->info
.tx
.cred_used
= 0;
197 /* queue this packet into the caller's queue */
198 list_add_tail(&packet
->list
, queue
);
203 static int htc_issue_packets(struct htc_target
*target
,
204 struct htc_endpoint
*ep
,
205 struct list_head
*pkt_queue
)
210 struct htc_frame_hdr
*htc_hdr
;
211 struct htc_packet
*packet
;
213 ath6kl_dbg(ATH6KL_DBG_HTC
,
214 "%s: queue: 0x%p, pkts %d\n", __func__
,
215 pkt_queue
, get_queue_depth(pkt_queue
));
217 while (!list_empty(pkt_queue
)) {
218 packet
= list_first_entry(pkt_queue
, struct htc_packet
, list
);
219 list_del(&packet
->list
);
228 payload_len
= packet
->act_len
;
230 /* setup HTC frame header */
231 htc_hdr
= skb_push(skb
, sizeof(*htc_hdr
));
238 packet
->info
.tx
.flags
|= HTC_FLAGS_TX_FIXUP_NETBUF
;
241 put_unaligned((u16
) payload_len
, &htc_hdr
->payld_len
);
242 htc_hdr
->flags
= packet
->info
.tx
.flags
;
243 htc_hdr
->eid
= (u8
) packet
->endpoint
;
244 htc_hdr
->ctrl
[0] = 0;
245 htc_hdr
->ctrl
[1] = (u8
) packet
->info
.tx
.seqno
;
247 spin_lock_bh(&target
->tx_lock
);
249 /* store in look up queue to match completions */
250 list_add_tail(&packet
->list
, &ep
->pipe
.tx_lookup_queue
);
251 ep
->ep_st
.tx_issued
+= 1;
252 spin_unlock_bh(&target
->tx_lock
);
254 status
= ath6kl_hif_pipe_send(target
->dev
->ar
,
255 ep
->pipe
.pipeid_ul
, NULL
, skb
);
258 if (status
!= -ENOMEM
) {
259 /* TODO: if more than 1 endpoint maps to the
260 * same PipeID, it is possible to run out of
261 * resources in the HIF layer.
262 * Don't emit the error
264 ath6kl_dbg(ATH6KL_DBG_HTC
,
265 "%s: failed status:%d\n",
268 spin_lock_bh(&target
->tx_lock
);
269 list_del(&packet
->list
);
271 /* reclaim credits */
272 ep
->cred_dist
.credits
+= packet
->info
.tx
.cred_used
;
273 spin_unlock_bh(&target
->tx_lock
);
275 /* put it back into the callers queue */
276 list_add(&packet
->list
, pkt_queue
);
282 while (!list_empty(pkt_queue
)) {
283 if (status
!= -ENOMEM
) {
284 ath6kl_dbg(ATH6KL_DBG_HTC
,
285 "%s: failed pkt:0x%p status:%d\n",
286 __func__
, packet
, status
);
289 packet
= list_first_entry(pkt_queue
,
290 struct htc_packet
, list
);
291 list_del(&packet
->list
);
292 packet
->status
= status
;
293 send_packet_completion(target
, packet
);
300 static enum htc_send_queue_result
htc_try_send(struct htc_target
*target
,
301 struct htc_endpoint
*ep
,
302 struct list_head
*txq
)
304 struct list_head send_queue
; /* temp queue to hold packets */
305 struct htc_packet
*packet
, *tmp_pkt
;
306 struct ath6kl
*ar
= target
->dev
->ar
;
307 enum htc_send_full_action action
;
308 int tx_resources
, overflow
, txqueue_depth
, i
, good_pkts
;
311 ath6kl_dbg(ATH6KL_DBG_HTC
, "%s: (queue:0x%p depth:%d)\n",
313 (txq
== NULL
) ? 0 : get_queue_depth(txq
));
315 /* init the local send queue */
316 INIT_LIST_HEAD(&send_queue
);
319 * txq equals to NULL means
320 * caller didn't provide a queue, just wants us to
321 * check queues and send
324 if (list_empty(txq
)) {
326 return HTC_SEND_QUEUE_DROP
;
329 spin_lock_bh(&target
->tx_lock
);
330 txqueue_depth
= get_queue_depth(&ep
->txq
);
331 spin_unlock_bh(&target
->tx_lock
);
333 if (txqueue_depth
>= ep
->max_txq_depth
) {
334 /* we've already overflowed */
335 overflow
= get_queue_depth(txq
);
337 /* get how much we will overflow by */
338 overflow
= txqueue_depth
;
339 overflow
+= get_queue_depth(txq
);
340 /* get how much we will overflow the TX queue by */
341 overflow
-= ep
->max_txq_depth
;
344 /* if overflow is negative or zero, we are okay */
346 ath6kl_dbg(ATH6KL_DBG_HTC
,
347 "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n",
348 __func__
, ep
->eid
, overflow
, txqueue_depth
,
351 if ((overflow
<= 0) ||
352 (ep
->ep_cb
.tx_full
== NULL
)) {
354 * all packets will fit or caller did not provide send
355 * full indication handler -- just move all of them
356 * to the local send_queue object
358 list_splice_tail_init(txq
, &send_queue
);
360 good_pkts
= get_queue_depth(txq
) - overflow
;
363 return HTC_SEND_QUEUE_DROP
;
366 /* we have overflowed, and a callback is provided */
367 /* dequeue all non-overflow packets to the sendqueue */
368 for (i
= 0; i
< good_pkts
; i
++) {
369 /* pop off caller's queue */
370 packet
= list_first_entry(txq
,
373 /* move to local queue */
374 list_move_tail(&packet
->list
, &send_queue
);
378 * the caller's queue has all the packets that won't fit
379 * walk through the caller's queue and indicate each to
380 * the send full handler
382 list_for_each_entry_safe(packet
, tmp_pkt
,
384 ath6kl_dbg(ATH6KL_DBG_HTC
,
385 "%s: Indicate overflowed TX pkts: %p\n",
387 action
= ep
->ep_cb
.tx_full(ep
->target
, packet
);
388 if (action
== HTC_SEND_FULL_DROP
) {
389 /* callback wants the packet dropped */
390 ep
->ep_st
.tx_dropped
+= 1;
392 /* leave this one in the caller's queue
395 /* callback wants to keep this packet,
396 * move from caller's queue to the send
398 list_move_tail(&packet
->list
,
403 if (list_empty(&send_queue
)) {
404 /* no packets made it in, caller will cleanup */
405 return HTC_SEND_QUEUE_DROP
;
410 if (!ep
->pipe
.tx_credit_flow_enabled
) {
412 ath6kl_hif_pipe_get_free_queue_number(ar
,
418 spin_lock_bh(&target
->tx_lock
);
419 if (!list_empty(&send_queue
)) {
420 /* transfer packets to tail */
421 list_splice_tail_init(&send_queue
, &ep
->txq
);
422 if (!list_empty(&send_queue
)) {
424 spin_unlock_bh(&target
->tx_lock
);
425 return HTC_SEND_QUEUE_DROP
;
427 INIT_LIST_HEAD(&send_queue
);
430 /* increment tx processing count on entry */
433 if (ep
->tx_proc_cnt
> 1) {
435 * Another thread or task is draining the TX queues on this
436 * endpoint that thread will reset the tx processing count
437 * when the queue is drained.
440 spin_unlock_bh(&target
->tx_lock
);
441 return HTC_SEND_QUEUE_OK
;
444 /***** beyond this point only 1 thread may enter ******/
447 * Now drain the endpoint TX queue for transmission as long as we have
448 * enough transmit resources.
451 if (get_queue_depth(&ep
->txq
) == 0)
454 if (ep
->pipe
.tx_credit_flow_enabled
) {
456 * Credit based mechanism provides flow control
457 * based on target transmit resource availability,
458 * we assume that the HIF layer will always have
459 * bus resources greater than target transmit
462 get_htc_packet_credit_based(target
, ep
, &send_queue
);
465 * Get all packets for this endpoint that we can
468 get_htc_packet(target
, ep
, &send_queue
, tx_resources
);
471 if (get_queue_depth(&send_queue
) == 0) {
473 * Didn't get packets due to out of resources or TX
479 spin_unlock_bh(&target
->tx_lock
);
481 /* send what we can */
482 htc_issue_packets(target
, ep
, &send_queue
);
484 if (!ep
->pipe
.tx_credit_flow_enabled
) {
485 pipeid
= ep
->pipe
.pipeid_ul
;
487 ath6kl_hif_pipe_get_free_queue_number(ar
, pipeid
);
490 spin_lock_bh(&target
->tx_lock
);
493 /* done with this endpoint, we can clear the count */
495 spin_unlock_bh(&target
->tx_lock
);
497 return HTC_SEND_QUEUE_OK
;
500 /* htc control packet manipulation */
501 static void destroy_htc_txctrl_packet(struct htc_packet
*packet
)
509 static struct htc_packet
*build_htc_txctrl_packet(void)
511 struct htc_packet
*packet
= NULL
;
514 packet
= kzalloc(sizeof(struct htc_packet
), GFP_KERNEL
);
518 skb
= __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE
, GFP_KERNEL
);
/*
 * Release a TX control packet.  Control packets are allocated on
 * demand (see build_htc_txctrl_packet), not pooled, so freeing simply
 * destroys the packet; target is unused but kept for interface
 * symmetry with the alloc side.
 */
static void htc_free_txctrl_packet(struct htc_target *target,
				   struct htc_packet *packet)
{
	destroy_htc_txctrl_packet(packet);
}
/* Allocate a fresh TX control packet; target is unused (no pooling). */
static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target)
{
	return build_htc_txctrl_packet();
}
/*
 * TX-completion callback for control endpoint packets: the packet has
 * no further use once sent, so just free it.
 */
static void htc_txctrl_complete(struct htc_target *target,
				struct htc_packet *packet)
{
	htc_free_txctrl_packet(target, packet);
}
546 #define MAX_MESSAGE_SIZE 1536
548 static int htc_setup_target_buffer_assignments(struct htc_target
*target
)
550 int status
, credits
, credit_per_maxmsg
, i
;
551 struct htc_pipe_txcredit_alloc
*entry
;
552 unsigned int hif_usbaudioclass
= 0;
554 credit_per_maxmsg
= MAX_MESSAGE_SIZE
/ target
->tgt_cred_sz
;
555 if (MAX_MESSAGE_SIZE
% target
->tgt_cred_sz
)
558 /* TODO, this should be configured by the caller! */
560 credits
= target
->tgt_creds
;
561 entry
= &target
->pipe
.txcredit_alloc
[0];
565 /* FIXME: hif_usbaudioclass is always zero */
566 if (hif_usbaudioclass
) {
567 ath6kl_dbg(ATH6KL_DBG_HTC
,
568 "%s: For USB Audio Class- Total:%d\n",
572 /* Setup VO Service To have Max Credits */
573 entry
->service_id
= WMI_DATA_VO_SVC
;
574 entry
->credit_alloc
= (credits
- 6);
575 if (entry
->credit_alloc
== 0)
576 entry
->credit_alloc
++;
578 credits
-= (int) entry
->credit_alloc
;
583 entry
->service_id
= WMI_CONTROL_SVC
;
584 entry
->credit_alloc
= credit_per_maxmsg
;
585 credits
-= (int) entry
->credit_alloc
;
589 /* leftovers go to best effort */
592 entry
->service_id
= WMI_DATA_BE_SVC
;
593 entry
->credit_alloc
= (u8
) credits
;
597 entry
->service_id
= WMI_DATA_VI_SVC
;
598 entry
->credit_alloc
= credits
/ 4;
599 if (entry
->credit_alloc
== 0)
600 entry
->credit_alloc
++;
602 credits
-= (int) entry
->credit_alloc
;
607 entry
->service_id
= WMI_DATA_VO_SVC
;
608 entry
->credit_alloc
= credits
/ 4;
609 if (entry
->credit_alloc
== 0)
610 entry
->credit_alloc
++;
612 credits
-= (int) entry
->credit_alloc
;
617 entry
->service_id
= WMI_CONTROL_SVC
;
618 entry
->credit_alloc
= credit_per_maxmsg
;
619 credits
-= (int) entry
->credit_alloc
;
624 entry
->service_id
= WMI_DATA_BK_SVC
;
625 entry
->credit_alloc
= credit_per_maxmsg
;
626 credits
-= (int) entry
->credit_alloc
;
630 /* leftovers go to best effort */
632 entry
->service_id
= WMI_DATA_BE_SVC
;
633 entry
->credit_alloc
= (u8
) credits
;
638 for (i
= 0; i
< ENDPOINT_MAX
; i
++) {
639 if (target
->pipe
.txcredit_alloc
[i
].service_id
!= 0) {
640 ath6kl_dbg(ATH6KL_DBG_HTC
,
641 "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
643 target
->pipe
.txcredit_alloc
[i
].
645 target
->pipe
.txcredit_alloc
[i
].
653 /* process credit reports and call distribution function */
654 static void htc_process_credit_report(struct htc_target
*target
,
655 struct htc_credit_report
*rpt
,
657 enum htc_endpoint_id from_ep
)
659 int total_credits
= 0, i
;
660 struct htc_endpoint
*ep
;
662 /* lock out TX while we update credits */
663 spin_lock_bh(&target
->tx_lock
);
665 for (i
= 0; i
< num_entries
; i
++, rpt
++) {
666 if (rpt
->eid
>= ENDPOINT_MAX
) {
668 spin_unlock_bh(&target
->tx_lock
);
672 ep
= &target
->endpoint
[rpt
->eid
];
673 ep
->cred_dist
.credits
+= rpt
->credits
;
675 if (ep
->cred_dist
.credits
&& get_queue_depth(&ep
->txq
)) {
676 spin_unlock_bh(&target
->tx_lock
);
677 htc_try_send(target
, ep
, NULL
);
678 spin_lock_bh(&target
->tx_lock
);
681 total_credits
+= rpt
->credits
;
683 ath6kl_dbg(ATH6KL_DBG_HTC
,
684 "Report indicated %d credits to distribute\n",
687 spin_unlock_bh(&target
->tx_lock
);
690 /* flush endpoint TX queue */
691 static void htc_flush_tx_endpoint(struct htc_target
*target
,
692 struct htc_endpoint
*ep
, u16 tag
)
694 struct htc_packet
*packet
;
696 spin_lock_bh(&target
->tx_lock
);
697 while (get_queue_depth(&ep
->txq
)) {
698 packet
= list_first_entry(&ep
->txq
, struct htc_packet
, list
);
699 list_del(&packet
->list
);
701 send_packet_completion(target
, packet
);
703 spin_unlock_bh(&target
->tx_lock
);
707 * In the adapted HIF layer, struct sk_buff * are passed between HIF and HTC,
708 * since upper layers expects struct htc_packet containers we use the completed
709 * skb and lookup it's corresponding HTC packet buffer from a lookup list.
710 * This is extra overhead that can be fixed by re-aligning HIF interfaces with
713 static struct htc_packet
*htc_lookup_tx_packet(struct htc_target
*target
,
714 struct htc_endpoint
*ep
,
717 struct htc_packet
*packet
, *tmp_pkt
, *found_packet
= NULL
;
719 spin_lock_bh(&target
->tx_lock
);
722 * interate from the front of tx lookup queue
723 * this lookup should be fast since lower layers completes in-order and
724 * so the completed packet should be at the head of the list generally
726 list_for_each_entry_safe(packet
, tmp_pkt
, &ep
->pipe
.tx_lookup_queue
,
728 /* check for removal */
729 if (skb
== packet
->skb
) {
731 list_del(&packet
->list
);
732 found_packet
= packet
;
737 spin_unlock_bh(&target
->tx_lock
);
742 static int ath6kl_htc_pipe_tx_complete(struct ath6kl
*ar
, struct sk_buff
*skb
)
744 struct htc_target
*target
= ar
->htc_target
;
745 struct htc_frame_hdr
*htc_hdr
;
746 struct htc_endpoint
*ep
;
747 struct htc_packet
*packet
;
752 htc_hdr
= (struct htc_frame_hdr
*) netdata
;
754 ep_id
= htc_hdr
->eid
;
755 ep
= &target
->endpoint
[ep_id
];
757 packet
= htc_lookup_tx_packet(target
, ep
, skb
);
758 if (packet
== NULL
) {
759 /* may have already been flushed and freed */
760 ath6kl_err("HTC TX lookup failed!\n");
762 /* will be giving this buffer back to upper layers */
764 send_packet_completion(target
, packet
);
768 if (!ep
->pipe
.tx_credit_flow_enabled
) {
770 * note: when using TX credit flow, the re-checking of queues
771 * happens when credits flow back from the target. in the
772 * non-TX credit case, we recheck after the packet completes
774 htc_try_send(target
, ep
, NULL
);
780 static int htc_send_packets_multiple(struct htc_target
*target
,
781 struct list_head
*pkt_queue
)
783 struct htc_endpoint
*ep
;
784 struct htc_packet
*packet
, *tmp_pkt
;
786 if (list_empty(pkt_queue
))
789 /* get first packet to find out which ep the packets will go into */
790 packet
= list_first_entry(pkt_queue
, struct htc_packet
, list
);
792 if (packet
->endpoint
>= ENDPOINT_MAX
) {
796 ep
= &target
->endpoint
[packet
->endpoint
];
798 htc_try_send(target
, ep
, pkt_queue
);
800 /* do completion on any packets that couldn't get in */
801 if (!list_empty(pkt_queue
)) {
802 list_for_each_entry_safe(packet
, tmp_pkt
, pkt_queue
, list
) {
803 packet
->status
= -ENOMEM
;
806 do_send_completion(ep
, pkt_queue
);
812 /* htc pipe rx path */
813 static struct htc_packet
*alloc_htc_packet_container(struct htc_target
*target
)
815 struct htc_packet
*packet
;
816 spin_lock_bh(&target
->rx_lock
);
818 if (target
->pipe
.htc_packet_pool
== NULL
) {
819 spin_unlock_bh(&target
->rx_lock
);
823 packet
= target
->pipe
.htc_packet_pool
;
824 target
->pipe
.htc_packet_pool
= (struct htc_packet
*) packet
->list
.next
;
826 spin_unlock_bh(&target
->rx_lock
);
828 packet
->list
.next
= NULL
;
832 static void free_htc_packet_container(struct htc_target
*target
,
833 struct htc_packet
*packet
)
835 struct list_head
*lh
;
837 spin_lock_bh(&target
->rx_lock
);
839 if (target
->pipe
.htc_packet_pool
== NULL
) {
840 target
->pipe
.htc_packet_pool
= packet
;
841 packet
->list
.next
= NULL
;
843 lh
= (struct list_head
*) target
->pipe
.htc_packet_pool
;
844 packet
->list
.next
= lh
;
845 target
->pipe
.htc_packet_pool
= packet
;
848 spin_unlock_bh(&target
->rx_lock
);
851 static int htc_process_trailer(struct htc_target
*target
, u8
*buffer
,
852 int len
, enum htc_endpoint_id from_ep
)
854 struct htc_credit_report
*report
;
855 struct htc_record_hdr
*record
;
860 if (len
< sizeof(struct htc_record_hdr
)) {
865 /* these are byte aligned structs */
866 record
= (struct htc_record_hdr
*) buffer
;
867 len
-= sizeof(struct htc_record_hdr
);
868 buffer
+= sizeof(struct htc_record_hdr
);
870 if (record
->len
> len
) {
871 /* no room left in buffer for record */
872 ath6kl_dbg(ATH6KL_DBG_HTC
,
873 "invalid length: %d (id:%d) buffer has: %d bytes left\n",
874 record
->len
, record
->rec_id
, len
);
879 /* start of record follows the header */
882 switch (record
->rec_id
) {
883 case HTC_RECORD_CREDITS
:
884 if (record
->len
< sizeof(struct htc_credit_report
)) {
889 report
= (struct htc_credit_report
*) record_buf
;
890 htc_process_credit_report(target
, report
,
891 record
->len
/ sizeof(*report
),
895 ath6kl_dbg(ATH6KL_DBG_HTC
,
896 "unhandled record: id:%d length:%d\n",
897 record
->rec_id
, record
->len
);
901 /* advance buffer past this record for next time around */
902 buffer
+= record
->len
;
909 static void do_recv_completion(struct htc_endpoint
*ep
,
910 struct list_head
*queue_to_indicate
)
912 struct htc_packet
*packet
;
914 if (list_empty(queue_to_indicate
)) {
915 /* nothing to indicate */
919 /* using legacy EpRecv */
920 while (!list_empty(queue_to_indicate
)) {
921 packet
= list_first_entry(queue_to_indicate
,
922 struct htc_packet
, list
);
923 list_del(&packet
->list
);
924 ep
->ep_cb
.rx(ep
->target
, packet
);
930 static void recv_packet_completion(struct htc_target
*target
,
931 struct htc_endpoint
*ep
,
932 struct htc_packet
*packet
)
934 struct list_head container
;
935 INIT_LIST_HEAD(&container
);
936 list_add_tail(&packet
->list
, &container
);
939 do_recv_completion(ep
, &container
);
942 static int ath6kl_htc_pipe_rx_complete(struct ath6kl
*ar
, struct sk_buff
*skb
,
945 struct htc_target
*target
= ar
->htc_target
;
946 u8
*netdata
, *trailer
, hdr_info
;
947 struct htc_frame_hdr
*htc_hdr
;
948 u32 netlen
, trailerlen
= 0;
949 struct htc_packet
*packet
;
950 struct htc_endpoint
*ep
;
955 * ar->htc_target can be NULL due to a race condition that can occur
956 * during driver initialization(we do 'ath6kl_hif_power_on' before
957 * initializing 'ar->htc_target' via 'ath6kl_htc_create').
958 * 'ath6kl_hif_power_on' assigns 'ath6kl_recv_complete' as
959 * usb_complete_t/callback function for 'usb_fill_bulk_urb'.
960 * Thus the possibility of ar->htc_target being NULL
961 * via ath6kl_recv_complete -> ath6kl_usb_io_comp_work.
963 if (WARN_ON_ONCE(!target
)) {
964 ath6kl_err("Target not yet initialized\n");
973 htc_hdr
= (struct htc_frame_hdr
*) netdata
;
975 if (htc_hdr
->eid
>= ENDPOINT_MAX
) {
976 ath6kl_dbg(ATH6KL_DBG_HTC
,
977 "HTC Rx: invalid EndpointID=%d\n",
982 ep
= &target
->endpoint
[htc_hdr
->eid
];
984 payload_len
= le16_to_cpu(get_unaligned(&htc_hdr
->payld_len
));
986 if (netlen
< (payload_len
+ HTC_HDR_LENGTH
)) {
987 ath6kl_dbg(ATH6KL_DBG_HTC
,
988 "HTC Rx: insufficient length, got:%d expected =%zu\n",
989 netlen
, payload_len
+ HTC_HDR_LENGTH
);
994 /* get flags to check for trailer */
995 hdr_info
= htc_hdr
->flags
;
996 if (hdr_info
& HTC_FLG_RX_TRAILER
) {
997 /* extract the trailer length */
998 hdr_info
= htc_hdr
->ctrl
[0];
999 if ((hdr_info
< sizeof(struct htc_record_hdr
)) ||
1000 (hdr_info
> payload_len
)) {
1001 ath6kl_dbg(ATH6KL_DBG_HTC
,
1002 "invalid header: payloadlen should be %d, CB[0]: %d\n",
1003 payload_len
, hdr_info
);
1008 trailerlen
= hdr_info
;
1009 /* process trailer after hdr/apps payload */
1010 trailer
= (u8
*) htc_hdr
+ HTC_HDR_LENGTH
+
1011 payload_len
- hdr_info
;
1012 status
= htc_process_trailer(target
, trailer
, hdr_info
,
1018 if (((int) payload_len
- (int) trailerlen
) <= 0) {
1019 /* zero length packet with trailer, just drop these */
1023 if (htc_hdr
->eid
== ENDPOINT_0
) {
1024 /* handle HTC control message */
1025 if (target
->htc_flags
& HTC_OP_STATE_SETUP_COMPLETE
) {
1027 * fatal: target should not send unsolicited
1028 * messageson the endpoint 0
1030 ath6kl_dbg(ATH6KL_DBG_HTC
,
1031 "HTC ignores Rx Ctrl after setup complete\n");
1036 /* remove HTC header */
1037 skb_pull(skb
, HTC_HDR_LENGTH
);
1039 netdata
= skb
->data
;
1042 spin_lock_bh(&target
->rx_lock
);
1044 target
->pipe
.ctrl_response_valid
= true;
1045 target
->pipe
.ctrl_response_len
= min_t(int, netlen
,
1046 HTC_MAX_CTRL_MSG_LEN
);
1047 memcpy(target
->pipe
.ctrl_response_buf
, netdata
,
1048 target
->pipe
.ctrl_response_len
);
1050 spin_unlock_bh(&target
->rx_lock
);
1059 * TODO: the message based HIF architecture allocates net bufs
1060 * for recv packets since it bridges that HIF to upper layers,
1061 * which expects HTC packets, we form the packets here
1063 packet
= alloc_htc_packet_container(target
);
1064 if (packet
== NULL
) {
1070 packet
->endpoint
= htc_hdr
->eid
;
1071 packet
->pkt_cntxt
= skb
;
1073 /* TODO: for backwards compatibility */
1074 packet
->buf
= skb_push(skb
, 0) + HTC_HDR_LENGTH
;
1075 packet
->act_len
= netlen
- HTC_HDR_LENGTH
- trailerlen
;
1078 * TODO: this is a hack because the driver layer will set the
1079 * actual len of the skb again which will just double the len
1083 recv_packet_completion(target
, ep
, packet
);
1085 /* recover the packet container */
1086 free_htc_packet_container(target
, packet
);
1095 static void htc_flush_rx_queue(struct htc_target
*target
,
1096 struct htc_endpoint
*ep
)
1098 struct list_head container
;
1099 struct htc_packet
*packet
;
1101 spin_lock_bh(&target
->rx_lock
);
1104 if (list_empty(&ep
->rx_bufq
))
1107 packet
= list_first_entry(&ep
->rx_bufq
,
1108 struct htc_packet
, list
);
1109 list_del(&packet
->list
);
1111 spin_unlock_bh(&target
->rx_lock
);
1112 packet
->status
= -ECANCELED
;
1113 packet
->act_len
= 0;
1115 ath6kl_dbg(ATH6KL_DBG_HTC
,
1116 "Flushing RX packet:0x%p, length:%d, ep:%d\n",
1117 packet
, packet
->buf_len
,
1120 INIT_LIST_HEAD(&container
);
1121 list_add_tail(&packet
->list
, &container
);
1123 /* give the packet back */
1124 do_recv_completion(ep
, &container
);
1125 spin_lock_bh(&target
->rx_lock
);
1128 spin_unlock_bh(&target
->rx_lock
);
1131 /* polling routine to wait for a control packet to be received */
/*
 * Poll for a control-message response from the target.
 *
 * Repeatedly checks pipe.ctrl_response_valid (which the RX completion
 * path sets when a control message arrives on endpoint 0), sleeping
 * HTC_TARGET_RESPONSE_POLL_WAIT between attempts for up to
 * HTC_TARGET_RESPONSE_POLL_COUNT polls, and consumes the flag under
 * rx_lock on success.  Warns on timeout.
 * NOTE(review): this extraction has dropped lines (the poll loop
 * header and the return statements); confirm against the full source.
 */
1132 static int htc_wait_recv_ctrl_message(struct htc_target
*target
)
1134 int count
= HTC_TARGET_RESPONSE_POLL_COUNT
;
1137 spin_lock_bh(&target
->rx_lock
);
/* has the RX path already posted a control response? */
1139 if (target
->pipe
.ctrl_response_valid
) {
/* consume the response: clear the valid flag while still locked */
1140 target
->pipe
.ctrl_response_valid
= false;
1141 spin_unlock_bh(&target
->rx_lock
);
1145 spin_unlock_bh(&target
->rx_lock
);
/* not ready yet: sleep before the next poll */
1149 msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT
);
1153 ath6kl_warn("htc pipe control receive timeout!\n");
1160 static void htc_rxctrl_complete(struct htc_target
*context
,
1161 struct htc_packet
*packet
)
1163 struct sk_buff
*skb
= packet
->skb
;
1165 if (packet
->endpoint
== ENDPOINT_0
&&
1166 packet
->status
== -ECANCELED
&&
1171 /* htc pipe initialization */
/*
 * Re-initialise every endpoint structure on the target: reset the
 * queue depth limit, the TX queue / TX completion lookup / RX buffer
 * list heads, the back pointer to the owning target, and enable TX
 * credit flow control by default (individual services may turn it off
 * later at connect time).
 * NOTE(review): this extraction has dropped lines (e.g. the loop
 * index declaration and some field resets); confirm against the
 * complete source.
 */
1172 static void reset_endpoint_states(struct htc_target
*target
)
1174 struct htc_endpoint
*ep
;
/* walk every endpoint id from ENDPOINT_0 up to ENDPOINT_MAX */
1177 for (i
= ENDPOINT_0
; i
< ENDPOINT_MAX
; i
++) {
1178 ep
= &target
->endpoint
[i
];
1181 ep
->max_txq_depth
= 0;
/* fresh list heads: TX queue, TX completion lookup, RX buffers */
1183 INIT_LIST_HEAD(&ep
->txq
);
1184 INIT_LIST_HEAD(&ep
->pipe
.tx_lookup_queue
);
1185 INIT_LIST_HEAD(&ep
->rx_bufq
);
1186 ep
->target
= target
;
/* credit-based TX flow control defaults to on */
1187 ep
->pipe
.tx_credit_flow_enabled
= true;
1191 /* start HTC, this is called after all services are connected */
1192 static int htc_config_target_hif_pipe(struct htc_target
*target
)
1197 /* htc service functions */
1198 static u8
htc_get_credit_alloc(struct htc_target
*target
, u16 service_id
)
1203 for (i
= 0; i
< ENDPOINT_MAX
; i
++) {
1204 if (target
->pipe
.txcredit_alloc
[i
].service_id
== service_id
)
1206 target
->pipe
.txcredit_alloc
[i
].credit_alloc
;
1209 if (allocation
== 0) {
1210 ath6kl_dbg(ATH6KL_DBG_HTC
,
1211 "HTC Service TX : 0x%2.2X : allocation is zero!\n",
1218 static int ath6kl_htc_pipe_conn_service(struct htc_target
*target
,
1219 struct htc_service_connect_req
*conn_req
,
1220 struct htc_service_connect_resp
*conn_resp
)
1222 struct ath6kl
*ar
= target
->dev
->ar
;
1223 struct htc_packet
*packet
= NULL
;
1224 struct htc_conn_service_resp
*resp_msg
;
1225 struct htc_conn_service_msg
*conn_msg
;
1226 enum htc_endpoint_id assigned_epid
= ENDPOINT_MAX
;
1227 bool disable_credit_flowctrl
= false;
1228 unsigned int max_msg_size
= 0;
1229 struct htc_endpoint
*ep
;
1230 int length
, status
= 0;
1231 struct sk_buff
*skb
;
1235 if (conn_req
->svc_id
== 0) {
1241 if (conn_req
->svc_id
== HTC_CTRL_RSVD_SVC
) {
1242 /* special case for pseudo control service */
1243 assigned_epid
= ENDPOINT_0
;
1244 max_msg_size
= HTC_MAX_CTRL_MSG_LEN
;
1248 tx_alloc
= htc_get_credit_alloc(target
, conn_req
->svc_id
);
1249 if (tx_alloc
== 0) {
1254 /* allocate a packet to send to the target */
1255 packet
= htc_alloc_txctrl_packet(target
);
1257 if (packet
== NULL
) {
1264 length
= sizeof(struct htc_conn_service_msg
);
1266 /* assemble connect service message */
1267 conn_msg
= skb_put(skb
, length
);
1268 if (conn_msg
== NULL
) {
1275 sizeof(struct htc_conn_service_msg
));
1276 conn_msg
->msg_id
= cpu_to_le16(HTC_MSG_CONN_SVC_ID
);
1277 conn_msg
->svc_id
= cpu_to_le16(conn_req
->svc_id
);
1278 conn_msg
->conn_flags
= cpu_to_le16(conn_req
->conn_flags
&
1279 ~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK
);
1281 /* tell target desired recv alloc for this ep */
1282 flags
= tx_alloc
<< HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT
;
1283 conn_msg
->conn_flags
|= cpu_to_le16(flags
);
1285 if (conn_req
->conn_flags
&
1286 HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL
) {
1287 disable_credit_flowctrl
= true;
1290 set_htc_pkt_info(packet
, NULL
, (u8
*) conn_msg
,
1292 ENDPOINT_0
, HTC_SERVICE_TX_PACKET_TAG
);
1294 status
= ath6kl_htc_pipe_tx(target
, packet
);
1296 /* we don't own it anymore */
1301 /* wait for response */
1302 status
= htc_wait_recv_ctrl_message(target
);
1306 /* we controlled the buffer creation so it has to be
1309 resp_msg
= (struct htc_conn_service_resp
*)
1310 target
->pipe
.ctrl_response_buf
;
1312 if (resp_msg
->msg_id
!= cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID
) ||
1313 (target
->pipe
.ctrl_response_len
< sizeof(*resp_msg
))) {
1314 /* this message is not valid */
1320 ath6kl_dbg(ATH6KL_DBG_TRC
,
1321 "%s: service 0x%X conn resp: status: %d ep: %d\n",
1322 __func__
, resp_msg
->svc_id
, resp_msg
->status
,
1325 conn_resp
->resp_code
= resp_msg
->status
;
1326 /* check response status */
1327 if (resp_msg
->status
!= HTC_SERVICE_SUCCESS
) {
1328 ath6kl_dbg(ATH6KL_DBG_HTC
,
1329 "Target failed service 0x%X connect request (status:%d)\n",
1330 resp_msg
->svc_id
, resp_msg
->status
);
1335 assigned_epid
= (enum htc_endpoint_id
) resp_msg
->eid
;
1336 max_msg_size
= le16_to_cpu(resp_msg
->max_msg_sz
);
1339 /* the rest are parameter checks so set the error status */
1342 if (assigned_epid
>= ENDPOINT_MAX
) {
1347 if (max_msg_size
== 0) {
1352 ep
= &target
->endpoint
[assigned_epid
];
1353 ep
->eid
= assigned_epid
;
1354 if (ep
->svc_id
!= 0) {
1355 /* endpoint already in use! */
1360 /* return assigned endpoint to caller */
1361 conn_resp
->endpoint
= assigned_epid
;
1362 conn_resp
->len_max
= max_msg_size
;
1364 /* setup the endpoint */
1365 ep
->svc_id
= conn_req
->svc_id
; /* this marks ep in use */
1366 ep
->max_txq_depth
= conn_req
->max_txq_depth
;
1367 ep
->len_max
= max_msg_size
;
1368 ep
->cred_dist
.credits
= tx_alloc
;
1369 ep
->cred_dist
.cred_sz
= target
->tgt_cred_sz
;
1370 ep
->cred_dist
.cred_per_msg
= max_msg_size
/ target
->tgt_cred_sz
;
1371 if (max_msg_size
% target
->tgt_cred_sz
)
1372 ep
->cred_dist
.cred_per_msg
++;
1374 /* copy all the callbacks */
1375 ep
->ep_cb
= conn_req
->ep_cb
;
1377 /* initialize tx_drop_packet_threshold */
1378 ep
->tx_drop_packet_threshold
= MAX_HI_COOKIE_NUM
;
1380 status
= ath6kl_hif_pipe_map_service(ar
, ep
->svc_id
,
1381 &ep
->pipe
.pipeid_ul
,
1382 &ep
->pipe
.pipeid_dl
);
1386 ath6kl_dbg(ATH6KL_DBG_HTC
,
1387 "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n",
1388 ep
->svc_id
, ep
->pipe
.pipeid_ul
,
1389 ep
->pipe
.pipeid_dl
, ep
->eid
);
1391 if (disable_credit_flowctrl
&& ep
->pipe
.tx_credit_flow_enabled
) {
1392 ep
->pipe
.tx_credit_flow_enabled
= false;
1393 ath6kl_dbg(ATH6KL_DBG_HTC
,
1394 "SVC: 0x%4.4X ep:%d TX flow control off\n",
1395 ep
->svc_id
, assigned_epid
);
1400 htc_free_txctrl_packet(target
, packet
);
1404 /* htc export functions */
1405 static void *ath6kl_htc_pipe_create(struct ath6kl
*ar
)
1408 struct htc_endpoint
*ep
= NULL
;
1409 struct htc_target
*target
= NULL
;
1410 struct htc_packet
*packet
;
1413 target
= kzalloc(sizeof(struct htc_target
), GFP_KERNEL
);
1414 if (target
== NULL
) {
1415 ath6kl_err("htc create unable to allocate memory\n");
1417 goto fail_htc_create
;
1420 spin_lock_init(&target
->htc_lock
);
1421 spin_lock_init(&target
->rx_lock
);
1422 spin_lock_init(&target
->tx_lock
);
1424 reset_endpoint_states(target
);
1426 for (i
= 0; i
< HTC_PACKET_CONTAINER_ALLOCATION
; i
++) {
1427 packet
= kzalloc(sizeof(struct htc_packet
), GFP_KERNEL
);
1430 free_htc_packet_container(target
, packet
);
1433 target
->dev
= kzalloc(sizeof(*target
->dev
), GFP_KERNEL
);
1435 ath6kl_err("unable to allocate memory\n");
1437 goto fail_htc_create
;
1439 target
->dev
->ar
= ar
;
1440 target
->dev
->htc_cnxt
= target
;
1442 /* Get HIF default pipe for HTC message exchange */
1443 ep
= &target
->endpoint
[ENDPOINT_0
];
1445 ath6kl_hif_pipe_get_default(ar
, &ep
->pipe
.pipeid_ul
,
1446 &ep
->pipe
.pipeid_dl
);
1453 ath6kl_htc_pipe_cleanup(target
);
1460 /* cleanup the HTC instance */
1461 static void ath6kl_htc_pipe_cleanup(struct htc_target
*target
)
1463 struct htc_packet
*packet
;
1466 packet
= alloc_htc_packet_container(target
);
1474 /* kfree our instance */
1478 static int ath6kl_htc_pipe_start(struct htc_target
*target
)
1480 struct sk_buff
*skb
;
1481 struct htc_setup_comp_ext_msg
*setup
;
1482 struct htc_packet
*packet
;
1484 htc_config_target_hif_pipe(target
);
1486 /* allocate a buffer to send */
1487 packet
= htc_alloc_txctrl_packet(target
);
1488 if (packet
== NULL
) {
1495 /* assemble setup complete message */
1496 setup
= skb_put(skb
, sizeof(*setup
));
1497 memset(setup
, 0, sizeof(struct htc_setup_comp_ext_msg
));
1498 setup
->msg_id
= cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID
);
1500 ath6kl_dbg(ATH6KL_DBG_HTC
, "HTC using TX credit flow control\n");
1502 set_htc_pkt_info(packet
, NULL
, (u8
*) setup
,
1503 sizeof(struct htc_setup_comp_ext_msg
),
1504 ENDPOINT_0
, HTC_SERVICE_TX_PACKET_TAG
);
1506 target
->htc_flags
|= HTC_OP_STATE_SETUP_COMPLETE
;
1508 return ath6kl_htc_pipe_tx(target
, packet
);
1511 static void ath6kl_htc_pipe_stop(struct htc_target
*target
)
1514 struct htc_endpoint
*ep
;
1516 /* cleanup endpoints */
1517 for (i
= 0; i
< ENDPOINT_MAX
; i
++) {
1518 ep
= &target
->endpoint
[i
];
1519 htc_flush_rx_queue(target
, ep
);
1520 htc_flush_tx_endpoint(target
, ep
, HTC_TX_PACKET_TAG_ALL
);
1523 reset_endpoint_states(target
);
1524 target
->htc_flags
&= ~HTC_OP_STATE_SETUP_COMPLETE
;
1527 static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target
*target
,
1528 enum htc_endpoint_id endpoint
)
1532 spin_lock_bh(&target
->rx_lock
);
1533 num
= get_queue_depth(&(target
->endpoint
[endpoint
].rx_bufq
));
1534 spin_unlock_bh(&target
->rx_lock
);
1539 static int ath6kl_htc_pipe_tx(struct htc_target
*target
,
1540 struct htc_packet
*packet
)
1542 struct list_head queue
;
1544 ath6kl_dbg(ATH6KL_DBG_HTC
,
1545 "%s: endPointId: %d, buffer: 0x%p, length: %d\n",
1546 __func__
, packet
->endpoint
, packet
->buf
,
1549 INIT_LIST_HEAD(&queue
);
1550 list_add_tail(&packet
->list
, &queue
);
1552 return htc_send_packets_multiple(target
, &queue
);
1555 static int ath6kl_htc_pipe_wait_target(struct htc_target
*target
)
1557 struct htc_ready_ext_msg
*ready_msg
;
1558 struct htc_service_connect_req connect
;
1559 struct htc_service_connect_resp resp
;
1562 status
= htc_wait_recv_ctrl_message(target
);
1567 if (target
->pipe
.ctrl_response_len
< sizeof(*ready_msg
)) {
1568 ath6kl_warn("invalid htc pipe ready msg len: %d\n",
1569 target
->pipe
.ctrl_response_len
);
1573 ready_msg
= (struct htc_ready_ext_msg
*) target
->pipe
.ctrl_response_buf
;
1575 if (ready_msg
->ver2_0_info
.msg_id
!= cpu_to_le16(HTC_MSG_READY_ID
)) {
1576 ath6kl_warn("invalid htc pipe ready msg: 0x%x\n",
1577 ready_msg
->ver2_0_info
.msg_id
);
1581 ath6kl_dbg(ATH6KL_DBG_HTC
,
1582 "Target Ready! : transmit resources : %d size:%d\n",
1583 ready_msg
->ver2_0_info
.cred_cnt
,
1584 ready_msg
->ver2_0_info
.cred_sz
);
1586 target
->tgt_creds
= le16_to_cpu(ready_msg
->ver2_0_info
.cred_cnt
);
1587 target
->tgt_cred_sz
= le16_to_cpu(ready_msg
->ver2_0_info
.cred_sz
);
1589 if ((target
->tgt_creds
== 0) || (target
->tgt_cred_sz
== 0))
1592 htc_setup_target_buffer_assignments(target
);
1594 /* setup our pseudo HTC control endpoint connection */
1595 memset(&connect
, 0, sizeof(connect
));
1596 memset(&resp
, 0, sizeof(resp
));
1597 connect
.ep_cb
.tx_complete
= htc_txctrl_complete
;
1598 connect
.ep_cb
.rx
= htc_rxctrl_complete
;
1599 connect
.max_txq_depth
= NUM_CONTROL_TX_BUFFERS
;
1600 connect
.svc_id
= HTC_CTRL_RSVD_SVC
;
1602 /* connect fake service */
1603 status
= ath6kl_htc_pipe_conn_service(target
, &connect
, &resp
);
1608 static void ath6kl_htc_pipe_flush_txep(struct htc_target
*target
,
1609 enum htc_endpoint_id endpoint
, u16 tag
)
1611 struct htc_endpoint
*ep
= &target
->endpoint
[endpoint
];
1613 if (ep
->svc_id
== 0) {
1619 htc_flush_tx_endpoint(target
, ep
, tag
);
1622 static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target
*target
,
1623 struct list_head
*pkt_queue
)
1625 struct htc_packet
*packet
, *tmp_pkt
, *first
;
1626 struct htc_endpoint
*ep
;
1629 if (list_empty(pkt_queue
))
1632 first
= list_first_entry(pkt_queue
, struct htc_packet
, list
);
1634 if (first
->endpoint
>= ENDPOINT_MAX
) {
1639 ath6kl_dbg(ATH6KL_DBG_HTC
, "%s: epid: %d, cnt:%d, len: %d\n",
1640 __func__
, first
->endpoint
, get_queue_depth(pkt_queue
),
1643 ep
= &target
->endpoint
[first
->endpoint
];
1645 spin_lock_bh(&target
->rx_lock
);
1647 /* store receive packets */
1648 list_splice_tail_init(pkt_queue
, &ep
->rx_bufq
);
1650 spin_unlock_bh(&target
->rx_lock
);
1653 /* walk through queue and mark each one canceled */
1654 list_for_each_entry_safe(packet
, tmp_pkt
, pkt_queue
, list
) {
1655 packet
->status
= -ECANCELED
;
1658 do_recv_completion(ep
, pkt_queue
);
1664 static void ath6kl_htc_pipe_activity_changed(struct htc_target
*target
,
1665 enum htc_endpoint_id ep
,
1671 static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target
*target
)
1673 struct htc_endpoint
*endpoint
;
1674 struct htc_packet
*packet
, *tmp_pkt
;
1677 for (i
= ENDPOINT_0
; i
< ENDPOINT_MAX
; i
++) {
1678 endpoint
= &target
->endpoint
[i
];
1680 spin_lock_bh(&target
->rx_lock
);
1682 list_for_each_entry_safe(packet
, tmp_pkt
,
1683 &endpoint
->rx_bufq
, list
) {
1684 list_del(&packet
->list
);
1685 spin_unlock_bh(&target
->rx_lock
);
1686 ath6kl_dbg(ATH6KL_DBG_HTC
,
1687 "htc rx flush pkt 0x%p len %d ep %d\n",
1688 packet
, packet
->buf_len
,
1690 dev_kfree_skb(packet
->pkt_cntxt
);
1691 spin_lock_bh(&target
->rx_lock
);
1694 spin_unlock_bh(&target
->rx_lock
);
/*
 * Credit-distribution setup hook for the HTC ops table.  The pipe
 * implementation manages credits internally, so this is a no-op that
 * reports success.  NOTE(review): body not visible in this chunk;
 * reconstructed as the conventional `return 0;` stub — confirm upstream.
 */
static int ath6kl_htc_pipe_credit_setup(struct htc_target *target,
					struct ath6kl_htc_credit_info *info)
{
	return 0;
}
1704 static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops
= {
1705 .create
= ath6kl_htc_pipe_create
,
1706 .wait_target
= ath6kl_htc_pipe_wait_target
,
1707 .start
= ath6kl_htc_pipe_start
,
1708 .conn_service
= ath6kl_htc_pipe_conn_service
,
1709 .tx
= ath6kl_htc_pipe_tx
,
1710 .stop
= ath6kl_htc_pipe_stop
,
1711 .cleanup
= ath6kl_htc_pipe_cleanup
,
1712 .flush_txep
= ath6kl_htc_pipe_flush_txep
,
1713 .flush_rx_buf
= ath6kl_htc_pipe_flush_rx_buf
,
1714 .activity_changed
= ath6kl_htc_pipe_activity_changed
,
1715 .get_rxbuf_num
= ath6kl_htc_pipe_get_rxbuf_num
,
1716 .add_rxbuf_multiple
= ath6kl_htc_pipe_add_rxbuf_multiple
,
1717 .credit_setup
= ath6kl_htc_pipe_credit_setup
,
1718 .tx_complete
= ath6kl_htc_pipe_tx_complete
,
1719 .rx_complete
= ath6kl_htc_pipe_rx_complete
,
1722 void ath6kl_htc_pipe_attach(struct ath6kl
*ar
)
1724 ar
->htc_ops
= &ath6kl_htc_pipe_ops
;