/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "debug.h"
#include "htc-ops.h"
#include "trace.h"
/*
 * tid - tid_mux0..tid_mux3
 * aid - tid_mux4..tid_mux7
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4
static inline u8 ath6kl_get_tid(u8 tid_mux)
{
	return tid_mux & ATH6KL_TID_MASK;
}
static inline u8 ath6kl_get_aid(u8 tid_mux)
{
	return tid_mux >> ATH6KL_AID_SHIFT;
}
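
/*
 * Worked example (illustrative comment only, not driver logic): the target
 * multiplexes the TID into the low nibble and the association ID into the
 * high nibble of tid_mux. For tid_mux = 0x35, ath6kl_get_tid() yields
 * 0x35 & 0xf = 5 (the TID) and ath6kl_get_aid() yields 0x35 >> 4 = 3
 * (the AID).
 */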
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the inuse endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}
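
/*
 * Illustrative note on the mapping above (comment only, not driver code):
 * multicast frames always go out on ENDPOINT_2; each unicast peer MAC is
 * pinned to one of ENDPOINT_2..ENDPOINT_5 via node_map. With only four
 * data endpoints, a fifth busy peer finds no idle endpoint and is
 * assigned ar->next_ep_id, which round-robins over ENDPOINT_2..ENDPOINT_5,
 * so two peers then share an endpoint.
 */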
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
				  struct ath6kl_vif *vif,
				  struct sk_buff *skb,
				  u32 *flags)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty = false;
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	u8 up = 0, traffic_class, *ip_hdr;
	u16 ether_type;
	struct ath6kl_llc_snap_hdr *llc_hdr;

	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
		/*
		 * This tx is because of a uAPSD trigger, determine
		 * more and EOSP bit. Set EOSP if queue is empty
		 * or sufficient frames are delivered for this trigger.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->apsdq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		else if (conn->sta_flags & STA_PS_APSD_EOSP)
			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	} else if (!conn->apsd_info) {
		return false;
	}

	if (test_bit(WMM_ENABLED, &vif->flags)) {
		ether_type = be16_to_cpu(datap->h_proto);
		if (is_ethertype(ether_type)) {
			/* packet is in DIX format */
			ip_hdr = (u8 *)(datap + 1);
		} else {
			/* packet is in 802.3 format */
			llc_hdr = (struct ath6kl_llc_snap_hdr *)
							(datap + 1);
			ether_type = be16_to_cpu(llc_hdr->eth_type);
			ip_hdr = (u8 *)(llc_hdr + 1);
		}

		if (ether_type == IP_ETHERTYPE)
			up = ath6kl_wmi_determine_user_priority(
							ip_hdr, 0);
	}

	traffic_class = ath6kl_wmi_get_traffic_class(up);

	if ((conn->apsd_info & (1 << traffic_class)) == 0)
		return false;

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	skb_queue_tail(&conn->apsdq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA
	 */
	if (is_apsdq_empty)
		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 1, 0);
	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;

	return true;
}
static bool ath6kl_process_psq(struct ath6kl_sta *conn,
			       struct ath6kl_vif *vif,
			       struct sk_buff *skb,
			       u32 *flags)
{
	bool is_psq_empty = false;
	struct ath6kl *ar = vif->ar;

	if (conn->sta_flags & STA_PS_POLLED) {
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->psq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	}

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_psq_empty = skb_queue_empty(&conn->psq);
	skb_queue_tail(&conn->psq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this
	 * STA.
	 */
	if (is_psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi,
				       vif->fw_vif_idx,
				       conn->aid, 1);
	return true;
}
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
				u32 *flags)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false;
	struct ath6kl *ar = vif->ar;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a Dtim Expiry
			 * q it.
			 */
			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first Mcast pkt getting
				 * queued indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of Dtim expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*flags |= WMI_DATA_HDR_FLAGS_MORE;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(vif, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			ps_queued = ath6kl_process_uapsdq(conn,
							  vif, skb, flags);
			if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
				ps_queued = ath6kl_process_psq(conn,
							       vif, skb,
							       flags);
		}
	}
	return ps_queued;
}
/* Tx functions */

int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	trace_ath6kl_wmi_cmd(skb->data, skb->len);

	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
		dev_kfree_skb(skb);
		return -EACCES;
	}

	if (WARN_ON_ONCE(eid == ENDPOINT_UNUSED ||
			 eid >= ENDPOINT_MAX)) {
		status = -EINVAL;
		goto fail_ctrl_tx;
	}

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else {
		cookie = ath6kl_alloc_cookie(ar);
	}

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);
	cookie->htc_pkt.skb = skb;

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}
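
/*
 * Usage sketch (hedged, illustrative comment only): this is roughly how
 * the WMI layer is expected to hand a command buffer to
 * ath6kl_control_tx(). The helper names below mirror wmi.c but are shown
 * purely as an example, not as this file's API:
 *
 *	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
 *	cmd = (struct wmi_cmd *) skb->data;
 *	...fill in the command...
 *	ath6kl_control_tx(wmi->parent_dev, skb, wmi->ep_id);
 *
 * On success the skb is owned by HTC and released from the tx completion
 * path; on failure ath6kl_control_tx() frees it before returning.
 */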
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	struct ath6kl_vif *vif = netdev_priv(dev);
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false;
	int ret;
	struct wmi_tx_meta_v2 meta_v2;
	void *meta;
	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
	u8 meta_ver = 0;
	u32 flags = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &vif->flags))
		goto fail_tx;

	if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
		goto fail_tx;

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (vif->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(vif, skb, &flags))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			csum_start = skb->csum_start -
					(skb_network_header(skb) - skb->head) +
					sizeof(struct ath6kl_llc_snap_hdr);
			csum_dest = skb->csum_offset + csum_start;
		}

		if (skb_headroom(skb) < dev->needed_headroom) {
			struct sk_buff *tmp_skb = skb;

			skb = skb_realloc_headroom(skb, dev->needed_headroom);
			kfree_skb(tmp_skb);
			if (skb == NULL) {
				vif->net_stats.tx_dropped++;
				return 0;
			}
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			meta_v2.csum_start = csum_start;
			meta_v2.csum_dest = csum_dest;

			/* instruct target to calculate checksum */
			meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
			meta_ver = WMI_META_VERSION_2;
			meta = &meta_v2;
		} else {
			meta_ver = 0;
			meta = NULL;
		}

		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
					      DATA_MSGTYPE, flags, 0,
					      meta_ver,
					      meta, vif->fw_vif_idx);

		if (ret) {
			ath6kl_warn("failed to add wmi data header:%d\n", ret);
			goto fail_tx;
		}

		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags)) {
			chk_adhoc_ps_mapping = true;
		} else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
				    vif->fw_vif_idx, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else {
		goto fail_tx;
	}

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move the buffer data to align it. Since the
		 * skb buffer is cloned and not only the header is changed, we
		 * have to copy it to allow the changes. Since we are copying
		 * the data here, we may as well align it by reserving suitable
		 * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);
	cookie->htc_pkt.skb = skb;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	vif->net_stats.tx_dropped++;
	vif->net_stats.tx_aborted_errors++;

	return 0;
}
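
/*
 * Worked example for the checksum offload fields above (comment only):
 * skb->csum_start is an offset from skb->head, so subtracting
 * (skb_network_header(skb) - skb->head) rebases it to the network
 * header. For a TCP packet with a 20 byte IPv4 header the rebased
 * offset is 20; adding the 8 byte ath6kl_llc_snap_hdr inserted by
 * ath6kl_wmi_dix_2_dot3() gives csum_start = 28, and with TCP's
 * csum_offset of 16 the computed csum_dest is 28 + 16 = 44.
 */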
/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];
	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_activity_changed(ar->htc_target, eid, active);
}
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct ath6kl_vif *vif;
	enum htc_endpoint_id endpoint = packet->endpoint;
	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI if this is getting full, then something
		 * is running rampant the host should not be exhausting the
		 * WMI queue with too many commands the only exception to
		 * this is during testing using endpointping.
		 */
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		ath6kl_err("wmi ctrl ep is full\n");
		ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL);
		return action;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return action;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <=
			target->endpoint[endpoint].tx_drop_packet_threshold)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		action = HTC_SEND_FULL_DROP;

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (vif->nw_type == ADHOC_NETWORK ||
		    action != HTC_SEND_FULL_DROP) {
			spin_unlock_bh(&ar->list_lock);

			set_bit(NETQ_STOPPED, &vif->flags);
			netif_stop_queue(vif->ndev);

			return action;
		}
	}
	spin_unlock_bh(&ar->list_lock);

	return action;
}
/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
				     enum htc_endpoint_id eid, u32 map_no)
{
	struct ath6kl *ar = vif->ar;
	u32 i;

	if (vif->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}
void ath6kl_tx_complete(struct htc_target *target,
			struct list_head *packet_queue)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing[ATH6KL_VIF_MAX] = {false};
	u8 if_idx;
	struct ath6kl_vif *vif;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {
		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		if (WARN_ON_ONCE(packet->endpoint == ENDPOINT_UNUSED ||
				 packet->endpoint >= ENDPOINT_MAX))
			continue;

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (WARN_ON_ONCE(!ath6kl_cookie))
			continue;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (WARN_ON_ONCE(!skb || !skb->data)) {
			dev_kfree_skb(skb);
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		__skb_queue_tail(&skb_queue, skb);

		if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (eid == ar->ctrl_ep) {
			if_idx = wmi_cmd_hdr_get_if_idx(
				(struct wmi_cmd_hdr *) packet->buf);
		} else {
			if_idx = wmi_data_hdr_get_if_idx(
				(struct wmi_data_hdr *) packet->buf);
		}

		vif = ath6kl_get_vif_by_index(ar, if_idx);
		if (!vif) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing[if_idx] = true;

			vif->net_stats.tx_errors++;

			if (status != -ENOSPC && status != -ECANCELED)
				ath6kl_warn("tx complete error: %d\n", status);

			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing[if_idx] = false;
			vif->net_stats.tx_packets++;
			vif->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(vif, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &vif->flags))
			clear_bit(NETQ_STOPPED, &vif->flags);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (test_bit(CONNECTED, &vif->flags) &&
		    !flushing[vif->fw_vif_idx]) {
			spin_unlock_bh(&ar->list_lock);
			netif_wake_queue(vif->ndev);
			spin_lock_bh(&ar->list_lock);
		}
	}
	spin_unlock_bh(&ar->list_lock);

	if (wake_event)
		wake_up(&ar->event_wq);
}
void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */
static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}
static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}
static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
	    (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
				     AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

	return skb;
}
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
			  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
			size_t len = skb_headlen(skb);

			skb->data = PTR_ALIGN(skb->data - 4, 4);
			skb_set_tail_pointer(skb, len);
		}
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		packet->skb = skb;
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}
void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
			size_t len = skb_headlen(skb);

			skb->data = PTR_ALIGN(skb->data - 4, 4);
			skb_set_tail_pointer(skb, len);
		}
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		packet->skb = skb;

		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}
/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;

		/* Add the length of A-MSDU subframe payload len */
		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate ? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/* Add the length of A-MSDU subframe padding bytes -
		 * Round to nearest word.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}
static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	spin_lock_bh(&rxtid->lock);
	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to 'the' seq_no, when BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: last seq no in current window will occupy the same
	 * index position as index that is just previous to start.
	 * An imp point : if win_sz is 7, for seq_no space of 4095,
	 * then, there would be holes when sequence wrap around occurs.
	 * Target should judiciously choose the win_sz, based on
	 * this condition. For 4095, (TID_WINDOW_SZ = 2 x win_sz
	 * 2, 4, 8, 16 win_sz works fine).
	 * We must deque from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
						 node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else {
			stats->num_hole++;
		}

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}
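
/*
 * Worked example for the window comment above (illustrative only, assuming
 * AGGR_WIN_IDX(x, y) == (x) % (y) and TID_WINDOW_SZ(sz) == 2 * sz as in
 * aggr.h): with win_sz = 8, hold_q_sz = 16 divides the 4096-entry sequence
 * space evenly, so seq 4095 maps to index 15 and seq 0 wraps to index 0
 * with no hole. With win_sz = 7, hold_q_sz = 14: seq 4095 maps to
 * 4095 % 14 = 7, but the next sequence number 0 maps to index 0, skipping
 * indices 8..13 - exactly the wrap-around holes the comment warns about.
 */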
static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_conn, tid, 0, 0);
			spin_lock_bh(&rxtid->lock);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
			spin_unlock_bh(&rxtid->lock);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
					(rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_conn, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame duplicate or something beyond our window(hold_q
	 * -> which is 2x, already)?
	 *
	 * 1. Duplicate is easy - drop incoming frame.
	 * 2. Not falling in current sliding window.
	 *  2a. is the frame_seq_no preceding current tid_seq_no?
	 *      -> drop the frame. perhaps sender did not get our ACK.
	 *         this is taken care of above.
	 *  2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
	 *      -> Taken care of it above, by moving window forward.
	 */
	dev_kfree_skb(node->skb);
	stats->num_dups++;

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_conn, tid, 0, 1);

	if (agg_conn->timer_scheduled)
		return is_queued;

	spin_lock_bh(&rxtid->lock);
	for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
		if (rxtid->hold_q[idx].skb) {
			/*
			 * There is a frame in the queue and no
			 * timer so start a timer to ensure that
			 * the frame doesn't remain stuck
			 * forever.
			 */
			agg_conn->timer_scheduled = true;
			mod_timer(&agg_conn->timer,
				  (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
			rxtid->timer_mon = true;
			break;
		}
	}
	spin_unlock_bh(&rxtid->lock);

	return is_queued;
}
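
/*
 * Worked example of the out-of-order handling above (comment only): with
 * rxtid->seq_next = 100 and hold_q_sz = 16, the current window is
 * [100, 115] and extended_end = 130. An incoming seq_no of 130 is beyond
 * the window but still within the hold queue's 2x reach, so only the
 * frames outside the new shifted window are dequeued via
 * aggr_deque_frms(agg_conn, tid, 130 - 15 = 115, 0). A seq_no of, say,
 * 2000 falls outside even the extended range, so the whole window is
 * flushed first and seq_next is moved to 2000 - 15 = 1985.
 */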
static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
					  struct ath6kl_sta *conn)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty, is_apsdq_empty_at_start;
	u32 num_frames_to_deliver, flags;
	struct sk_buff *skb = NULL;

	/*
	 * If the APSD q for this STA is not empty, dequeue and
	 * send a pkt from the head of the q. Also update the
	 * More data bit in the WMI_DATA_HDR if there are
	 * more pkts for this STA in the APSD q.
	 * If there are no more pkts for this STA,
	 * update the APSD bitmap for this STA.
	 */

	num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
						    ATH6KL_APSD_FRAME_MASK;
	/*
	 * Number of frames to send in a service period is
	 * indicated by the station
	 * in the QOS_INFO of the association request
	 * If it is zero, send all frames
	 */
	if (!num_frames_to_deliver)
		num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	spin_unlock_bh(&conn->psq_lock);
	is_apsdq_empty_at_start = is_apsdq_empty;

	while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
		spin_lock_bh(&conn->psq_lock);
		skb = skb_dequeue(&conn->apsdq);
		is_apsdq_empty = skb_queue_empty(&conn->apsdq);
		spin_unlock_bh(&conn->psq_lock);

		/*
		 * Set the STA flag to Trigger delivery,
		 * so that the frame will go out
		 */
		conn->sta_flags |= STA_PS_APSD_TRIGGER;
		num_frames_to_deliver--;

		/* Last frame in the service period, set EOSP or queue empty */
		if ((is_apsdq_empty) || (!num_frames_to_deliver))
			conn->sta_flags |= STA_PS_APSD_EOSP;

		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
		conn->sta_flags &= ~(STA_PS_APSD_EOSP);
	}

	if (is_apsdq_empty) {
		if (is_apsdq_empty_at_start)
			flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
		else
			flags = 0;

		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 0, flags);
	}
}
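
/*
 * Worked example of the frame-count extraction above (comment only,
 * assuming ATH6KL_APSD_NUM_OF_AC = 4 and ATH6KL_APSD_FRAME_MASK = 0x3 as
 * in core.h): for apsd_info = 0x1f the low nibble marks all four ACs as
 * delivery-enabled and (0x1f >> 4) & 0x3 = 1 is the max-SP-length code
 * taken from the station's QoS info, so one frame is delivered per
 * trigger; a code of 0 selects ATH6KL_APSD_ALL_FRAME.
 */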
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	u8 pad_before_data_start;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	bool trig_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	struct ath6kl_vif *vif;
	struct aggr_info_conn *aggr_conn;
	u16 seq_no, offset;
	u8 tid, if_idx;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || packet->act_len < HTC_HDR_LENGTH) {
		dev_kfree_skb(skb);
		return;
	}

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
			skb->data, skb->len);

	if (ept == ar->ctrl_ep) {
		if (test_bit(WMI_ENABLED, &ar->flag)) {
			ath6kl_check_wow_status(ar);
			ath6kl_wmi_control_rx(ar->wmi, skb);
			return;
		}
		if_idx =
		wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
	} else {
		if_idx =
		wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
	}

	vif = ath6kl_get_vif_by_index(ar, if_idx);
	if (!vif) {
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&vif->if_lock);

	vif->net_stats.rx_packets++;
	vif->net_stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&vif->if_lock);

	skb->dev = vif->ndev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
		return;
	}

	ath6kl_check_wow_status(ar);

	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have LLC hdr. They are 16 bytes in size.
	 * Allow these frames in the AP mode.
	 */
	if (vif->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		vif->net_stats.rx_errors++;
		vif->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/* Get the Power save state of the STA */
	if (vif->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr);
		trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(vif, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA
		 *    Clear the PVB for the STA.
		 * 2. If Awake-->Sleep, Starting queueing frames
		 *    the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		/* Accept trigger only when the station is in sleep */
		if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
			ath6kl_uapsd_trigger_frame_rx(vif, conn);

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;
				bool is_apsdq_empty;
				struct ath6kl_mgmt_buff *mgmt;
				u8 idx;

				spin_lock_bh(&conn->psq_lock);
				while (conn->mgmt_psq_len > 0) {
					mgmt = list_first_entry(
							&conn->mgmt_psq,
							struct ath6kl_mgmt_buff,
							list);
					list_del(&mgmt->list);
					conn->mgmt_psq_len--;
					spin_unlock_bh(&conn->psq_lock);
					idx = vif->fw_vif_idx;

					ath6kl_wmi_send_mgmt_cmd(ar->wmi,
								 idx,
								 mgmt->id,
								 mgmt->freq,
								 mgmt->wait,
								 mgmt->buf,
								 mgmt->len,
								 mgmt->no_cck);

					kfree(mgmt);
					spin_lock_bh(&conn->psq_lock);
				}
				conn->mgmt_psq_len = 0;
				while ((skbuff = skb_dequeue(&conn->psq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}

				is_apsdq_empty = skb_queue_empty(&conn->apsdq);
				while ((skbuff = skb_dequeue(&conn->apsdq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);

				if (!is_apsdq_empty)
					ath6kl_wmi_set_apsd_bfrd_traf(
							ar->wmi,
							vif->fw_vif_idx,
							conn->aid, 0, 0);

				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
	pad_before_data_start =
		(le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
			& WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;

	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	skb_pull(skb, pad_before_data_start);

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(vif->ndev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (vif->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest)) {
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		} else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			conn = ath6kl_find_sta(vif, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, vif->ndev);

		if (skb == NULL) {
			/* nothing to deliver up the stack */
			return;
		}
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest)) {
		if (vif->nw_type == AP_NETWORK) {
			conn = ath6kl_find_sta(vif, datap->h_source);
			if (!conn)
				return;
			aggr_conn = conn->aggr_conn;
		} else {
			aggr_conn = vif->aggr_cntxt->aggr_conn;
		}

		if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
					  is_amsdu, skb)) {
			/* aggregation code will handle the skb */
			return;
		}
	} else if (!is_broadcast_ether_addr(datap->h_dest)) {
		vif->net_stats.multicast++;
	}

	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		stats = &aggr_conn->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(aggr_conn, i, 0, 0);
	}

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			spin_lock_bh(&rxtid->lock);
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					aggr_conn->timer_scheduled = true;
					rxtid->timer_mon = true;
					break;
				}
			}
			spin_unlock_bh(&rxtid->lock);

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (aggr_conn->timer_scheduled)
		mod_timer(&aggr_conn->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}
static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!aggr_conn || tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(aggr_conn, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
			     u8 win_sz)
{
	struct ath6kl_sta *sta;
	struct aggr_info_conn *aggr_conn = NULL;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else {
		aggr_conn = vif->aggr_cntxt->aggr_conn;
	}

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}
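
/*
 * Sizing example for the allocation above (comment only): TID_WINDOW_SZ()
 * doubles the negotiated window, so an ADDBA with win_sz = 8 yields a
 * 16-entry hold_q and hold_q_size = 16 * sizeof(struct skb_hold_q). The
 * doubled reorder buffer is what lets the out-of-window handling in
 * aggr_process_recv_frm() accept frames up to one full window beyond
 * seq_next.
 */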
void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
		    struct aggr_info_conn *aggr_conn)
{
	struct rxtid *rxtid;
	u8 i;

	aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
	aggr_conn->dev = vif->ndev;
	init_timer(&aggr_conn->timer);
	aggr_conn->timer.function = aggr_timeout;
	aggr_conn->timer.data = (unsigned long) aggr_conn;
	aggr_conn->aggr_info = aggr_info;

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		rxtid->aggr = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}
}
struct aggr_info *aggr_init(struct ath6kl_vif *vif)
{
	struct aggr_info *p_aggr = NULL;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
	if (!p_aggr->aggr_conn) {
		ath6kl_err("failed to alloc memory for connection specific aggr info\n");
		kfree(p_aggr);
		return NULL;
	}

	aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);

	skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
	ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);

	return p_aggr;
}
void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
	struct ath6kl_sta *sta;
	struct rxtid *rxtid;
	struct aggr_info_conn *aggr_conn = NULL;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else {
		aggr_conn = vif->aggr_cntxt->aggr_conn;
	}

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);
}
void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
	u8 tid;

	if (!aggr_conn)
		return;

	if (aggr_conn->timer_scheduled) {
		del_timer(&aggr_conn->timer);
		aggr_conn->timer_scheduled = false;
	}

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_conn, tid);
}
/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}
void aggr_module_destroy(struct aggr_info *aggr_info)
{
	if (!aggr_info)
		return;

	aggr_reset_state(aggr_info->aggr_conn);
	skb_queue_purge(&aggr_info->rx_amsdu_freeq);
	kfree(aggr_info->aggr_conn);
	kfree(aggr_info);
}