/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "debug.h"
#include "htc-ops.h"
#include "trace.h"

/*
 * tid - tid_mux0..tid_mux3
 * aid - tid_mux4..tid_mux7
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4

static inline u8 ath6kl_get_tid(u8 tid_mux)
{
	return tid_mux & ATH6KL_TID_MASK;
}

static inline u8 ath6kl_get_aid(u8 tid_mux)
{
	return tid_mux >> ATH6KL_AID_SHIFT;
}

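/*
 * Map an IBSS peer (identified by the destination MAC in the 802.3 header
 * that follows the WMI data header) to a data endpoint, creating a new
 * node_map entry when the peer is not yet known. *map_no returns the
 * 1-based node index, or 0 for multicast frames.
 */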
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the inuse endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}

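/*
 * Decide whether a frame for a sleeping U-APSD station must be queued on
 * the per-station apsdq or sent immediately as part of a trigger-enabled
 * service period. Returns true when the skb has been queued (consumed).
 */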
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
				  struct ath6kl_vif *vif,
				  struct sk_buff *skb,
				  u32 *flags)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty = false;
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	u8 up = 0, traffic_class, *ip_hdr;
	u16 ether_type;
	struct ath6kl_llc_snap_hdr *llc_hdr;

	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
		/*
		 * This tx is because of a uAPSD trigger, determine
		 * more and EOSP bit. Set EOSP if queue is empty
		 * or sufficient frames are delivered for this trigger.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->apsdq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		else if (conn->sta_flags & STA_PS_APSD_EOSP)
			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	} else if (!conn->apsd_info) {
		return false;
	}

	if (test_bit(WMM_ENABLED, &vif->flags)) {
		ether_type = be16_to_cpu(datap->h_proto);
		if (is_ethertype(ether_type)) {
			/* packet is in DIX format  */
			ip_hdr = (u8 *)(datap + 1);
		} else {
			/* packet is in 802.3 format */
			llc_hdr = (struct ath6kl_llc_snap_hdr *)
				(datap + 1);
			ether_type = be16_to_cpu(llc_hdr->eth_type);
			ip_hdr = (u8 *)(llc_hdr + 1);
		}

		if (ether_type == IP_ETHERTYPE)
			up = ath6kl_wmi_determine_user_priority(
							ip_hdr, 0);
	}

	traffic_class = ath6kl_wmi_get_traffic_class(up);

	if ((conn->apsd_info & (1 << traffic_class)) == 0)
		return false;

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	skb_queue_tail(&conn->apsdq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA
	 */
	if (is_apsdq_empty) {
		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 1, 0);
	}
	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;

	return true;
}

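/*
 * Legacy (non U-APSD) power save handling: queue the frame on the
 * per-station psq while the station sleeps and update the PVB when the
 * first frame is queued. Returns true when the skb has been queued.
 */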
static bool ath6kl_process_psq(struct ath6kl_sta *conn,
			       struct ath6kl_vif *vif,
			       struct sk_buff *skb,
			       u32 *flags)
{
	bool is_psq_empty = false;
	struct ath6kl *ar = vif->ar;

	if (conn->sta_flags & STA_PS_POLLED) {
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->psq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	}

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_psq_empty = skb_queue_empty(&conn->psq);
	skb_queue_tail(&conn->psq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this
	 * STA connected
	 */
	if (is_psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi,
				       vif->fw_vif_idx,
				       conn->aid, 1);

	return true;
}

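/*
 * AP mode power save processing for a frame about to be transmitted.
 * Multicast frames are buffered on the shared mcastpsq while any station
 * sleeps; unicast frames are handed to the U-APSD or legacy PS queues of
 * the destination station. Returns true when the skb has been consumed.
 */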
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
				u32 *flags)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false;
	struct ath6kl *ar = vif->ar;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a Dtim Expiry
			 * q it.
			 */
			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first Mcast pkt getting
				 * queued indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of Dtim expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*flags |= WMI_DATA_HDR_FLAGS_MORE;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(vif, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			ps_queued = ath6kl_process_uapsdq(conn,
							  vif, skb, flags);
			if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
				ps_queued = ath6kl_process_psq(conn,
							       vif, skb, flags);
		}
	}

	return ps_queued;
}

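/* Send a WMI control packet to the target over the given HTC endpoint. */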
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	trace_ath6kl_wmi_cmd(skb->data, skb->len);

	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
		dev_kfree_skb(skb);
		return -EACCES;
	}

	if (WARN_ON_ONCE(eid == ENDPOINT_UNUSED ||
			 eid >= ENDPOINT_MAX)) {
		status = -EINVAL;
		goto fail_ctrl_tx;
	}

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else {
		cookie = ath6kl_alloc_cookie(ar);
	}

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);
	cookie->htc_pkt.skb = skb;

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}

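/*
 * Transmit path entry point for the net device: performs power save
 * processing in AP mode, adds the WMI data header (with optional checksum
 * offload meta data), selects the HTC endpoint for the frame's access
 * category and hands the packet to HTC.
 */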
netdev_tx_t ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	struct ath6kl_vif *vif = netdev_priv(dev);
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false;
	int ret;
	struct wmi_tx_meta_v2 meta_v2;
	void *meta;
	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
	u8 meta_ver = 0;
	u32 flags = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &vif->flags))
		goto fail_tx;

	if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
		goto fail_tx;

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (vif->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(vif, skb, &flags))
			return NETDEV_TX_OK;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			csum_start = skb->csum_start -
					(skb_network_header(skb) - skb->head) +
					sizeof(struct ath6kl_llc_snap_hdr);
			csum_dest = skb->csum_offset + csum_start;
		}

		if (skb_cow_head(skb, dev->needed_headroom)) {
			dev->stats.tx_dropped++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			meta_v2.csum_start = csum_start;
			meta_v2.csum_dest = csum_dest;

			/* instruct target to calculate checksum */
			meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
			meta_ver = WMI_META_VERSION_2;
			meta = &meta_v2;
		} else {
			meta_ver = 0;
			meta = NULL;
		}

		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
					      DATA_MSGTYPE, flags, 0,
					      meta_ver,
					      meta, vif->fw_vif_idx);

		if (ret) {
			ath6kl_warn("failed to add wmi data header:%d\n"
				    , ret);
			goto fail_tx;
		}

		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
				    vif->fw_vif_idx, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else {
		goto fail_tx;
	}

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move the buffer data to align it. Since the
		 * skb buffer is cloned and not only the header is changed, we
		 * have to copy it to allow the changes. Since we are copying
		 * the data here, we may as well align it by reserving suitable
		 * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);
	cookie->htc_pkt.skb = skb;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return NETDEV_TX_OK;

fail_tx:
	dev_kfree_skb(skb);

	dev->stats.tx_dropped++;
	dev->stats.tx_aborted_errors++;

	return NETDEV_TX_OK;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];

	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_activity_changed(ar->htc_target, eid, active);
}

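/*
 * Called by HTC when an endpoint's send queue is full; decides whether
 * to keep or drop the overflowing packet and stops the net queue(s).
 */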
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct ath6kl_vif *vif;
	enum htc_endpoint_id endpoint = packet->endpoint;
	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI if this is getting full, then something
		 * is running rampant the host should not be exhausting the
		 * WMI queue with too many commands the only exception to
		 * this is during testing using endpointping.
		 */
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		ath6kl_err("wmi ctrl ep is full\n");
		ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL);
		return action;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return action;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <=
			target->endpoint[endpoint].tx_drop_packet_threshold)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		action = HTC_SEND_FULL_DROP;

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (vif->nw_type == ADHOC_NETWORK ||
		    action != HTC_SEND_FULL_DROP) {
			spin_unlock_bh(&ar->list_lock);

			set_bit(NETQ_STOPPED, &vif->flags);
			netif_stop_queue(vif->ndev);

			return action;
		}
	}
	spin_unlock_bh(&ar->list_lock);

	return action;
}

/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
				     enum htc_endpoint_id eid, u32 map_no)
{
	struct ath6kl *ar = vif->ar;
	u32 i;

	if (vif->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}

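/*
 * TX completion handler: reaps completed HTC packets, updates pending
 * counters and net device statistics, releases cookies and wakes the
 * net queues of connected vifs that are not being flushed.
 */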
void ath6kl_tx_complete(struct htc_target *target,
			struct list_head *packet_queue)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing[ATH6KL_VIF_MAX] = {false};
	u8 if_idx;
	struct ath6kl_vif *vif;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {
		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		if (WARN_ON_ONCE(packet->endpoint == ENDPOINT_UNUSED ||
				 packet->endpoint >= ENDPOINT_MAX))
			continue;

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (WARN_ON_ONCE(!ath6kl_cookie))
			continue;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (WARN_ON_ONCE(!skb || !skb->data)) {
			dev_kfree_skb(skb);
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		__skb_queue_tail(&skb_queue, skb);

		if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (eid == ar->ctrl_ep) {
			if_idx = wmi_cmd_hdr_get_if_idx(
				(struct wmi_cmd_hdr *) packet->buf);
		} else {
			if_idx = wmi_data_hdr_get_if_idx(
				(struct wmi_data_hdr *) packet->buf);
		}

		vif = ath6kl_get_vif_by_index(ar, if_idx);
		if (!vif) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed  */
				flushing[if_idx] = true;

			vif->ndev->stats.tx_errors++;

			if (status != -ENOSPC && status != -ECANCELED)
				ath6kl_warn("tx complete error: %d\n", status);

			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing[if_idx] = false;
			vif->ndev->stats.tx_packets++;
			vif->ndev->stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(vif, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &vif->flags))
			clear_bit(NETQ_STOPPED, &vif->flags);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (test_bit(CONNECTED, &vif->flags) &&
		    !flushing[vif->fw_vif_idx]) {
			spin_unlock_bh(&ar->list_lock);
			netif_wake_queue(vif->ndev);
			spin_lock_bh(&ar->list_lock);
		}
	}
	spin_unlock_bh(&ar->list_lock);

	if (wake_event)
		wake_up(&ar->event_wq);
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
	    (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
				     AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

	return skb;
}

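/* Top up the HTC receive buffer pool for the given endpoint. */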
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
			  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
			size_t len = skb_headlen(skb);
			skb->data = PTR_ALIGN(skb->data - 4, 4);
			skb_set_tail_pointer(skb, len);
		}
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		packet->skb = skb;
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}

void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
			size_t len = skb_headlen(skb);
			skb->data = PTR_ALIGN(skb->data - 4, 4);
			skb_set_tail_pointer(skb, len);
		}
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		packet->skb = skb;

		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}

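/*
 * Split a received A-MSDU into its 802.3 subframes, convert each one to
 * DIX format and queue it on the TID reorder queue for delivery.
 */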
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = be16_to_cpu(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate ? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/* Add the length of A-MSDU subframe padding bytes -
		 * Round to nearest word.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}

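/*
 * Release in-order frames from the TID hold queue to the network stack,
 * advancing seq_next up to seq_no (or through the whole window when
 * seq_no is 0). order != 0 stops at the first hole.
 */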
static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	spin_lock_bh(&rxtid->lock);
	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to 'the' seq_no, when BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: last seq no in current window will occupy the same
	 * index position as index that is just previous to start.
	 * An imp point : if win_sz is 7, for seq_no space of 4095,
	 * then, there would be holes when sequence wrap around occurs.
	 * Target should judiciously choose the win_sz, based on
	 * this condition. For 4095, (TID_WINDOW_SZ = 2 x win_sz
	 * 2, 4, 8, 16 win_sz works fine).
	 * We must deque from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
						 node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else {
			stats->num_hole++;
		}

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}

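/*
 * Insert a received MPDU/A-MSDU into the RX reorder window for its TID,
 * shifting the window when the sequence number falls outside it, and
 * arm the reorder timer when frames are left waiting for holes to fill.
 * Returns true when the frame has been consumed by the aggregation code.
 */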
static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_conn, tid, 0, 0);
			spin_lock_bh(&rxtid->lock);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
			spin_unlock_bh(&rxtid->lock);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
					(rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_conn, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame duplicate or something beyond our window(hold_q
	 * -> which is 2x, already)?
	 *
	 * 1. Duplicate is easy - drop incoming frame.
	 * 2. Not falling in current sliding window.
	 *  2a. is the frame_seq_no preceding current tid_seq_no?
	 *      -> drop the frame. perhaps sender did not get our ACK.
	 *         this is taken care of above.
	 *  2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
	 *      -> Taken care of it above, by moving window forward.
	 */
	dev_kfree_skb(node->skb);
	stats->num_dups++;

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_conn, tid, 0, 1);

	if (agg_conn->timer_scheduled)
		return is_queued;

	spin_lock_bh(&rxtid->lock);
	for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
		if (rxtid->hold_q[idx].skb) {
			/*
			 * There is a frame in the queue and no
			 * timer so start a timer to ensure that
			 * the frame doesn't remain stuck
			 * forever.
			 */
			agg_conn->timer_scheduled = true;
			mod_timer(&agg_conn->timer,
				  (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
			rxtid->timer_mon = true;
			break;
		}
	}
	spin_unlock_bh(&rxtid->lock);

	return is_queued;
}

static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
					  struct ath6kl_sta *conn)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty, is_apsdq_empty_at_start;
	u32 num_frames_to_deliver, flags;
	struct sk_buff *skb = NULL;

	/*
	 * If the APSD q for this STA is not empty, dequeue and
	 * send a pkt from the head of the q. Also update the
	 * More data bit in the WMI_DATA_HDR if there are
	 * more pkts for this STA in the APSD q.
	 * If there are no more pkts for this STA,
	 * update the APSD bitmap for this STA.
	 */

	num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
						    ATH6KL_APSD_FRAME_MASK;
	/*
	 * Number of frames to send in a service period is
	 * indicated by the station
	 * in the QOS_INFO of the association request
	 * If it is zero, send all frames
	 */
	if (!num_frames_to_deliver)
		num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	spin_unlock_bh(&conn->psq_lock);
	is_apsdq_empty_at_start = is_apsdq_empty;

	while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
		spin_lock_bh(&conn->psq_lock);
		skb = skb_dequeue(&conn->apsdq);
		is_apsdq_empty = skb_queue_empty(&conn->apsdq);
		spin_unlock_bh(&conn->psq_lock);

		/*
		 * Set the STA flag to Trigger delivery,
		 * so that the frame will go out
		 */
		conn->sta_flags |= STA_PS_APSD_TRIGGER;
		num_frames_to_deliver--;

		/* Last frame in the service period, set EOSP or queue empty */
		if ((is_apsdq_empty) || (!num_frames_to_deliver))
			conn->sta_flags |= STA_PS_APSD_EOSP;

		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
		conn->sta_flags &= ~(STA_PS_APSD_EOSP);
	}

	if (is_apsdq_empty) {
		if (is_apsdq_empty_at_start)
			flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
		else
			flags = 0;

		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 0, flags);
	}
}

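/*
 * Main RX completion handler: control frames are passed to WMI, data
 * frames have their WMI/meta headers stripped, AP mode power save and
 * intra-BSS forwarding are applied, and in-order frames are delivered
 * to the network stack (via the aggregation reorder logic for unicast).
 */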
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	u8 pad_before_data_start;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	bool trig_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	struct ath6kl_vif *vif;
	struct aggr_info_conn *aggr_conn;
	u16 seq_no, offset;
	u8 tid, if_idx;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || packet->act_len < HTC_HDR_LENGTH) {
		dev_kfree_skb(skb);
		return;
	}

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
			skb->data, skb->len);

	if (ept == ar->ctrl_ep) {
		if (test_bit(WMI_ENABLED, &ar->flag)) {
			ath6kl_check_wow_status(ar);
			ath6kl_wmi_control_rx(ar->wmi, skb);
			return;
		}
		if_idx =
		wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
	} else {
		if_idx =
		wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
	}

	vif = ath6kl_get_vif_by_index(ar, if_idx);
	if (!vif) {
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&vif->if_lock);

	vif->ndev->stats.rx_packets++;
	vif->ndev->stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&vif->if_lock);

	skb->dev = vif->ndev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
		return;
	}

	ath6kl_check_wow_status(ar);

	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have LLC hdr. They are 16 bytes in size.
	 * Allow these frames in the AP mode.
	 */
	if (vif->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		vif->ndev->stats.rx_errors++;
		vif->ndev->stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	pad_before_data_start =
		(le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
			& WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;

	/* Get the Power save state of the STA */
	if (vif->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr) + pad_before_data_start;
		trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(vif, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA
		 *    Clear the PVB for the STA.
		 * 2. If Awake-->Sleep, Starting queueing frames
		 *    the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		/* Accept trigger only when the station is in sleep */
		if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
			ath6kl_uapsd_trigger_frame_rx(vif, conn);

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;
				bool is_apsdq_empty;
				struct ath6kl_mgmt_buff *mgmt;
				u8 idx;

				spin_lock_bh(&conn->psq_lock);
				while (conn->mgmt_psq_len > 0) {
					mgmt = list_first_entry(
							&conn->mgmt_psq,
							struct ath6kl_mgmt_buff,
							list);
					list_del(&mgmt->list);
					conn->mgmt_psq_len--;
					spin_unlock_bh(&conn->psq_lock);
					idx = vif->fw_vif_idx;

					ath6kl_wmi_send_mgmt_cmd(ar->wmi,
								 idx,
								 mgmt->id,
								 mgmt->freq,
								 mgmt->wait,
								 mgmt->buf,
								 mgmt->len,
								 mgmt->no_cck);

					kfree(mgmt);
					spin_lock_bh(&conn->psq_lock);
				}
				conn->mgmt_psq_len = 0;
				while ((skbuff = skb_dequeue(&conn->psq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}

				is_apsdq_empty = skb_queue_empty(&conn->apsdq);
				while ((skbuff = skb_dequeue(&conn->apsdq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);

				if (!is_apsdq_empty)
					ath6kl_wmi_set_apsd_bfrd_traf(
							ar->wmi,
							vif->fw_vif_idx,
							conn->aid, 0, 0);

				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);

	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	skb_pull(skb, pad_before_data_start);

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(vif->ndev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (vif->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest))
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			conn = ath6kl_find_sta(vif, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, vif->ndev);

		if (skb == NULL) {
			/* nothing to deliver up the stack */
			return;
		}
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest)) {
		if (vif->nw_type == AP_NETWORK) {
			conn = ath6kl_find_sta(vif, datap->h_source);
			if (!conn)
				return;
			aggr_conn = conn->aggr_conn;
		} else {
			aggr_conn = vif->aggr_cntxt->aggr_conn;
		}

		if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
					  is_amsdu, skb)) {
			/* aggregation code will handle the skb */
			return;
		}
	} else if (!is_broadcast_ether_addr(datap->h_dest)) {
		vif->ndev->stats.multicast++;
	}

	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}

static void aggr_timeout(struct timer_list *t)
{
	u8 i, j;
	struct aggr_info_conn *aggr_conn = from_timer(aggr_conn, t, timer);
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		stats = &aggr_conn->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz-1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(aggr_conn, i, 0, 0);
	}

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			spin_lock_bh(&rxtid->lock);
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					aggr_conn->timer_scheduled = true;
					rxtid->timer_mon = true;
					break;
				}
			}
			spin_unlock_bh(&rxtid->lock);

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (aggr_conn->timer_scheduled)
		mod_timer(&aggr_conn->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!aggr_conn || tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(aggr_conn, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}

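/*
 * ADDBA request event: (re)initialise the RX reorder state for the TID
 * encoded in tid_mux (the AID in the upper nibble selects the station in
 * AP mode) and allocate a hold queue of TID_WINDOW_SZ(win_sz) entries.
 */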
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
			     u8 win_sz)
{
	struct ath6kl_sta *sta;
	struct aggr_info_conn *aggr_conn = NULL;
	struct rxtid *rxtid;
	u16 hold_q_size;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else {
		aggr_conn = vif->aggr_cntxt->aggr_conn;
	}

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}

void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
		    struct aggr_info_conn *aggr_conn)
{
	struct rxtid *rxtid;
	u8 i;

	aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
	aggr_conn->dev = vif->ndev;
	timer_setup(&aggr_conn->timer, aggr_timeout, 0);
	aggr_conn->aggr_info = aggr_info;

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		rxtid->aggr = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}
}

struct aggr_info *aggr_init(struct ath6kl_vif *vif)
{
	struct aggr_info *p_aggr = NULL;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
	if (!p_aggr->aggr_conn) {
		ath6kl_err("failed to alloc memory for connection specific aggr info\n");
		kfree(p_aggr);
		return NULL;
	}

	aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);

	skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
	ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);

	return p_aggr;
}

void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
	struct ath6kl_sta *sta;
	struct rxtid *rxtid;
	struct aggr_info_conn *aggr_conn = NULL;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else {
		aggr_conn = vif->aggr_cntxt->aggr_conn;
	}

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);
}

void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
	u8 tid;

	if (!aggr_conn)
		return;

	if (aggr_conn->timer_scheduled) {
		del_timer(&aggr_conn->timer);
		aggr_conn->timer_scheduled = false;
	}

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_conn, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}

void aggr_module_destroy(struct aggr_info *aggr_info)
{
	if (!aggr_info)
		return;

	aggr_reset_state(aggr_info->aggr_conn);
	skb_queue_purge(&aggr_info->rx_amsdu_freeq);
	kfree(aggr_info->aggr_conn);
	kfree(aggr_info);
}