/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "debug.h"
#include "htc-ops.h"
#include "trace.h"
/*
 * tid - tid_mux0..tid_mux3
 * aid - tid_mux4..tid_mux7
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4

static inline u8 ath6kl_get_tid(u8 tid_mux)
{
	return tid_mux & ATH6KL_TID_MASK;
}

static inline u8 ath6kl_get_aid(u8 tid_mux)
{
	return tid_mux >> ATH6KL_AID_SHIFT;
}
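
/*
 * Worked example of the split above (a sketch, not from the original
 * source): for tid_mux 0x25, ath6kl_get_tid() yields 0x25 & 0xf = 5 and
 * ath6kl_get_aid() yields 0x25 >> 4 = 2, i.e. TID 5 of the STA with AID 2.
 */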

static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the inuse endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}
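
/*
 * Summary of the mapping above (derived from the code, for reference):
 * multicast frames always use ENDPOINT_2; a unicast peer keeps its
 * node_map slot while it has pending tx, new peers take the first of
 * ENDPOINT_2..ENDPOINT_5 with no pending traffic, and once all four are
 * busy, endpoints are handed out round-robin via ar->next_ep_id.
 */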

static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
				  struct ath6kl_vif *vif,
				  struct sk_buff *skb,
				  u32 *flags)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty = false;
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	u8 up = 0, traffic_class, *ip_hdr;
	u16 ether_type;
	struct ath6kl_llc_snap_hdr *llc_hdr;

	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
		/*
		 * This tx is because of a uAPSD trigger, determine
		 * the MORE and EOSP bits. Set EOSP if the queue is empty
		 * or sufficient frames are delivered for this trigger.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->apsdq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		else if (conn->sta_flags & STA_PS_APSD_EOSP)
			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	} else if (!conn->apsd_info)
		return false;

	if (test_bit(WMM_ENABLED, &vif->flags)) {
		ether_type = be16_to_cpu(datap->h_proto);
		if (is_ethertype(ether_type)) {
			/* packet is in DIX format  */
			ip_hdr = (u8 *)(datap + 1);
		} else {
			/* packet is in 802.3 format */
			llc_hdr = (struct ath6kl_llc_snap_hdr *)
				(datap + 1);
			ether_type = be16_to_cpu(llc_hdr->eth_type);
			ip_hdr = (u8 *)(llc_hdr + 1);
		}

		if (ether_type == IP_ETHERTYPE)
			up = ath6kl_wmi_determine_user_priority(
							ip_hdr, 0);
	}

	traffic_class = ath6kl_wmi_get_traffic_class(up);

	if ((conn->apsd_info & (1 << traffic_class)) == 0)
		return false;

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	skb_queue_tail(&conn->apsdq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA
	 */
	if (is_apsdq_empty) {
		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 1, 0);
	}
	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;

	return true;
}
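
/*
 * Decision summary for the uAPSD path above: a frame sent in response
 * to a trigger gets WMI_DATA_HDR_FLAGS_UAPSD plus MORE (queue still
 * non-empty) or EOSP (last frame of the service period); any other
 * frame for a delivery-enabled AC is parked on conn->apsdq, and the
 * target's buffered-traffic map is refreshed only on the
 * empty -> non-empty transition.
 */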

static bool ath6kl_process_psq(struct ath6kl_sta *conn,
			       struct ath6kl_vif *vif,
			       struct sk_buff *skb,
			       u32 *flags)
{
	bool is_psq_empty = false;
	struct ath6kl *ar = vif->ar;

	if (conn->sta_flags & STA_PS_POLLED) {
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->psq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	}

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_psq_empty = skb_queue_empty(&conn->psq);
	skb_queue_tail(&conn->psq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA.
	 */
	if (is_psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi,
				       vif->fw_vif_idx,
				       conn->aid, 1);
	return true;
}

static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
				u32 *flags)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false;
	struct ath6kl *ar = vif->ar;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a Dtim Expiry,
			 * queue it.
			 */
			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first Mcast pkt getting
				 * queued indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of Dtim expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*flags |= WMI_DATA_HDR_FLAGS_MORE;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(vif, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			ps_queued = ath6kl_process_uapsdq(conn,
							  vif, skb, flags);
			if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
				ps_queued = ath6kl_process_psq(conn,
							       vif, skb,
							       flags);
		}
	}

	return ps_queued;
}

int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	trace_ath6kl_wmi_cmd(skb->data, skb->len);

	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
		dev_kfree_skb(skb);
		return -EACCES;
	}

	if (WARN_ON_ONCE(eid == ENDPOINT_UNUSED ||
			 eid >= ENDPOINT_MAX)) {
		status = -EINVAL;
		goto fail_ctrl_tx;
	}

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else {
		cookie = ath6kl_alloc_cookie(ar);
	}

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);
	cookie->htc_pkt.skb = skb;

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}

int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	struct ath6kl_vif *vif = netdev_priv(dev);
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false;
	int ret;
	struct wmi_tx_meta_v2 meta_v2;
	void *meta;
	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
	u8 meta_ver = 0;
	u32 flags = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &vif->flags))
		goto fail_tx;

	if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
		goto fail_tx;

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (vif->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(vif, skb, &flags))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			csum_start = skb->csum_start -
					(skb_network_header(skb) - skb->head) +
					sizeof(struct ath6kl_llc_snap_hdr);
			csum_dest = skb->csum_offset + csum_start;
		}

		if (skb_headroom(skb) < dev->needed_headroom) {
			struct sk_buff *tmp_skb = skb;

			skb = skb_realloc_headroom(skb, dev->needed_headroom);
			kfree_skb(tmp_skb);
			if (skb == NULL) {
				vif->net_stats.tx_dropped++;
				return 0;
			}
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			meta_v2.csum_start = csum_start;
			meta_v2.csum_dest = csum_dest;

			/* instruct target to calculate checksum */
			meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
			meta_ver = WMI_META_VERSION_2;
			meta = &meta_v2;
		} else {
			meta_ver = 0;
			meta = NULL;
		}

		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
					      DATA_MSGTYPE, flags, 0,
					      meta_ver,
					      meta, vif->fw_vif_idx);

		if (ret) {
			ath6kl_warn("failed to add wmi data header:%d\n", ret);
			goto fail_tx;
		}

		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
				    vif->fw_vif_idx, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else {
		goto fail_tx;
	}

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move) the buffer data to align it. Since the
		 * skb buffer is cloned and not only the header is changed, we
		 * have to copy it to allow the changes. Since we are copying
		 * the data here, we may as well align it by reserving suitable
		 * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);
	cookie->htc_pkt.skb = skb;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	vif->net_stats.tx_dropped++;
	vif->net_stats.tx_aborted_errors++;

	return 0;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];

	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive;
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_activity_changed(ar->htc_target, eid, active);
}

enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct ath6kl_vif *vif;
	enum htc_endpoint_id endpoint = packet->endpoint;
	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI, if this is getting full then something
		 * is running rampant: the host should not be exhausting the
		 * WMI queue with too many commands. The only exception to
		 * this is during testing using endpointping.
		 */
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		ath6kl_err("wmi ctrl ep is full\n");
		ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL);
		return action;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return action;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <=
			target->endpoint[endpoint].tx_drop_packet_threshold)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		action = HTC_SEND_FULL_DROP;

	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (vif->nw_type == ADHOC_NETWORK ||
		    action != HTC_SEND_FULL_DROP) {
			spin_unlock_bh(&ar->list_lock);

			set_bit(NETQ_STOPPED, &vif->flags);
			netif_stop_queue(vif->ndev);

			return action;
		}
	}
	spin_unlock_bh(&ar->list_lock);

	return action;
}
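
/*
 * Policy implemented above, in short: a full WMI control endpoint is a
 * firmware-level fault and triggers recovery; a data endpoint backing a
 * stream of lower priority than the highest active one, with the cookie
 * count at or below the endpoint's drop threshold, has its overflow
 * dropped; every other case stops the netdev queue until
 * ath6kl_tx_complete() drains the endpoint.
 */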

/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
				     enum htc_endpoint_id eid, u32 map_no)
{
	struct ath6kl *ar = vif->ar;
	u32 i;

	if (vif->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}

void ath6kl_tx_complete(struct htc_target *target,
			struct list_head *packet_queue)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing[ATH6KL_VIF_MAX] = {false};
	u8 if_idx;
	struct ath6kl_vif *vif;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {
		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		if (WARN_ON_ONCE(packet->endpoint == ENDPOINT_UNUSED ||
				 packet->endpoint >= ENDPOINT_MAX))
			continue;

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (WARN_ON_ONCE(!ath6kl_cookie))
			continue;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (WARN_ON_ONCE(!skb || !skb->data)) {
			dev_kfree_skb(skb);
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		__skb_queue_tail(&skb_queue, skb);

		if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (eid == ar->ctrl_ep) {
			if_idx = wmi_cmd_hdr_get_if_idx(
				(struct wmi_cmd_hdr *) packet->buf);
		} else {
			if_idx = wmi_data_hdr_get_if_idx(
				(struct wmi_data_hdr *) packet->buf);
		}

		vif = ath6kl_get_vif_by_index(ar, if_idx);
		if (!vif) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed  */
				flushing[if_idx] = true;

			vif->net_stats.tx_errors++;

			if (status != -ENOSPC && status != -ECANCELED)
				ath6kl_warn("tx complete error: %d\n", status);

			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing[if_idx] = false;
			vif->net_stats.tx_packets++;
			vif->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(vif, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &vif->flags))
			clear_bit(NETQ_STOPPED, &vif->flags);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (test_bit(CONNECTED, &vif->flags) &&
		    !flushing[vif->fw_vif_idx]) {
			spin_unlock_bh(&ar->list_lock);
			netif_wake_queue(vif->ndev);
			spin_lock_bh(&ar->list_lock);
		}
	}
	spin_unlock_bh(&ar->list_lock);

	if (wake_event)
		wake_up(&ar->event_wq);
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
	    (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
				     AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

	return skb;
}

void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
		ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
			size_t len = skb_headlen(skb);
			skb->data = PTR_ALIGN(skb->data - 4, 4);
			skb_set_tail_pointer(skb, len);
		}
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		packet->skb = skb;
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}
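
/*
 * Note on the PTR_ALIGN dance above (shared with
 * ath6kl_refill_amsdu_rxbufs() below): the htc_packet bookkeeping lives
 * in the skb headroom (skb->head), and skb->data is pulled back to the
 * previous 4-byte boundary so the transport can DMA at an aligned
 * address without an extra copy.
 */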

void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
			size_t len = skb_headlen(skb);
			skb->data = PTR_ALIGN(skb->data - 4, 4);
			skb_set_tail_pointer(skb, len);
		}
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		packet->skb = skb;

		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}

static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate ? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/* Add the length of A-MSDU subframe padding bytes -
		 * Round to nearest word.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}

static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	spin_lock_bh(&rxtid->lock);
	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to 'the' seq_no, when BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: last seq no in current window will occupy the same
	 * index position as index that is just previous to start.
	 * An important point: if win_sz is 7, for seq_no space of 4095,
	 * then there would be holes when sequence wrap-around occurs.
	 * The target should judiciously choose the win_sz, based on
	 * this condition. For 4095, TID_WINDOW_SZ = 2 x win_sz and
	 * win_sz of 2, 4, 8 or 16 works fine.
	 * We must dequeue from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
						 node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else {
			stats->num_hole++;
		}

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}
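
/*
 * Worked example for the window arithmetic above, assuming
 * AGGR_WIN_IDX(x, sz) == x % sz and hold_q_sz == TID_WINDOW_SZ(win_sz)
 * == 2 * win_sz (as set up in aggr_recv_addba_req_evt()): with win_sz 8,
 * hold_q_sz is 16 and seq 4094 maps to index 4094 % 16 = 14; the wrap at
 * 4096 is clean only because 4096 is a multiple of 16, which is the
 * "holes at wrap-around" caveat in the comment above.
 */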

static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_conn, tid, 0, 0);
			spin_lock_bh(&rxtid->lock);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
			spin_unlock_bh(&rxtid->lock);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
					(rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_conn, tid, st, 0);
		}
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame duplicate or something beyond our window(hold_q
	 * -> which is 2x, already)?
	 *
	 * 1. Duplicate is easy - drop incoming frame.
	 * 2. Not falling in current sliding window.
	 *  2a. is the frame_seq_no preceding current tid_seq_no?
	 *      -> drop the frame. perhaps sender did not get our ACK.
	 *         this is taken care of above.
	 *  2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
	 *      -> Taken care of it above, by moving window forward.
	 */
	dev_kfree_skb(node->skb);
	stats->num_dups++;

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_conn, tid, 0, 1);

	if (agg_conn->timer_scheduled)
		return is_queued;

	spin_lock_bh(&rxtid->lock);
	for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
		if (rxtid->hold_q[idx].skb) {
			/*
			 * There is a frame in the queue and no
			 * timer so start a timer to ensure that
			 * the frame doesn't remain stuck
			 * forever.
			 */
			agg_conn->timer_scheduled = true;
			mod_timer(&agg_conn->timer,
				  (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
			rxtid->timer_mon = true;
			break;
		}
	}
	spin_unlock_bh(&rxtid->lock);

	return is_queued;
}
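
/*
 * Example of the out-of-order handling above (a sketch): with
 * hold_q_sz 16 and seq_next 100, the in-order window is [100, 115] and
 * the extended window reaches 130. seq_no 120 only shifts the window,
 * dequeuing everything below the new start 120 - 15 = 105, while
 * seq_no 131 lies beyond even the extended window, so the whole queue
 * is flushed and seq_next restarts at 131 - 15 = 116.
 */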

static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
					  struct ath6kl_sta *conn)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty, is_apsdq_empty_at_start;
	u32 num_frames_to_deliver, flags;
	struct sk_buff *skb = NULL;

	/*
	 * If the APSD q for this STA is not empty, dequeue and
	 * send a pkt from the head of the q. Also update the
	 * More data bit in the WMI_DATA_HDR if there are
	 * more pkts for this STA in the APSD q.
	 * If there are no more pkts for this STA,
	 * update the APSD bitmap for this STA.
	 */

	num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
						    ATH6KL_APSD_FRAME_MASK;

	/*
	 * The number of frames to send in a service period is
	 * indicated by the station in the QOS_INFO of the
	 * association request.
	 * If it is zero, send all frames.
	 */
	if (!num_frames_to_deliver)
		num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	spin_unlock_bh(&conn->psq_lock);
	is_apsdq_empty_at_start = is_apsdq_empty;

	while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
		spin_lock_bh(&conn->psq_lock);
		skb = skb_dequeue(&conn->apsdq);
		is_apsdq_empty = skb_queue_empty(&conn->apsdq);
		spin_unlock_bh(&conn->psq_lock);

		/*
		 * Set the STA flag to Trigger delivery,
		 * so that the frame will go out
		 */
		conn->sta_flags |= STA_PS_APSD_TRIGGER;
		num_frames_to_deliver--;

		/* Last frame in the service period, set EOSP or queue empty */
		if ((is_apsdq_empty) || (!num_frames_to_deliver))
			conn->sta_flags |= STA_PS_APSD_EOSP;

		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
		conn->sta_flags &= ~(STA_PS_APSD_EOSP);
	}

	if (is_apsdq_empty) {
		if (is_apsdq_empty_at_start)
			flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
		else
			flags = 0;

		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 0, flags);
	}
}
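
/*
 * Layout assumed by the extraction above (not spelled out in this file):
 * the low ATH6KL_APSD_NUM_OF_AC bits of conn->apsd_info are the per-AC
 * delivery-enabled flags tested in ath6kl_process_uapsdq(), and the
 * ATH6KL_APSD_FRAME_MASK bits above them carry the Max SP Length the
 * station advertised in the QoS info of its association request.
 */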

void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	u8 pad_before_data_start;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	bool trig_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	struct ath6kl_vif *vif;
	struct aggr_info_conn *aggr_conn;
	u16 seq_no, offset;
	u8 tid, if_idx;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || packet->act_len < HTC_HDR_LENGTH) {
		dev_kfree_skb(skb);
		return;
	}

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
			skb->data, skb->len);

	if (ept == ar->ctrl_ep) {
		if (test_bit(WMI_ENABLED, &ar->flag)) {
			ath6kl_check_wow_status(ar);
			ath6kl_wmi_control_rx(ar->wmi, skb);
			return;
		}
		if_idx =
			wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
	} else {
		if_idx =
			wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
	}

	vif = ath6kl_get_vif_by_index(ar, if_idx);
	if (!vif) {
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&vif->if_lock);

	vif->net_stats.rx_packets++;
	vif->net_stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&vif->if_lock);

	skb->dev = vif->ndev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
		return;
	}

	ath6kl_check_wow_status(ar);

	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have an LLC hdr. They are 16 bytes in size.
	 * Allow these frames in the AP mode.
	 */
	if (vif->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		vif->net_stats.rx_errors++;
		vif->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/* Get the Power save state of the STA */
	if (vif->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr);
		trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(vif, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA
		 *    and clear the PVB for the STA.
		 * 2. If Awake-->Sleep, start queueing frames for
		 *    the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		/* Accept trigger only when the station is in sleep */
		if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
			ath6kl_uapsd_trigger_frame_rx(vif, conn);

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;
				bool is_apsdq_empty;
				struct ath6kl_mgmt_buff *mgmt;
				u8 idx;

				spin_lock_bh(&conn->psq_lock);
				while (conn->mgmt_psq_len > 0) {
					mgmt = list_first_entry(
							&conn->mgmt_psq,
							struct ath6kl_mgmt_buff,
							list);
					list_del(&mgmt->list);
					conn->mgmt_psq_len--;
					spin_unlock_bh(&conn->psq_lock);
					idx = vif->fw_vif_idx;

					ath6kl_wmi_send_mgmt_cmd(ar->wmi,
								 idx,
								 mgmt->id,
								 mgmt->freq,
								 mgmt->wait,
								 mgmt->buf,
								 mgmt->len,
								 mgmt->no_cck);

					kfree(mgmt);
					spin_lock_bh(&conn->psq_lock);
				}
				conn->mgmt_psq_len = 0;
				while ((skbuff = skb_dequeue(&conn->psq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}

				is_apsdq_empty = skb_queue_empty(&conn->apsdq);
				while ((skbuff = skb_dequeue(&conn->apsdq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);

				if (!is_apsdq_empty)
					ath6kl_wmi_set_apsd_bfrd_traf(
							ar->wmi,
							vif->fw_vif_idx,
							conn->aid, 0, 0);

				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
	pad_before_data_start =
		(le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
			& WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;

	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	skb_pull(skb, pad_before_data_start);

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(vif->ndev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (vif->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest))
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			conn = ath6kl_find_sta(vif, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, vif->ndev);

		if (skb == NULL) {
			/* nothing to deliver up the stack */
			return;
		}
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest)) {
		if (vif->nw_type == AP_NETWORK) {
			conn = ath6kl_find_sta(vif, datap->h_source);
			if (!conn)
				return;
			aggr_conn = conn->aggr_conn;
		} else {
			aggr_conn = vif->aggr_cntxt->aggr_conn;
		}

		if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
					  is_amsdu, skb)) {
			/* aggregation code will handle the skb */
			return;
		}
	} else if (!is_broadcast_ether_addr(datap->h_dest)) {
		vif->net_stats.multicast++;
	}

	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}

static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		stats = &aggr_conn->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(aggr_conn, i, 0, 0);
	}

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			spin_lock_bh(&rxtid->lock);
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					aggr_conn->timer_scheduled = true;
					rxtid->timer_mon = true;
					break;
				}
			}
			spin_unlock_bh(&rxtid->lock);

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (aggr_conn->timer_scheduled)
		mod_timer(&aggr_conn->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!aggr_conn || tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(aggr_conn, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}

void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
			     u8 win_sz)
{
	struct ath6kl_sta *sta;
	struct aggr_info_conn *aggr_conn = NULL;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else {
		aggr_conn = vif->aggr_cntxt->aggr_conn;
	}

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}

void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
		    struct aggr_info_conn *aggr_conn)
{
	struct rxtid *rxtid;
	u8 i;

	aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
	aggr_conn->dev = vif->ndev;
	init_timer(&aggr_conn->timer);
	aggr_conn->timer.function = aggr_timeout;
	aggr_conn->timer.data = (unsigned long) aggr_conn;
	aggr_conn->aggr_info = aggr_info;

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		rxtid->aggr = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}
}

struct aggr_info *aggr_init(struct ath6kl_vif *vif)
{
	struct aggr_info *p_aggr = NULL;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
	if (!p_aggr->aggr_conn) {
		ath6kl_err("failed to alloc memory for connection specific aggr info\n");
		kfree(p_aggr);
		return NULL;
	}

	aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);

	skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
	ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);

	return p_aggr;
}

void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
	struct ath6kl_sta *sta;
	struct rxtid *rxtid;
	struct aggr_info_conn *aggr_conn = NULL;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else {
		aggr_conn = vif->aggr_cntxt->aggr_conn;
	}

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);
}

void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
	u8 tid;

	if (!aggr_conn)
		return;

	if (aggr_conn->timer_scheduled) {
		del_timer(&aggr_conn->timer);
		aggr_conn->timer_scheduled = false;
	}

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_conn, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}

void aggr_module_destroy(struct aggr_info *aggr_info)
{
	if (!aggr_info)
		return;

	aggr_reset_state(aggr_info->aggr_conn);
	skb_queue_purge(&aggr_info->rx_amsdu_freeq);
	kfree(aggr_info->aggr_conn);
	kfree(aggr_info);
}