/******************************************************************************
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/ieee80211.h>

#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-trans.h"
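
/*
 * iwlagn_tx_cmd_protection - set the protection flag in the TX command
 *
 * RTS/CTS, CTS-to-self and A-MPDU (which must always be protected) are
 * all requested with the single PROT_REQUIRE flag; the uCode is expected
 * to pick the actual protection scheme from the RXON configuration.
 */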
static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
				     struct ieee80211_tx_info *info,
				     __le16 fc, __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
	    info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
	    info->flags & IEEE80211_TX_CTL_AMPDU)
		*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
}
/*
 * Build the basic part of the REPLY_TX command.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr, u8 sta_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK_MSK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF_MSK;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 cfg(priv)->bt_params &&
		 cfg(priv)->bt_params->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		  ieee80211_is_reassoc_req(fc) ||
		  skb->protocol == cpu_to_be16(ETH_P_PAE)))
		tx_flags |= TX_CMD_FLG_IGNORE_BT;

	tx_cmd->sta_id = sta_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	}

	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
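
/*
 * iwlagn_tx_cmd_build_rate - fill the rate and retry fields of the TX cmd
 *
 * Data frames are rate-scaled by the uCode from the station table, so only
 * an initial rate index is programmed for them; everything else gets an
 * explicit legacy rate. mac80211 numbers the OFDM-only 5 GHz band from
 * index 0 while the driver rate table keeps the CCK rates first, hence
 * the IWL_FIRST_OFDM_RATE offset before the PLCP lookup.
 */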
static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
				     struct iwl_tx_cmd *tx_cmd,
				     struct ieee80211_tx_info *info,
				     __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	if (priv->shrd->wowlan) {
		rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
		data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
	} else {
		/* Set retry limit on RTS packets */
		rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;

		/* Set retry limit on DATA packets and Probe Responses */
		if (ieee80211_is_probe_resp(fc)) {
			data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
			rts_retry_limit =
				min(data_retry_limit, rts_retry_limit);
		} else if (ieee80211_is_back_req(fc))
			data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
		else
			data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
	}

	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
		if (priv->tm_fixed_rate) {
			/*
			 * rate overwrite by testmode
			 * we not only send lq command to change rate
			 * we also re-enforce per data pkt base.
			 */
			tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
			memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
			       sizeof(tx_cmd->rate_n_flags));
		}
#endif
		return;
	} else if (ieee80211_is_back_req(fc))
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
					     info->control.sta);
	/* For 5 GHz band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	if (cfg(priv)->bt_params &&
	    cfg(priv)->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				first_antenna(hw_params(priv).valid_tx_ant));
	} else
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
						hw_params(priv).valid_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
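
/*
 * iwlagn_tx_cmd_build_hwcrypto - copy per-frame crypto material into the cmd
 *
 * CCMP hands the key itself to the uCode, TKIP derives the per-packet
 * phase-2 key via ieee80211_get_tkip_p2k(), and WEP folds the key index
 * into sec_ctl. CCMP frames in an A-MPDU additionally get
 * TX_CMD_FLG_AGG_CCMP_MSK so the uCode can handle them as an aggregate.
 */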
static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
					 struct ieee80211_tx_info *info,
					 struct iwl_tx_cmd *tx_cmd,
					 struct sk_buff *skb_frag,
					 int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
/*
 * start REPLY_TX command process
 */
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_device_cmd *dev_cmd = NULL;
	struct iwl_tx_cmd *tx_cmd;
	__le16 fc;
	u8 hdr_len;
	u16 len, seq_number = 0;
	u8 sta_id, tid = IWL_MAX_TID_COUNT;
	unsigned long flags;
	bool is_agg = false;

	if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	spin_lock_irqsave(&priv->shrd->lock, flags);
	if (iwl_is_rfkill(priv->shrd)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	if (unlikely(ieee80211_is_probe_resp(fc))) {
		struct iwl_wipan_noa_data *noa_data =
			rcu_dereference(priv->noa_data);

		if (noa_data &&
		    pskb_expand_head(skb, 0, noa_data->length,
				     GFP_ATOMIC) == 0) {
			memcpy(skb_put(skb, noa_data->length),
			       noa_data->data, noa_data->length);
			hdr = (struct ieee80211_hdr *)skb->data;
		}
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id so as not to break
	 * aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv,
				       "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock_priv;
		}
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (info->control.sta)
		sta_priv = (void *)info->control.sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		is_agg = true;

	/* irqs already disabled/saved above when locking priv->shrd->lock */
	spin_lock(&priv->shrd->sta_lock);

	dev_cmd = kmem_cache_alloc(priv->tx_cmd_pool, GFP_ATOMIC);

	if (unlikely(!dev_cmd))
		goto drop_unlock_sta;

	memset(dev_cmd, 0, sizeof(*dev_cmd));
	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_update_stats(priv, true, fc, len);

	memset(&info->status, 0, sizeof(info->status));

	info->driver_data[0] = ctx;
	info->driver_data[1] = dev_cmd;

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = NULL;
		struct iwl_tid_data *tid_data;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;
		tid_data = &priv->tid_data[sta_id][tid];

		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    tid_data->agg.state != IWL_AGG_ON) {
			IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
				" Tx flags = 0x%08x, agg.state = %d",
				info->flags, tid_data->agg.state);
			IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
				sta_id, tid, SEQ_TO_SN(tid_data->seq_number));
			goto drop_unlock_sta;
		}

		/* We can receive packets from the stack in IWL_AGG_{ON,OFF}
		 * only. Check this here.
		 */
		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
			      tid_data->agg.state != IWL_AGG_OFF,
			      "Tx while agg.state = %d", tid_data->agg.state))
			goto drop_unlock_sta;

		seq_number = tid_data->seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		/* the SN field starts at bit 4; bump to the next SN */
		seq_number += 0x10;
	}

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id, tid))
		goto drop_unlock_sta;

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
	    !ieee80211_has_morefrags(fc))
		priv->tid_data[sta_id][tid].seq_number = seq_number;

	spin_unlock(&priv->shrd->sta_lock);
	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	return 0;

drop_unlock_sta:
	if (dev_cmd)
		kmem_cache_free(priv->tx_cmd_pool, dev_cmd);
	spin_unlock(&priv->shrd->sta_lock);
drop_unlock_priv:
	spin_unlock_irqrestore(&priv->shrd->lock, flags);
	return -1;
}
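
/*
 * iwlagn_tx_agg_stop - mac80211 requests Tx block-ack session teardown
 *
 * If frames for this RA / TID are still queued in the HW, the state is
 * moved to IWL_EMPTYING_HW_QUEUE_DELBA and the actual queue teardown is
 * left to iwlagn_check_ratid_empty() once the queue drains; otherwise
 * the aggregation queue is disabled immediately.
 */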
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	int sta_id;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	tid_data = &priv->tid_data[sta_id][tid];

	switch (priv->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv, "Stopping AGG while state not ON "
			 "or starting for %d on %d (%d)\n", sta_id, tid,
			 priv->tid_data[sta_id][tid].agg.state);
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);

	/* There are still packets for this RA / TID in the HW */
	if (tid_data->agg.ssn != tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_recl = %d",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		priv->tid_data[sta_id][tid].agg.state =
			IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
			    tid_data->agg.ssn);
turn_off:
	priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&priv->shrd->sta_lock);
	spin_lock(&priv->shrd->lock);

	iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
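
/*
 * iwlagn_tx_agg_start - mac80211 requests Tx block-ack session setup
 *
 * The session only becomes operational once all frames with a lower
 * sequence number have left the HW queue; until then the state stays
 * IWL_EMPTYING_HW_QUEUE_ADDBA and iwlagn_check_ratid_empty() completes
 * the handshake later.
 */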
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	int sta_id;
	int ret;

	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
		     sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	tid_data = &priv->tid_data[sta_id][tid];
	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);

	*ssn = tid_data->agg.ssn;

	ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid);
	if (ret) {
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return ret;
	}

	if (*ssn == tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
				    *ssn);
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_reclaimed = %d",
				    *ssn, tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);

	return ret;
}
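
/*
 * iwlagn_tx_agg_oper - a block-ack session became operational
 *
 * Programs the aggregation queue in the transport and propagates the
 * (clamped) reorder-buffer size to the rate scaling code through the
 * link quality command.
 */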
int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	unsigned long flags;
	u16 ssn;

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);

	iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid,
			       buf_size, ssn);

	/*
	 * If the limit is 0, then it wasn't initialised yet,
	 * use the default. We can do that since we take the
	 * minimum below, and we don't want to go above our
	 * default due to hardware restrictions.
	 */
	if (sta_priv->max_agg_bufsize == 0)
		sta_priv->max_agg_bufsize =
			LINK_QUAL_AGG_FRAME_LIMIT_DEF;

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	sta_priv->max_agg_bufsize =
		min(sta_priv->max_agg_bufsize, buf_size);

	if (cfg(priv)->ht_params &&
	    cfg(priv)->ht_params->use_rts_for_aggregation) {
		/*
		 * switch to RTS/CTS if it is the preferred protection
		 * method for HT traffic
		 */
		sta_priv->lq_sta.lq.general_params.flags |=
			LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
	}
	priv->agg_tids_count++;
	IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
		     priv->agg_tids_count);

	sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
		sta_priv->max_agg_bufsize;

	IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_send_lq_cmd(priv, ctx,
			       &sta_priv->lq_sta.lq, CMD_ASYNC, false);
}
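
/*
 * iwlagn_check_ratid_empty - complete a deferred aggregation state change
 *
 * Called with the sta_lock held whenever frames for a RA / TID have been
 * reclaimed; once the HW queue has drained up to the stored ssn, a pending
 * DELBA or ADDBA transition is finished and mac80211 is notified.
 */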
static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
{
	struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
	enum iwl_rxon_context_id ctx;
	struct ieee80211_vif *vif;
	const u8 *addr;

	lockdep_assert_held(&priv->shrd->sta_lock);

	addr = priv->stations[sta_id].sta.sta.addr;
	ctx = priv->stations[sta_id].ctxid;
	vif = priv->contexts[ctx].vif;

	switch (priv->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue DELBA flow ssn = next_recl ="
				" %d", tid_data->next_reclaimed);
			iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue ADDBA flow ssn = next_recl ="
				" %d", tid_data->next_reclaimed);
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	default:
		break;
	}
}
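
/*
 * iwlagn_non_agg_tx_status - per-client pending-frames accounting
 *
 * Balances the atomic_inc() in iwlagn_tx_skb(); when the count drops to
 * zero, the ieee80211_sta_block_awake() block on the station is lifted.
 */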
static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
				     struct iwl_rxon_context *ctx,
				     const u8 *addr1)
{
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	rcu_read_lock();
	sta = ieee80211_find_sta(ctx->vif, addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}
	rcu_read_unlock();
}
/*
 * translate ucode response to mac80211 tx status control values
 */
static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
					struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->control.rates[0];

	info->antenna_sel_tx =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */
static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= AGG_TX_STATUS_MSK;

	switch (status) {
	case AGG_TX_STATE_UNDERRUN_MSK:
		priv->reply_agg_tx_stats.underrun++;
		break;
	case AGG_TX_STATE_BT_PRIO_MSK:
		priv->reply_agg_tx_stats.bt_prio++;
		break;
	case AGG_TX_STATE_FEW_BYTES_MSK:
		priv->reply_agg_tx_stats.few_bytes++;
		break;
	case AGG_TX_STATE_ABORT_MSK:
		priv->reply_agg_tx_stats.abort++;
		break;
	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
		priv->reply_agg_tx_stats.last_sent_ttl++;
		break;
	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
		priv->reply_agg_tx_stats.last_sent_try++;
		break;
	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
		priv->reply_agg_tx_stats.last_sent_bt_kill++;
		break;
	case AGG_TX_STATE_SCD_QUERY_MSK:
		priv->reply_agg_tx_stats.scd_query++;
		break;
	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
		priv->reply_agg_tx_stats.bad_crc32++;
		break;
	case AGG_TX_STATE_RESPONSE_MSK:
		priv->reply_agg_tx_stats.response++;
		break;
	case AGG_TX_STATE_DUMP_TX_MSK:
		priv->reply_agg_tx_stats.dump_tx++;
		break;
	case AGG_TX_STATE_DELAY_TX_MSK:
		priv->reply_agg_tx_stats.delay_tx++;
		break;
	default:
		priv->reply_agg_tx_stats.unknown++;
		break;
	}
}
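
/*
 * iwl_rx_reply_tx_agg - bookkeeping for a Tx response on an aggregation queue
 *
 * Remembers the rate for the coming block-ack notification and, for
 * multi-frame responses, tallies the per-frame status codes for debugging.
 */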
static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
				struct iwlagn_tx_resp *tx_resp)
{
	struct agg_tx_status *frame_status = &tx_resp->status;
	int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;
	struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
	u32 status = le16_to_cpu(tx_resp->status.status);
	int i;

	WARN_ON(tid == IWL_TID_NON_QOS);

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv,
				   "got tx response w/o block-ack\n");

	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	agg->wait_for_ba = (tx_resp->frame_count > 1);

	/*
	 * If the BT kill count is non-zero, we'll get this
	 * notification again.
	 */
	if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
	    cfg(priv)->bt_params &&
	    cfg(priv)->bt_params->advanced_bt_coexist) {
		IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
	}

	if (tx_resp->frame_count == 1)
		return;

	/* Construct bit-map of pending frames within Tx window */
	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 fstatus = le16_to_cpu(frame_status[i].status);

		if (status & AGG_TX_STATUS_MSK)
			iwlagn_count_agg_tx_err_status(priv, fstatus);

		if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
			      AGG_TX_STATE_ABORT_MSK))
			continue;

		IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
				   "try-count (0x%08x)\n",
				   iwl_get_agg_tx_fail_reason(fstatus),
				   fstatus & AGG_TX_STATUS_MSK,
				   fstatus & AGG_TX_TRY_MSK);
	}
}
#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x

const char *iwl_get_agg_tx_fail_reason(u16 status)
{
	status &= AGG_TX_STATUS_MSK;
	switch (status) {
	case AGG_TX_STATE_TRANSMITTED:
		return "SUCCESS";
	AGG_TX_STATE_FAIL(UNDERRUN_MSK);
	AGG_TX_STATE_FAIL(BT_PRIO_MSK);
	AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
	AGG_TX_STATE_FAIL(ABORT_MSK);
	AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
	AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
	AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
	AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
	AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
	AGG_TX_STATE_FAIL(RESPONSE_MSK);
	AGG_TX_STATE_FAIL(DUMP_TX_MSK);
	AGG_TX_STATE_FAIL(DELAY_TX_MSK);
	}

	return "UNKNOWN";
}
#endif /* CONFIG_IWLWIFI_DEBUG */
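
/*
 * The uCode appends the scheduler SSN right after the per-frame status
 * array, i.e. one 32-bit agg_tx_status entry per transmitted frame past
 * the start of tx_resp->status; only the sequence-number bits (MAX_SN)
 * are meaningful.
 */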
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)&tx_resp->status +
			    tx_resp->frame_count) & MAX_SN;
}
static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= TX_STATUS_MSK;

	switch (status) {
	case TX_STATUS_POSTPONE_DELAY:
		priv->reply_tx_stats.pp_delay++;
		break;
	case TX_STATUS_POSTPONE_FEW_BYTES:
		priv->reply_tx_stats.pp_few_bytes++;
		break;
	case TX_STATUS_POSTPONE_BT_PRIO:
		priv->reply_tx_stats.pp_bt_prio++;
		break;
	case TX_STATUS_POSTPONE_QUIET_PERIOD:
		priv->reply_tx_stats.pp_quiet_period++;
		break;
	case TX_STATUS_POSTPONE_CALC_TTAK:
		priv->reply_tx_stats.pp_calc_ttak++;
		break;
	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
		priv->reply_tx_stats.int_crossed_retry++;
		break;
	case TX_STATUS_FAIL_SHORT_LIMIT:
		priv->reply_tx_stats.short_limit++;
		break;
	case TX_STATUS_FAIL_LONG_LIMIT:
		priv->reply_tx_stats.long_limit++;
		break;
	case TX_STATUS_FAIL_FIFO_UNDERRUN:
		priv->reply_tx_stats.fifo_underrun++;
		break;
	case TX_STATUS_FAIL_DRAIN_FLOW:
		priv->reply_tx_stats.drain_flow++;
		break;
	case TX_STATUS_FAIL_RFKILL_FLUSH:
		priv->reply_tx_stats.rfkill_flush++;
		break;
	case TX_STATUS_FAIL_LIFE_EXPIRE:
		priv->reply_tx_stats.life_expire++;
		break;
	case TX_STATUS_FAIL_DEST_PS:
		priv->reply_tx_stats.dest_ps++;
		break;
	case TX_STATUS_FAIL_HOST_ABORTED:
		priv->reply_tx_stats.host_abort++;
		break;
	case TX_STATUS_FAIL_BT_RETRY:
		priv->reply_tx_stats.bt_retry++;
		break;
	case TX_STATUS_FAIL_STA_INVALID:
		priv->reply_tx_stats.sta_invalid++;
		break;
	case TX_STATUS_FAIL_FRAG_DROPPED:
		priv->reply_tx_stats.frag_drop++;
		break;
	case TX_STATUS_FAIL_TID_DISABLE:
		priv->reply_tx_stats.tid_disable++;
		break;
	case TX_STATUS_FAIL_FIFO_FLUSHED:
		priv->reply_tx_stats.fifo_flush++;
		break;
	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
		priv->reply_tx_stats.insuff_cf_poll++;
		break;
	case TX_STATUS_FAIL_PASSIVE_NO_RX:
		priv->reply_tx_stats.fail_hw_drop++;
		break;
	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
		priv->reply_tx_stats.sta_color_mismatch++;
		break;
	default:
		priv->reply_tx_stats.unknown++;
		break;
	}
}
static void iwlagn_set_tx_status(struct iwl_priv *priv,
				 struct ieee80211_tx_info *info,
				 struct iwlagn_tx_resp *tx_resp,
				 bool is_agg)
{
	u16 status = le16_to_cpu(tx_resp->status.status);

	info->status.rates[0].count = tx_resp->failure_frame + 1;
	if (is_agg)
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
	info->flags |= iwl_tx_status_to_mac80211(status);
	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
				    info);
	if (!iwl_is_tx_success(status))
		iwlagn_count_tx_err_status(priv, status);
}
static void iwl_check_abort_status(struct iwl_priv *priv,
				   u8 frame_count, u32 status)
{
	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
		IWL_ERR(priv, "Tx flush command to flush out all frames\n");
		if (!test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
			queue_work(priv->shrd->workqueue, &priv->tx_flush);
	}
}
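
/*
 * iwlagn_rx_reply_tx - handler for the REPLY_TX (Tx response) notification
 *
 * For single-frame responses, reclaims the frame(s) up to the reported
 * scheduler SSN, fills in the mac80211 Tx status and hands the skbs back
 * to the stack. Multi-frame responses on aggregation queues are only
 * recorded here and reconciled later against the compressed block-ack.
 */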
int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
		       struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
	struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	struct ieee80211_hdr *hdr;
	u32 status = le16_to_cpu(tx_resp->status.status);
	u16 ssn = iwlagn_get_scd_ssn(tx_resp);
	int tid;
	int sta_id;
	int freed;
	struct ieee80211_tx_info *info;
	unsigned long flags;
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;
	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);

	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	if (is_agg)
		iwl_rx_reply_tx_agg(priv, tx_resp);

	if (tx_resp->frame_count == 1) {
		u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
		next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);

		if (is_agg) {
			/* If this is an aggregation queue, we can rely on the
			 * ssn since the wifi sequence number corresponds to
			 * the index in the TFD ring (%256).
			 * The seq_ctl is the sequence control of the packet
			 * to which this Tx response relates. But if there is a
			 * hole in the bitmap of the BA we received, this Tx
			 * response may allow to reclaim the hole and all the
			 * subsequent packets that were already acked.
			 * In that case, seq_ctl != ssn, and the next packet
			 * to be reclaimed will be ssn and not seq_ctl.
			 */
			next_reclaimed = ssn;
		}

		__skb_queue_head_init(&skbs);

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d",
					   next_reclaimed);
		}

		/*we can free until ssn % q.n_bd not inclusive */
		WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
					  ssn, status, &skbs));
		iwlagn_check_ratid_empty(priv, sta_id, tid);
		freed = 0;
		while (!skb_queue_empty(&skbs)) {
			skb = __skb_dequeue(&skbs);
			hdr = (struct ieee80211_hdr *)skb->data;

			if (!ieee80211_is_data_qos(hdr->frame_control))
				priv->last_seq_ctl = tx_resp->seq_ctl;

			info = IEEE80211_SKB_CB(skb);
			ctx = info->driver_data[0];
			kmem_cache_free(priv->tx_cmd_pool,
					(info->driver_data[1]));

			memset(&info->status, 0, sizeof(info->status));

			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
			    iwl_is_associated_ctx(ctx) && ctx->vif &&
			    ctx->vif->type == NL80211_IFTYPE_STATION) {
				ctx->last_tx_rejected = true;
				iwl_trans_stop_queue(trans(priv), txq_id,
						     "Tx on passive channel");
			}

			IWL_DEBUG_TX_REPLY(priv,
					   "TXQ %d status %s (0x%08x) "
					   "rate_n_flags 0x%x retries %d\n",
					   txq_id,
					   iwl_get_tx_fail_reason(status),
					   status,
					   le32_to_cpu(tx_resp->rate_n_flags),
					   tx_resp->failure_frame);

			IWL_DEBUG_TX_REPLY(priv,
					   "FrameCnt = %d, idx=%d\n",
					   tx_resp->frame_count, cmd_index);

			/* check if BAR is needed */
			if (is_agg && !iwl_is_tx_success(status))
				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
					     tx_resp, is_agg);
			if (!is_agg)
				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

			ieee80211_tx_status_irqsafe(priv->hw, skb);

			freed++;
		}

		WARN_ON(!is_agg && freed != 1);
	}

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
	return 0;
}
/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_ht_agg *agg;
	struct sk_buff_head reclaimed_skbs;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	unsigned long flags;
	int sta_id;
	int tid;
	int freed;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= hw_params(priv).max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return 0;
	}

	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->tid_data[sta_id][tid].agg;

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	__skb_queue_head_init(&reclaimed_skbs);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow,
			      ba_resp_scd_ssn, 0, &reclaimed_skbs)) {
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
			   "scd_flow = %d, scd_ssn = %d\n",
			   ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   scd_flow, ba_resp_scd_ssn);

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = false;

	/* Sanity check values reported by uCode */
	if (ba_resp->txed_2_done > ba_resp->txed) {
		IWL_DEBUG_TX_REPLY(priv,
				   "bogus sent(%d) and ack(%d) count\n",
				   ba_resp->txed, ba_resp->txed_2_done);
		/*
		 * set txed_2_done = txed,
		 * so it won't impact rate scale
		 */
		ba_resp->txed = ba_resp->txed_2_done;
	}
	IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
		     ba_resp->txed, ba_resp->txed_2_done);

	priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;

	iwlagn_check_ratid_empty(priv, sta_id, tid);
	freed = 0;

	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		hdr = (struct ieee80211_hdr *)skb->data;

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(1);

		info = IEEE80211_SKB_CB(skb);
		kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1]));

		if (freed == 1) {
			/* this is the first skb we deliver in this batch */
			/* put the rate scaling data there */
			info = IEEE80211_SKB_CB(skb);
			memset(&info->status, 0, sizeof(info->status));
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			info->status.ampdu_ack_len = ba_resp->txed_2_done;
			info->status.ampdu_len = ba_resp->txed;
			iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
						    info);
		}

		ieee80211_tx_status_irqsafe(priv->hw, skb);
	}

	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
	return 0;
}