/******************************************************************************
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-trans.h"
/*
 * mac80211 queues, ACs, hardware queues, FIFOs.
 *
 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
 *
 * Mac80211 uses the following numbers, which we get as from it
 * by way of skb_get_queue_mapping(skb):
 *
 *	VO	0
 *	VI	1
 *	BE	2
 *	BK	3
 *
 *
 * Regular (not A-MPDU) frames are put into hardware queues corresponding
 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
 * own queue per aggregation session (RA/TID combination), such queues are
 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
 * order to map frames to the right queue, we also need an AC->hw queue
 * mapping. This is implemented here.
 *
 * Due to the way hw queues are set up (by the hw specific modules like
 * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
 * mapping.
 */
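/*
 * Illustrative example (assuming the standard 802.11 TID-to-AC mapping
 * used by tid_to_ac[] below): a QoS data frame with TID 5 maps to AC_VI,
 * which with the identity AC->hw queue mapping lands on the VI hardware
 * queue, and ctx->ac_to_fifo[IEEE80211_AC_VI] selects the matching VI
 * FIFO for the scheduler.
 */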
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO
};

static inline int get_ac_from_tid(u16 tid)
{
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return tid_to_ac[tid];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}
static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
{
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ctx->ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}
static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
				 int tid)
{
	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
	     priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			priv->cfg->base_params->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	/* Modify device's station table to Tx this TID */
	return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
}
static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
				     struct ieee80211_tx_info *info,
				     __le16 fc, __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
	    info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
	    info->flags & IEEE80211_TX_CTL_AMPDU)
		*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
}
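/*
 * In other words: protection is requested when mac80211 asked for RTS/CTS
 * or CTS-to-self on the first rate, or when the frame is part of an
 * A-MPDU; the uCode then (presumably, going by the flag name) applies its
 * configured protection before the actual transmission.
 */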
/*
 * handle build REPLY_TX command notification.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr,
				      u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK_MSK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF_MSK;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 priv->cfg->bt_params &&
		 priv->cfg->bt_params->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		  ieee80211_is_reassoc_req(fc) ||
		  skb->protocol == cpu_to_be16(ETH_P_PAE)))
		tx_flags |= TX_CMD_FLG_IGNORE_BT;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
#define RTS_DFAULT_RETRY_LIMIT		60
static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
				     struct iwl_tx_cmd *tx_cmd,
				     struct ieee80211_tx_info *info,
				     __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets */
	rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
		if (priv->tm_fixed_rate) {
			/*
			 * rate overwrite by testmode
			 * we not only send lq command to change rate
			 * we also re-enforce per data pkt base.
			 */
			tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
			memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
			       sizeof(tx_cmd->rate_n_flags));
		}
#endif
		return;
	}

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate.  Thus, we use the lowest supported rate for
	 * this band.  Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
					     info->control.sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				first_antenna(priv->hw_params.valid_tx_ant));
	} else
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
						      priv->hw_params.valid_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
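/*
 * Worked example for the legacy-rate handling above (assuming the usual
 * iwlagn rate table where the first entries are the CCK rates and
 * IWL_FIRST_OFDM_RATE is the 6 Mbps entry): on 5 GHz mac80211 reports
 * rate index 0 for 6 Mbps, so the "+= IWL_FIRST_OFDM_RATE" remap turns it
 * into the driver's 6 Mbps index before the PLCP value is looked up in
 * iwl_rates[].
 */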
static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
					 struct ieee80211_tx_info *info,
					 struct iwl_tx_cmd *tx_cmd,
					 struct sk_buff *skb_frag,
					 int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
/*
 * start REPLY_TX command process
 */
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_tx_cmd *tx_cmd;
	__le16 fc;
	u8 hdr_len;
	u16 len;
	u8 sta_id;
	unsigned long flags;
	bool is_agg = false;
	int txq_id;
	int seq_number = 0;
	u16 tid = MAX_TID_COUNT;
	u8 *qc = NULL;

	/*
	 * If the frame needs to go out off-channel, then
	 * we'll have put the PAN context to that channel,
	 * so make the frame go out there.
	 */
	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		ctx = &priv->contexts[IWL_RXON_CTX_PAN];
	else if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id to not break aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock_priv;
		}
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (info->control.sta)
		sta_priv = (void *)info->control.sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	/*
	 * Send this frame after DTIM -- there's a special queue
	 * reserved for this for contexts that support AP mode.
	 */
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		txq_id = ctx->mcast_queue;
		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else
		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];

	/* irqs already disabled/saved above when locking priv->lock */
	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;

		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT))
			goto drop_unlock_sta;

		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		/* advance the per-TID sequence number for the next frame */
		seq_number += 0x10;

		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
			is_agg = true;
		}
	}

	tx_cmd = trans_get_tx_cmd(&priv->trans, txq_id);
	if (unlikely(!tx_cmd))
		goto drop_unlock_sta;

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_update_stats(priv, true, fc, len);

	if (trans_tx(&priv->trans, skb, tx_cmd, txq_id, fc, is_agg, ctx))
		goto drop_unlock_sta;

	if (ieee80211_is_data_qos(fc)) {
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
		if (!ieee80211_has_morefrags(fc))
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	}

	spin_unlock(&priv->sta_lock);
	spin_unlock_irqrestore(&priv->lock, flags);

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	return 0;

drop_unlock_sta:
	spin_unlock(&priv->sta_lock);
drop_unlock_priv:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}
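/*
 * Worked example for the seq_ctrl handling above (standard 802.11 field
 * layout: bits 0-3 fragment number, bits 4-15 sequence number, i.e. the
 * IEEE80211_SCTL_FRAG/IEEE80211_SCTL_SEQ masks): if the stored per-TID
 * seq_number is 0x0150 (sequence 21), the frame keeps its fragment bits,
 * its sequence bits are replaced by 0x0150, and the counter advances by
 * 0x10 so the next frame uses sequence 22.
 */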
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	tx_fifo = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
		     sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwlagn_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	tid_data->agg.tx_fifo = tx_fifo;
	iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = iwlagn_txq_agg_enable(priv, txq_id, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_HT(priv,
			     "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return ret;
}
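/*
 * Aggregation start flow in short (derived from the code above): the
 * session either goes straight to IWL_AGG_ON and the ADDBA callback is
 * signalled to mac80211 immediately (hardware queue already empty), or it
 * parks in IWL_EMPTYING_HW_QUEUE_ADDBA until the pending frames drain, at
 * which point iwlagn_txq_check_empty() completes the flow.
 */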
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	tx_fifo_id = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
	}

	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
turn_off:
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&priv->sta_lock);
	spin_lock(&priv->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 * mac80211 to clean up its own data.
	 */
	trans_txq_agg_disable(&priv->trans, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
int iwlagn_txq_check_empty(struct iwl_priv *priv,
			   int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
	struct iwl_rxon_context *ctx;

	ctx = &priv->contexts[priv->stations[sta_id].ctxid];

	lockdep_assert_held(&priv->sta_lock);

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id  == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = get_fifo_from_tid(ctx, tid);
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			trans_txq_agg_disable(&priv->trans, txq_id,
					      ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	}

	return 0;
}
static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
				     struct iwl_rxon_context *ctx,
				     const u8 *addr1)
{
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	rcu_read_lock();
	sta = ieee80211_find_sta(ctx->vif, addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}
	rcu_read_unlock();
}
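/*
 * pending_frames is the counterpart of the atomic_inc() done for
 * non-aggregated client frames in iwlagn_tx_skb(): once the last such
 * frame completes, the station is unblocked so mac80211 may resume
 * delivery to a sleeping client.
 */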
static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
			     bool is_agg)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;

	if (!is_agg)
		iwlagn_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);

	ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
}
int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];

		if (WARN_ON_ONCE(tx_info->skb == NULL))
			continue;

		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		iwlagn_tx_status(priv, tx_info,
				 txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
		tx_info->skb = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(priv, txq);

		iwlagn_txq_free_tfd(priv, txq, txq->q.read_ptr);
	}
	return nfreed;
}
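/*
 * The value returned above is the number of reclaimed QoS data frames;
 * the compressed-BA handler below feeds it to iwl_free_tfds_in_queue()
 * to keep the per-TID tfds_in_queue accounting in sync.
 */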
/**
 * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
 */
static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_ht_agg *agg,
				struct iwl_compressed_ba_resp *ba_resp)
{
	int sh;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	struct ieee80211_tx_info *info;
	u64 bitmap, sent_bitmap;

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0)
		sh += 0x100;

	/*
	 * Check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap
	 */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
	sent_bitmap = bitmap & agg->bitmap;

	/* Sanity check values reported by uCode */
	if (ba_resp->txed_2_done > ba_resp->txed) {
		IWL_DEBUG_TX_REPLY(priv,
			"bogus sent(%d) and ack(%d) count\n",
			ba_resp->txed, ba_resp->txed_2_done);
		/*
		 * set txed_2_done = txed,
		 * so it won't impact rate scale
		 */
		ba_resp->txed = ba_resp->txed_2_done;
	}
	IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
		     ba_resp->txed, ba_resp->txed_2_done);

	/* Find the first ACKed frame to store the TX status */
	while (sent_bitmap && !(sent_bitmap & 1)) {
		agg->start_idx = (agg->start_idx + 1) & 0xff;
		sent_bitmap >>= 1;
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = ba_resp->txed_2_done;
	info->status.ampdu_len = ba_resp->txed;
	iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	return 0;
}
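/*
 * Worked example for the shift above: if the driver's Tx window starts at
 * agg->start_idx == 10 while the BA's starting sequence maps to index 6,
 * then sh == 4 and the 64-bit BA bitmap is shifted right by 4 so that bit
 * 0 lines up with the first frame of the driver's window before it is
 * ANDed with agg->bitmap.
 */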
/*
 * translate ucode response to mac80211 tx status control values
 */
void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
				 struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->control.rates[0];

	info->antenna_sel_tx =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}
/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which need to be addressed,
		 * log the information and return for now!
		 * since it is possible happen very often and in order
		 * not to fill the syslog, don't enable the logging by default
		 */
		IWL_DEBUG_TX_REPLY(priv,
			"BA scd_flow %d does not match txq_id %d\n",
			scd_flow, agg->txq_id);
		return;
	}

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&priv->sta_lock, flags);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_wake_queue(priv, txq);

		iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&priv->sta_lock, flags);
}
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */