/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
#include "mvm.h"
#include "sta.h"
static void
iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
			  u16 tid, u16 ssn)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
	if (!trig)
		return;

	ba_trig = (void *)trig->data;

	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
		return;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
				"BAR sent to %pM, tid %d, ssn %d",
				addr, tid, ssn);
}
#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))
static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_hdr *hdr,
			   struct ieee80211_tx_info *info,
			   u16 offload_assist)
{
#if IS_ENABLED(CONFIG_INET)
	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
	u8 protocol = 0;

	/*
	 * Do not compute checksum if already computed or if transport will
	 * compute it
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
		goto out;

	/* We do not expect to be requested to csum stuff we do not support */
	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
		      (skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_IPV6)),
		      "No support for requested checksum\n")) {
		skb_checksum_help(skb);
		goto out;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		protocol = ip_hdr(skb)->protocol;
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct ipv6hdr *ipv6h =
			(struct ipv6hdr *)skb_network_header(skb);
		unsigned int off = sizeof(*ipv6h);

		protocol = ipv6h->nexthdr;
		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
			struct ipv6_opt_hdr *hp;

			/* only supported extension headers */
			if (protocol != NEXTHDR_ROUTING &&
			    protocol != NEXTHDR_HOP &&
			    protocol != NEXTHDR_DEST) {
				skb_checksum_help(skb);
				goto out;
			}

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			protocol = hp->nexthdr;
			off += ipv6_optlen(hp);
		}
		/* if we get here - protocol now should be TCP/UDP */
#endif
	}

	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
		WARN_ON_ONCE(1);
		skb_checksum_help(skb);
		goto out;
	}

	/* enable L4 csum */
	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);

	/*
	 * Set offset to IP header (snap).
	 * We don't support tunneling so no need to take care of inner header.
	 * Size is in words.
	 */
	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);

	/* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */
	if (skb->protocol == htons(ETH_P_IP) &&
	    (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
		ip_hdr(skb)->check = 0;
		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
	}

	/* reset UDP/TCP header csum */
	if (protocol == IPPROTO_TCP)
		tcp_hdr(skb)->check = 0;
	else
		udp_hdr(skb)->check = 0;

	/*
	 * mac header len should include IV, size is in words unless
	 * the IV is added by the firmware like in WEP.
	 * In new Tx API, the IV is always added by the firmware.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
		mh_len += info->control.hw_key->iv_len;
	mh_len /= 2;
	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;

out:
#endif
	return offload_assist;
}
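
/*
 * Illustrative walk-through of the offload_assist assembly above
 * (example values, not mandated anywhere): for an unencrypted QoS data
 * frame (26-byte MAC header) carrying IPv4/TCP, mh_len = 26 / 2 = 13
 * words, so the result combines BIT(TX_CMD_OFFLD_L4_EN) for the TCP
 * checksum, 4 << TX_CMD_OFFLD_IP_HDR for the SNAP offset, and
 * 13 << TX_CMD_OFFLD_MH_SIZE for the MAC header size.
 */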
/*
 * Sets most of the Tx cmd's fields
 */
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			struct iwl_tx_cmd *tx_cmd,
			struct ieee80211_tx_info *info, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	u32 len = skb->len + FCS_LEN;
	u16 offload_assist = 0;
	u8 ac;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) ||
	    (ieee80211_is_probe_resp(fc) &&
	     !is_multicast_ether_addr(hdr->addr1)))
		tx_flags |= TX_CMD_FLG_ACK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF;

	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
	} else if (ieee80211_is_back_req(fc)) {
		struct ieee80211_bar *bar = (void *)skb->data;
		u16 control = le16_to_cpu(bar->control);
		u16 ssn = le16_to_cpu(bar->start_seq_num);

		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
		tx_cmd->tid_tspec = (control &
				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
					  ssn);
	} else {
		if (ieee80211_is_data(fc))
			tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		else
			tx_cmd->tid_tspec = IWL_MAX_TID_COUNT;

		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	}

	/* Default to 0 (BE) when tid_spec is set to IWL_MAX_TID_COUNT */
	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
	else
		ac = tid_to_mac80211_ac[0];

	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
		TX_CMD_FLG_BT_PRIO_POS;

	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
		else if (ieee80211_is_action(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
		else
			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);

		/* The spec allows Action frames in A-MPDU, we don't support
		 * it
		 */
		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
	} else {
		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
	}

	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
	    !is_multicast_ether_addr(hdr->addr1))
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
	    ieee80211_action_contains_tpc(skb))
		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;

	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
	/* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
	tx_cmd->len = cpu_to_le16((u16)skb->len);
	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_cmd->sta_id = sta_id;

	/* padding is inserted later in transport */
	if (ieee80211_hdrlen(fc) % 4 &&
	    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
		offload_assist |= BIT(TX_CMD_OFFLD_PAD);

	tx_cmd->offload_assist |=
		cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info,
					    offload_assist));
}
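
/*
 * Example of the BAR TID extraction above, assuming the usual
 * bar->control layout in which the TID occupies the bits selected by
 * IEEE80211_BAR_CTRL_TID_INFO_MASK (top nibble, shift 12): with
 * control = 0x3004, (0x3004 & 0xf000) >> 12 = 3, i.e. the BAR refers
 * to TID 3.
 */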
static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
			      struct ieee80211_tx_info *info,
			      struct ieee80211_sta *sta, __le16 fc)
{
	if (info->band == NL80211_BAND_2GHZ &&
	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
		return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;

	if (sta && ieee80211_is_data(fc)) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
	}

	return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
}
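
/*
 * Sketch of how the return value above lands in rate_n_flags: the
 * antenna selection is a bitmask shifted to RATE_MCS_ANT_POS, so
 * (assuming the usual A/B ordering of the antenna bits)
 * mgmt_last_antenna_idx == 0 yields BIT(0) << RATE_MCS_ANT_POS
 * (antenna A) and mvmsta->tx_ant == 1 yields BIT(1) << RATE_MCS_ANT_POS
 * (antenna B).
 */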
static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
			       struct ieee80211_tx_info *info,
			       struct ieee80211_sta *sta)
{
	int rate_idx;
	u8 rate_plcp;
	u32 rate_flags = 0;

	/* HT rate doesn't make sense for a non data frame */
	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
		  "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
		  info->control.rates[0].flags,
		  info->control.rates[0].idx);

	rate_idx = info->control.rates[0].idx;
	/* if the rate isn't a well known legacy rate, take the lowest one */
	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
		rate_idx = rate_lowest_index(
				&mvm->nvm_data->bands[info->band], sta);

	/*
	 * For non 2 GHZ band, remap mac80211 rate
	 * indices into driver indices
	 */
	if (info->band != NL80211_BAND_2GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;

	/* For 2.4 GHZ band, check that there is no need to remap */
	BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);

	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	return (u32)rate_plcp | rate_flags;
}
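
/*
 * Why the remap above works: mac80211 rate indices are band-relative,
 * so on 5 GHz index 0 is the first OFDM rate (6 Mbps) and must be
 * shifted by IWL_FIRST_OFDM_RATE to reach the driver's flat rate
 * table, while on 2.4 GHz index 0 is the first CCK rate and
 * IWL_FIRST_CCK_RATE == 0 (enforced by the BUILD_BUG_ON above), so no
 * remap is needed.
 */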
static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
				       struct ieee80211_tx_info *info,
				       struct ieee80211_sta *sta, __le16 fc)
{
	return iwl_mvm_get_tx_rate(mvm, info, sta) |
		iwl_mvm_get_tx_ant(mvm, info, sta, fc);
}
/*
 * Sets the fields in the Tx cmd that are rate related
 */
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
			     struct ieee80211_tx_info *info,
			     struct ieee80211_sta *sta, __le16 fc)
{
	/* Set retry limit on RTS packets */
	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc)) {
		tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
		tx_cmd->rts_retry_limit =
			min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
	} else {
		tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
	}

	/*
	 * for data packets, rate info comes from the table inside the fw. This
	 * table is controlled by LINK_QUALITY commands
	 */
	if (ieee80211_is_data(fc) && sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
			tx_cmd->initial_rate_index = 0;
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
			return;
		}
	} else if (ieee80211_is_back_req(fc)) {
		tx_cmd->tx_flags |=
			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
	}

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags =
		cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc));
}
static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
					 u8 *crypto_hdr)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u64 pn;

	pn = atomic64_inc_return(&keyconf->tx_pn);
	crypto_hdr[0] = pn;
	crypto_hdr[2] = 0;
	crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
	crypto_hdr[1] = pn >> 8;
	crypto_hdr[4] = pn >> 16;
	crypto_hdr[5] = pn >> 24;
	crypto_hdr[6] = pn >> 32;
	crypto_hdr[7] = pn >> 40;
}
/*
 * Sets the fields in the Tx cmd that are crypto related
 */
static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int hdrlen)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;
	u8 *crypto_hdr = skb_frag->data + hdrlen;
	enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
	u64 pn;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		pn = atomic64_inc_return(&keyconf->tx_pn);
		ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
			((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
			  TX_CMD_SEC_WEP_KEY_IDX_MSK);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		type = TX_CMD_SEC_GCMP;
		/* fall through */
	case WLAN_CIPHER_SUITE_CCMP_256:
		/* TODO: Taking the key from the table might introduce a race
		 * when PTK rekeying is done, having an old packets with a PN
		 * based on the old key but the message encrypted with a new
		 * one.
		 * Need to handle this.
		 */
		tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
		tx_cmd->key[0] = keyconf->hw_key_idx;
		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
		break;
	default:
		tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
	}
}
/*
 * Allocates and sets the Tx cmd the driver data pointers in the skb
 */
static struct iwl_device_tx_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
		      struct ieee80211_tx_info *info, int hdrlen,
		      struct ieee80211_sta *sta, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_device_tx_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;

	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);

	if (unlikely(!dev_cmd))
		return NULL;

	dev_cmd->hdr.cmd = TX_CMD;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		u16 offload_assist = 0;
		u32 rate_n_flags = 0;
		u16 flags = 0;
		struct iwl_mvm_sta *mvmsta = sta ?
			iwl_mvm_sta_from_mac80211(sta) : NULL;

		if (ieee80211_is_data_qos(hdr->frame_control)) {
			u8 *qc = ieee80211_get_qos_ctl(hdr);

			if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
				offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
		}

		offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info,
						 offload_assist);

		/* padding is inserted later in transport */
		if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
		    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
			offload_assist |= BIT(TX_CMD_OFFLD_PAD);

		if (!info->control.hw_key)
			flags |= IWL_TX_FLAGS_ENCRYPT_DIS;

		/*
		 * For data packets rate info comes from the fw. Only
		 * set rate/antenna during connection establishment or in case
		 * no station is given.
		 */
		if (!sta || !ieee80211_is_data(hdr->frame_control) ||
		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
			flags |= IWL_TX_FLAGS_CMD_RATE;
			rate_n_flags =
				iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
							    hdr->frame_control);
		}

		if (mvm->trans->trans_cfg->device_family >=
		    IWL_DEVICE_FAMILY_AX210) {
			struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;

			cmd->offload_assist |= cpu_to_le32(offload_assist);

			/* Total # bytes to be transmitted */
			cmd->len = cpu_to_le16((u16)skb->len);

			/* Copy MAC header from skb into command buffer */
			memcpy(cmd->hdr, hdr, hdrlen);

			cmd->flags = cpu_to_le16(flags);
			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
		} else {
			struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;

			cmd->offload_assist |= cpu_to_le16(offload_assist);

			/* Total # bytes to be transmitted */
			cmd->len = cpu_to_le16((u16)skb->len);

			/* Copy MAC header from skb into command buffer */
			memcpy(cmd->hdr, hdr, hdrlen);

			cmd->flags = cpu_to_le32(flags);
			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
		}
		goto out;
	}

	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;

	if (info->control.hw_key)
		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);

	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdrlen);

out:
	return dev_cmd;
}
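
/*
 * The gen2/gen3 branches above are intentionally near-identical: as
 * the cpu_to_le*() calls show, the two command layouts differ mainly
 * in field widths (offload_assist __le16 vs __le32, flags __le32 vs
 * __le16), so the same values are written through differently sized
 * endian conversions.
 */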
static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
				       struct iwl_device_tx_cmd *cmd)
{
	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);

	memset(&skb_info->status, 0, sizeof(skb_info->status));
	memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));

	skb_info->driver_data[1] = cmd;
}
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_vif *mvmvif =
		iwl_mvm_vif_from_mac80211(info->control.vif);
	__le16 fc = hdr->frame_control;

	switch (info->control.vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		/*
		 * Non-bufferable frames use the broadcast station, thus they
		 * use the probe queue.
		 * Also take care of the case where we send a deauth to a
		 * station that we don't have, or similarly an association
		 * response (with non-success status) for a station we can't
		 * accept.
		 * Also, disassociate frames might happen, particular with
		 * reason 7 ("Class 3 frame received from nonassociated STA").
		 */
		if (ieee80211_is_mgmt(fc) &&
		    (!ieee80211_is_bufferable_mmpdu(fc) ||
		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
			return mvm->probe_queue;

		if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
		    is_multicast_ether_addr(hdr->addr1))
			return mvmvif->cab_queue;

		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
			  "fc=0x%02x", le16_to_cpu(fc));
		return mvm->probe_queue;
	case NL80211_IFTYPE_P2P_DEVICE:
		if (ieee80211_is_mgmt(fc))
			return mvm->p2p_dev_queue;

		WARN_ON_ONCE(1);
		return mvm->p2p_dev_queue;
	default:
		WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
		return -1;
	}
}
static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
				       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_mvm_vif *mvmvif =
		iwl_mvm_vif_from_mac80211(info->control.vif);
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
	struct iwl_probe_resp_data *resp_data;
	u8 *ie, *pos;
	u8 match[] = {
		(WLAN_OUI_WFA >> 16) & 0xff,
		(WLAN_OUI_WFA >> 8) & 0xff,
		WLAN_OUI_WFA & 0xff,
		WLAN_OUI_TYPE_WFA_P2P,
	};

	rcu_read_lock();

	resp_data = rcu_dereference(mvmvif->probe_resp_data);
	if (!resp_data)
		goto out;

	if (!resp_data->notif.noa_active)
		goto out;

	ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
					  mgmt->u.probe_resp.variable,
					  skb->len - base_len,
					  match, 4, 2);
	if (!ie) {
		IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
		goto out;
	}

	if (skb_tailroom(skb) < resp_data->noa_len) {
		if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
			IWL_ERR(mvm,
				"Failed to reallocate probe resp\n");
			goto out;
		}
	}

	pos = skb_put(skb, resp_data->noa_len);

	*pos++ = WLAN_EID_VENDOR_SPECIFIC;
	/* Set length of IE body (not including ID and length itself) */
	*pos++ = resp_data->noa_len - 2;
	*pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
	*pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
	*pos++ = WLAN_OUI_WFA & 0xff;
	*pos++ = WLAN_OUI_TYPE_WFA_P2P;

	memcpy(pos, &resp_data->notif.noa_attr,
	       resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));

out:
	rcu_read_unlock();
}
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info info;
	struct iwl_device_tx_cmd *dev_cmd;
	u8 sta_id;
	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	__le16 fc = hdr->frame_control;
	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
		IEEE80211_TX_CTL_TX_OFFCHAN;
	int queue = -1;

	if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
		return -1;

	memcpy(&info, skb->cb, sizeof(info));

	if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
		return -1;

	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
		return -1;

	if (info.control.vif) {
		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(info.control.vif);

		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
		    info.control.vif->type == NL80211_IFTYPE_AP ||
		    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
			if (!ieee80211_is_data(hdr->frame_control))
				sta_id = mvmvif->bcast_sta.sta_id;
			else
				sta_id = mvmvif->mcast_sta.sta_id;

			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr);
		} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
			queue = mvm->snif_queue;
			sta_id = mvm->snif_sta.sta_id;
		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
			   offchannel) {
			/*
			 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
			 * that can be used in 2 different types of vifs, P2P &
			 * STATION.
			 * P2P uses the offchannel queue.
			 * STATION (HS2.0) uses the auxiliary context of the FW,
			 * and hence needs to be sent on the aux queue.
			 */
			sta_id = mvm->aux_sta.sta_id;
			queue = mvm->aux_queue;
		}
	}

	if (queue < 0) {
		IWL_ERR(mvm, "No queue was found. Dropping TX\n");
		return -1;
	}

	if (unlikely(ieee80211_is_probe_resp(fc)))
		iwl_mvm_probe_resp_set_noa(mvm, skb);

	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
	if (!dev_cmd)
		return -1;

	/* From now on, we cannot access info->control */
	iwl_mvm_skb_prepare_status(skb, dev_cmd);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		return -1;
	}

	return 0;
}
unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta, unsigned int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
	u8 ac = tid_to_mac80211_ac[tid];
	unsigned int txf;
	int lmac = IWL_LMAC_24G_INDEX;

	if (iwl_mvm_is_cdb_supported(mvm) &&
	    band == NL80211_BAND_5GHZ)
		lmac = IWL_LMAC_5G_INDEX;

	/* For HE redirect to trigger based fifos */
	if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
		ac += 4;

	txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);

	/*
	 * Don't send an AMSDU that will be longer than the TXF.
	 * Add a security margin of 256 for the TX command + headers.
	 * We also want to have the start of the next packet inside the
	 * fifo to be able to send bursts.
	 */
	return min_t(unsigned int, mvmsta->max_amsdu_len,
		     mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
}
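
/*
 * Illustrative numbers for the clamp above: with a hypothetical
 * 16384-byte TX FIFO for the chosen AC, the A-MSDU is capped at
 * 16384 - 256 = 16128 bytes (or at mvmsta->max_amsdu_len if that is
 * smaller), leaving headroom for the TX command and the start of the
 * next packet.
 */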
#ifdef CONFIG_INET

static int
iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
		       netdev_features_t netdev_flags,
		       struct sk_buff_head *mpdus_skb)
{
	struct sk_buff *tmp, *next;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	char cb[sizeof(skb->cb)];
	u16 i = 0;
	unsigned int tcp_payload_len;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
	bool qos = ieee80211_is_data_qos(hdr->frame_control);
	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;

	skb_shinfo(skb)->gso_size = num_subframes * mss;
	memcpy(cb, skb->cb, sizeof(cb));

	next = skb_gso_segment(skb, netdev_flags);
	skb_shinfo(skb)->gso_size = mss;
	if (WARN_ON_ONCE(IS_ERR(next)))
		return -EINVAL;
	else if (next)
		consume_skb(skb);

	skb_list_walk_safe(next, tmp, next) {
		memcpy(tmp->cb, cb, sizeof(tmp->cb));
		/*
		 * Compute the length of all the data added for the A-MSDU.
		 * This will be used to compute the length to write in the TX
		 * command. We have: SNAP + IP + TCP for n -1 subframes and
		 * ETH header for n subframes.
		 */
		tcp_payload_len = skb_tail_pointer(tmp) -
			skb_transport_header(tmp) -
			tcp_hdrlen(tmp) + tmp->data_len;

		if (ipv4)
			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);

		if (tcp_payload_len > mss) {
			skb_shinfo(tmp)->gso_size = mss;
		} else {
			if (qos) {
				u8 *qc;

				if (ipv4)
					ip_send_check(ip_hdr(tmp));

				qc = ieee80211_get_qos_ctl((void *)tmp->data);
				*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
			}
			skb_shinfo(tmp)->gso_size = 0;
		}

		skb_mark_not_on_list(tmp);
		__skb_queue_tail(mpdus_skb, tmp);
		i++;
	}

	return 0;
}
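
/*
 * The segmentation trick above in numbers (example values): with
 * mss = 1400 and num_subframes = 3, gso_size is temporarily raised to
 * 4200 so the stack cuts the flow into 4200-byte MPDUs, each of which
 * is later sent as one A-MSDU of three 1400-byte subframes; gso_size
 * is then restored to mss (or zeroed for a final short segment) so the
 * transport still sees valid per-skb TSO state.
 */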
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
	u16 snap_ip_tcp, pad;
	netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
	u8 tid;

	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
		tcp_hdrlen(skb);

	if (!mvmsta->max_amsdu_len ||
	    !ieee80211_is_data_qos(hdr->frame_control) ||
	    !mvmsta->amsdu_enabled)
		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);

	/*
	 * Do not build AMSDU for IPv6 with extension headers.
	 * Ask stack to segment and checksum the generated MPDUs for us.
	 */
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
	    IPPROTO_TCP) {
		netdev_flags &= ~NETIF_F_CSUM_MASK;
		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
	}

	tid = ieee80211_get_tid(hdr);
	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	/*
	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
	 * during an BA session.
	 */
	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed)
		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);

	if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) ||
	    !(mvmsta->amsdu_enabled & BIT(tid)))
		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);

	/*
	 * Take the min of ieee80211 station and mvm station
	 */
	max_amsdu_len =
		min_t(unsigned int, sta->max_amsdu_len,
		      iwl_mvm_max_amsdu_size(mvm, sta, tid));

	/*
	 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
	 * supported. This is a spec requirement (IEEE 802.11-2015
	 * section 8.7.3 NOTE 3).
	 */
	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !sta->vht_cap.vht_supported)
		max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);

	/* Sub frame header + SNAP + IP header + TCP header + MSS */
	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
	pad = (4 - subf_len) & 0x3;

	/*
	 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
	 * N * subf_len + (N - 1) * pad.
	 */
	num_subframes = (max_amsdu_len + pad) / (subf_len + pad);

	if (sta->max_amsdu_subframes &&
	    num_subframes > sta->max_amsdu_subframes)
		num_subframes = sta->max_amsdu_subframes;

	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	/*
	 * Make sure we have enough TBs for the A-MSDU:
	 *	2 for each subframe
	 *	1 more for each fragment
	 *	1 more for the potential data in the header
	 */
	if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
	    mvm->trans->max_skb_frags)
		num_subframes = 1;

	if (num_subframes > 1)
		*ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* This skb fits in one single A-MSDU */
	if (num_subframes * mss >= tcp_payload_len) {
		__skb_queue_tail(mpdus_skb, skb);
		return 0;
	}

	/*
	 * Trick the segmentation function to make it
	 * create SKBs that can fit into one A-MSDU.
	 */
	return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags,
				      mpdus_skb);
}
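
/*
 * Worked example of the subframe math above for IPv4/TCP without
 * options: snap_ip_tcp = 8 + 20 + 20 = 48, so with mss = 1460,
 * subf_len = 14 + 48 + 1460 = 1522 and pad = 2; a max_amsdu_len of
 * 7935 then gives num_subframes = (7935 + 2) / (1522 + 2) = 5
 * subframes per A-MSDU.
 */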
#else /* CONFIG_INET */
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_sta *sta,
			  struct sk_buff_head *mpdus_skb)
{
	/* Impossible to get TSO with CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif
/* Check if there are any timed-out TIDs on a given shared TXQ */
static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
{
	unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
	unsigned long now = jiffies;
	int tid;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
				IWL_MVM_DQA_QUEUE_TIMEOUT, now))
			return true;
	}

	return false;
}
static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
			       struct iwl_mvm_sta *mvmsta,
			       int airtime)
{
	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
	struct iwl_mvm_tcm_mac *mdata;

	if (mac >= NUM_MAC_INDEX_DRIVER)
		return;

	mdata = &mvm->tcm.data[mac];

	if (mvm->tcm.paused)
		return;

	if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
		schedule_delayed_work(&mvm->tcm.work, 0);

	mdata->tx.airtime += airtime;
}
static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
				 struct iwl_mvm_sta *mvmsta, int tid)
{
	u32 ac = tid_to_mac80211_ac[tid];
	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
	struct iwl_mvm_tcm_mac *mdata;

	if (mac >= NUM_MAC_INDEX_DRIVER)
		return -EINVAL;

	mdata = &mvm->tcm.data[mac];

	mdata->tx.pkts[ac]++;

	return 0;
}
/*
 * Prepares and sends a single MPDU.
 *
 * This function must be called with BHs disabled.
 */
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_tx_info *info,
			   struct ieee80211_sta *sta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_mvm_sta *mvmsta;
	struct iwl_device_tx_cmd *dev_cmd;
	__le16 fc;
	u16 seq_number = 0;
	u8 tid = IWL_MAX_TID_COUNT;
	u16 txq_id;
	bool is_ampdu = false;
	int hdrlen;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
		return -1;

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
		return -1;

	if (unlikely(ieee80211_is_probe_resp(fc)))
		iwl_mvm_probe_resp_set_noa(mvm, skb);

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
					sta, mvmsta->sta_id);
	if (!dev_cmd)
		goto drop;

	/*
	 * we handle that entirely ourselves -- for uAPSD the firmware
	 * will always send a notification, and for PS-Poll responses
	 * we'll notify mac80211 when getting frame status
	 */
	info->flags &= ~IEEE80211_TX_STATUS_EOSP;

	spin_lock(&mvmsta->lock);

	/* nullfunc frames should go to the MGMT queue regardless of QOS,
	 * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default
	 * assignment of MGMT TID
	 */
	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		tid = ieee80211_get_tid(hdr);
		if (WARN_ONCE(tid >= IWL_MAX_TID_COUNT, "Invalid TID %d", tid))
			goto drop_unlock_sta;

		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
		if (WARN_ONCE(is_ampdu &&
			      mvmsta->tid_data[tid].state != IWL_AGG_ON,
			      "Invalid internal agg state %d for TID %d",
			      mvmsta->tid_data[tid].state, tid))
			goto drop_unlock_sta;

		seq_number = mvmsta->tid_data[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;

			hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
			hdr->seq_ctrl |= cpu_to_le16(seq_number);
			/* update the tx_cmd hdr as it was already copied */
			tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
		}
	} else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) {
		tid = IWL_TID_NON_QOS;
	}

	txq_id = mvmsta->tid_data[tid].txq_id;

	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

	if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
		spin_unlock(&mvmsta->lock);
		return -1;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		/* Keep track of the time of the last frame for this RA/TID */
		mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;

		/*
		 * If we have timed-out TIDs - schedule the worker that will
		 * reconfig the queues and update them
		 *
		 * Note that no lock is taken here in order to not serialize
		 * the TX flow. This isn't dangerous because scheduling
		 * mvm->add_stream_wk can't ruin the state, and if we DON'T
		 * schedule it due to some race condition then next TX we get
		 * here we will.
		 */
		if (unlikely(mvm->queue_info[txq_id].status ==
			     IWL_MVM_QUEUE_SHARED &&
			     iwl_mvm_txq_should_update(mvm, txq_id)))
			schedule_work(&mvm->add_stream_wk);
	}

	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n",
		     mvmsta->sta_id, tid, txq_id,
		     IEEE80211_SEQ_TO_SN(seq_number), skb->len);

	/* From now on, we cannot access info->control */
	iwl_mvm_skb_prepare_status(skb, dev_cmd);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
		mvmsta->tid_data[tid].seq_number = seq_number + 0x10;

	spin_unlock(&mvmsta->lock);

	if (iwl_mvm_tx_pkt_queued(mvm, mvmsta,
				  tid == IWL_MAX_TID_COUNT ? 0 : tid))
		goto drop;

	return 0;

drop_unlock_sta:
	iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
	spin_unlock(&mvmsta->lock);
drop:
	IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->sta_id, tid);
	return -1;
}
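
/*
 * On the "+ 0x10" above: the 802.11 sequence number lives in bits 4-15
 * of seq_ctrl (IEEE80211_SCTL_SEQ), with bits 0-3 holding the fragment
 * number (IEEE80211_SCTL_FRAG), so adding 0x10 advances the per-TID
 * sequence number by exactly one.
 */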
int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
		       struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct ieee80211_tx_info info;
	struct sk_buff_head mpdus_skbs;
	unsigned int payload_len;
	int ret;

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
		return -1;

	memcpy(&info, skb->cb, sizeof(info));

	if (!skb_is_gso(skb))
		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		tcp_hdrlen(skb) + skb->data_len;

	if (payload_len <= skb_shinfo(skb)->gso_size)
		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);

	__skb_queue_head_init(&mpdus_skbs);

	ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
	if (ret)
		return ret;

	if (WARN_ON(skb_queue_empty(&mpdus_skbs)))
		return ret;

	while (!skb_queue_empty(&mpdus_skbs)) {
		skb = __skb_dequeue(&mpdus_skbs);

		ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
		if (ret) {
			__skb_queue_purge(&mpdus_skbs);
			return ret;
		}
	}

	return 0;
}
static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta, u8 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct ieee80211_vif *vif = mvmsta->vif;
	u16 normalized_ssn;

	lockdep_assert_held(&mvmsta->lock);

	if ((tid_data->state == IWL_AGG_ON ||
	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
	    iwl_mvm_tid_queued(mvm, tid_data) == 0) {
		/*
		 * Now that this aggregation or DQA queue is empty tell
		 * mac80211 so it knows we no longer have frames buffered for
		 * the station on this TID (for the TIM bitmap calculation.)
		 */
		ieee80211_sta_set_buffered(sta, tid, false);
	}

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->trans_cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn != tid_data->next_reclaimed)
		return;

	switch (tid_data->state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue addBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IWL_EMPTYING_HW_QUEUE_DELBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue DELBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		tid_data->state = IWL_AGG_OFF;
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	default:
		break;
	}
}
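
/*
 * Example of the normalization above: on gen2 (22000-series) HW,
 * next_reclaimed is only 8 bits wide, so an ssn of 0x103 is masked to
 * 0x03 before the comparison, keeping both counters on the same 8-bit
 * wrap-around.
 */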
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(SMALL_CF_POLL);
	TX_STATUS_FAIL(FW_DROP);
	TX_STATUS_FAIL(STA_COLOR_MISMATCH);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */
void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
			       enum nl80211_band band,
			       struct ieee80211_tx_rate *r)
{
	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
		break;
	}
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		r->flags |= IEEE80211_TX_RC_MCS;
		r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
		ieee80211_rate_set_vht(
			r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
						RATE_VHT_MCS_NSS_POS) + 1);
		r->flags |= IEEE80211_TX_RC_VHT_MCS;
	} else {
		r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
							     band);
	}
}
/*
 * translate ucode response to mac80211 tx status control values
 */
static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
					struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->status.rates[0];

	info->status.antenna =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
}
static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
					    u32 status)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_tx_status *status_trig;
	int i;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
				     FW_DBG_TRIGGER_TX_STATUS);
	if (!trig)
		return;

	status_trig = (void *)trig->data;

	for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
		/* don't collect on status 0 */
		if (!status_trig->statuses[i].status)
			break;

		if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"Tx status %d was received",
					status & TX_STATUS_MSK);
		break;
	}
}
/**
 * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
 * @tx_resp: the Tx response from the fw (agg or non-agg)
 *
 * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
 * it can't know that everything will go well until the end of the AMPDU, it
 * can't know in advance the number of MPDUs that will be sent in the current
 * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
 * Hence, it can't know in advance what the SSN of the SCD will be at the end
 * of the batch. This is why the SSN of the SCD is written at the end of the
 * whole struct at a variable offset. This function knows how to cope with the
 * variable offset and returns the SSN of the SCD.
 */
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
				      struct iwl_mvm_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
			    tx_resp->frame_count) & 0xfff;
}
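
/*
 * How the variable offset resolves: the agg status array holds
 * frame_count 4-byte entries, so treating it as a __le32 array and
 * indexing with tx_resp->frame_count lands exactly on the word that
 * follows the last entry, where the firmware wrote the SCD SSN; the
 * & 0xfff keeps only the 12-bit sequence number.
 */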
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
				     struct iwl_rx_packet *pkt)
{
	struct ieee80211_sta *sta;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	/* struct iwl_mvm_tx_resp_v3 is almost the same */
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
	struct agg_tx_status *agg_status =
		iwl_mvm_get_agg_status(mvm, tx_resp);
	u32 status = le16_to_cpu(agg_status->status);
	u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
	struct sk_buff_head skbs;
	u8 skb_freed = 0;
	u8 lq_color;
	u16 next_reclaimed, seq_ctl;
	bool is_ndp = false;

	__skb_queue_head_init(&skbs);

	if (iwl_mvm_has_new_tx_api(mvm))
		txq_id = le16_to_cpu(tx_resp->tx_queue);

	seq_ctl = le16_to_cpu(tx_resp->seq_ctl);

	/* we can free until ssn % q.n_bd not inclusive */
	iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_hdr *hdr = (void *)skb->data;
		bool flushed = false;

		skb_freed++;

		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));

		/* inform mac80211 about what happened with the frame */
		switch (status & TX_STATUS_MSK) {
		case TX_STATUS_SUCCESS:
		case TX_STATUS_DIRECT_DONE:
			info->flags |= IEEE80211_TX_STAT_ACK;
			break;
		case TX_STATUS_FAIL_FIFO_FLUSHED:
		case TX_STATUS_FAIL_DRAIN_FLOW:
			flushed = true;
			break;
		case TX_STATUS_FAIL_DEST_PS:
			/* the FW should have stopped the queue and not
			 * return this status
			 */
			WARN_ON(1);
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			break;
		default:
			break;
		}

		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
		    ieee80211_is_mgmt(hdr->frame_control))
			iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);

		/*
		 * If we are freeing multiple frames, mark all the frames
		 * but the first one as acked, since they were acknowledged
		 * before
		 */
		if (skb_freed > 1)
			info->flags |= IEEE80211_TX_STAT_ACK;

		iwl_mvm_tx_status_check_trigger(mvm, status);

		info->status.rates[0].count = tx_resp->failure_frame + 1;
		iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
					    info);
		info->status.status_driver_data[1] =
			(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);

		/* Single frame failure in an AMPDU queue => send BAR */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    !(info->flags & IEEE80211_TX_STAT_ACK) &&
		    !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed)
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;

		/* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */
		if (ieee80211_is_back_req(hdr->frame_control))
			seq_ctl = 0;
		else if (status != TX_STATUS_SUCCESS)
			seq_ctl = le16_to_cpu(hdr->seq_ctrl);

		if (unlikely(!seq_ctl)) {
			struct ieee80211_hdr *hdr = (void *)skb->data;

			/*
			 * If it is an NDP, we can't update next_reclaim since
			 * its sequence control is 0. Note that for that same
			 * reason, NDPs are never sent to A-MPDU'able queues
			 * so that we can never have more than one freed frame
			 * for a single Tx response (see WARN_ON below).
			 */
			if (ieee80211_is_qos_nullfunc(hdr->frame_control))
				is_ndp = true;
		}

		/*
		 * TODO: this is not accurate if we are freeing more than one
		 * packet.
		 */
		info->status.tx_time =
			le16_to_cpu(tx_resp->wireless_media_time);
		BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
		lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
		info->status.status_driver_data[0] =
			RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);

		ieee80211_tx_status(mvm->hw, skb);
	}

	/* This is an aggregation queue or might become one, so we use
	 * the ssn since: ssn = wifi seq_num % 256.
	 * The seq_ctl is the sequence control of the packet to which
	 * this Tx response relates. But if there is a hole in the
	 * bitmap of the BA we received, this Tx response may allow to
	 * reclaim the hole and all the subsequent packets that were
	 * already acked. In that case, seq_ctl != ssn, and the next
	 * packet to be reclaimed will be ssn and not seq_ctl. In that
	 * case, several packets will be reclaimed even if
	 * frame_count = 1.
	 *
	 * The ssn is the index (% 256) of the latest packet that has
	 * been treated (acked / dropped) + 1.
	 */
	next_reclaimed = ssn;

	IWL_DEBUG_TX_REPLY(mvm,
			   "TXQ %d status %s (0x%08x)\n",
			   txq_id, iwl_mvm_get_tx_fail_reason(status), status);

	IWL_DEBUG_TX_REPLY(mvm,
			   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
			   le32_to_cpu(tx_resp->initial_rate),
			   tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
			   ssn, next_reclaimed, seq_ctl);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	/*
	 * sta can't be NULL otherwise it'd mean that the sta has been freed in
	 * the firmware while we still have packets for it in the Tx queues.
	 */
	if (WARN_ON_ONCE(!sta))
		goto out;

	if (!IS_ERR(sta)) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

		iwl_mvm_tx_airtime(mvm, mvmsta,
				   le16_to_cpu(tx_resp->wireless_media_time));

		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
			iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);

		if (sta->wme && tid != IWL_MGMT_TID) {
			struct iwl_mvm_tid_data *tid_data =
				&mvmsta->tid_data[tid];
			bool send_eosp_ndp = false;

			spin_lock_bh(&mvmsta->lock);

			if (!is_ndp) {
				tid_data->next_reclaimed = next_reclaimed;
				IWL_DEBUG_TX_REPLY(mvm,
						   "Next reclaimed packet:%d\n",
						   next_reclaimed);
			} else {
				IWL_DEBUG_TX_REPLY(mvm,
						   "NDP - don't update next_reclaimed\n");
			}

			iwl_mvm_check_ratid_empty(mvm, sta, tid);

			if (mvmsta->sleep_tx_count) {
				mvmsta->sleep_tx_count--;
				if (mvmsta->sleep_tx_count &&
				    !iwl_mvm_tid_queued(mvm, tid_data)) {
					/*
					 * The number of frames in the queue
					 * dropped to 0 even if we sent less
					 * frames than we thought we had on the
					 * Tx queue.
					 * This means we had holes in the BA
					 * window that we just filled, ask
					 * mac80211 to send EOSP since the
					 * firmware won't know how to do that.
					 * Send NDP and the firmware will send
					 * EOSP notification that will trigger
					 * a call to ieee80211_sta_eosp().
					 */
					send_eosp_ndp = true;
				}
			}

			spin_unlock_bh(&mvmsta->lock);
			if (send_eosp_ndp) {
				iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
					IEEE80211_FRAME_RELEASE_UAPSD,
					1, tid, false, false);
				mvmsta->sleep_tx_count = 0;
				ieee80211_send_eosp_nullfunc(sta, tid);
			}
		}

		if (mvmsta->next_status_eosp) {
			mvmsta->next_status_eosp = false;
			ieee80211_sta_eosp(sta);
		}
	}
out:
	rcu_read_unlock();
}
#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
static const char *iwl_get_agg_tx_status(u16 status)
{
	switch (status & AGG_TX_STATE_STATUS_MSK) {
	AGG_TX_STATE_(TRANSMITTED);
	AGG_TX_STATE_(UNDERRUN);
	AGG_TX_STATE_(BT_PRIO);
	AGG_TX_STATE_(FEW_BYTES);
	AGG_TX_STATE_(ABORT);
	AGG_TX_STATE_(TX_ON_AIR_DROP);
	AGG_TX_STATE_(LAST_SENT_TRY_CNT);
	AGG_TX_STATE_(LAST_SENT_BT_KILL);
	AGG_TX_STATE_(SCD_QUERY);
	AGG_TX_STATE_(TEST_BAD_CRC32);
	AGG_TX_STATE_(RESPONSE);
	AGG_TX_STATE_(DUMP_TX);
	AGG_TX_STATE_(DELAY_TX);
	}

	return "UNKNOWN";
}

static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
				      struct iwl_rx_packet *pkt)
{
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	struct agg_tx_status *frame_status =
		iwl_mvm_get_agg_status(mvm, tx_resp);
	int i;

	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 fstatus = le16_to_cpu(frame_status[i].status);

		IWL_DEBUG_TX_REPLY(mvm,
				   "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
				   iwl_get_agg_tx_status(fstatus),
				   fstatus & AGG_TX_STATE_STATUS_MSK,
				   (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
					AGG_TX_STATE_TRY_CNT_POS,
				   le16_to_cpu(frame_status[i].sequence));
	}
}
#else
static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
				      struct iwl_rx_packet *pkt)
{}
#endif /* CONFIG_IWLWIFI_DEBUG */
static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
				  struct iwl_rx_packet *pkt)
{
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	struct iwl_mvm_sta *mvmsta;
	int queue = SEQ_TO_QUEUE(sequence);
	struct ieee80211_sta *sta;

	if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
			 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
		return;

	iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (WARN_ON_ONCE(!sta || !sta->wme)) {
		rcu_read_unlock();
		return;
	}

	if (!WARN_ON_ONCE(!mvmsta)) {
		mvmsta->tid_data[tid].rate_n_flags =
			le32_to_cpu(tx_resp->initial_rate);
		mvmsta->tid_data[tid].tx_time =
			le16_to_cpu(tx_resp->wireless_media_time);
		mvmsta->tid_data[tid].lq_color =
			TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
		iwl_mvm_tx_airtime(mvm, mvmsta,
				   le16_to_cpu(tx_resp->wireless_media_time));
	}

	rcu_read_unlock();
}
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;

	if (tx_resp->frame_count == 1)
		iwl_mvm_rx_tx_cmd_single(mvm, pkt);
	else
		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
}
static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
			       int txq, int index,
			       struct ieee80211_tx_info *ba_info, u32 rate)
{
	struct sk_buff_head reclaimed_skbs;
	struct iwl_mvm_tid_data *tid_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct sk_buff *skb;
	int freed;

	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
		      tid > IWL_MAX_TID_COUNT,
		      "sta_id %d tid %d", sta_id, tid))
		return;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	/* Reclaiming frames for a station that has been deleted ? */
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	tid_data = &mvmsta->tid_data[tid];

	if (tid_data->txq_id != txq) {
		IWL_ERR(mvm,
			"invalid BA notification: Q %d, tid %d\n",
			tid_data->txq_id, tid);
		rcu_read_unlock();
		return;
	}

	__skb_queue_head_init(&reclaimed_skbs);

	/*
	 * Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway).
	 */
	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);

	spin_lock_bh(&mvmsta->lock);

	tid_data->next_reclaimed = index;

	iwl_mvm_check_ratid_empty(mvm, sta, tid);

	freed = 0;

	/* pack lq color from tid_data along the reduced txp */
	ba_info->status.status_driver_data[0] =
		RS_DRV_DATA_PACK(tid_data->lq_color,
				 ba_info->status.status_driver_data[0]);
	ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;

	skb_queue_walk(&reclaimed_skbs, skb) {
		struct ieee80211_hdr *hdr = (void *)skb->data;
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);

		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));
		/* Packet was transmitted successfully, failures come as single
		 * frames because before failing a frame the firmware transmits
		 * it without aggregation at least once.
		 */
		info->flags |= IEEE80211_TX_STAT_ACK;

		/* this is the first skb we deliver in this batch */
		/* put the rate scaling data there */
		if (freed == 1) {
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			memcpy(&info->status, &ba_info->status,
			       sizeof(ba_info->status));
			iwl_mvm_hwrate_to_tx_status(rate, info);
		}
	}

	spin_unlock_bh(&mvmsta->lock);

	/* We got a BA notif with 0 acked or scd_ssn didn't progress which is
	 * possible (i.e. first MPDU in the aggregation wasn't acked)
	 * Still it's important to update RS about sent vs. acked.
	 */
	if (skb_queue_empty(&reclaimed_skbs)) {
		struct ieee80211_chanctx_conf *chanctx_conf = NULL;

		if (mvmsta->vif)
			chanctx_conf =
				rcu_dereference(mvmsta->vif->chanctx_conf);

		if (WARN_ON_ONCE(!chanctx_conf))
			goto out;

		ba_info->band = chanctx_conf->def.chan->band;
		iwl_mvm_hwrate_to_tx_status(rate, ba_info);

		if (!iwl_mvm_has_tlc_offload(mvm)) {
			IWL_DEBUG_TX_REPLY(mvm,
					   "No reclaim. Update rs directly\n");
			iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
		}
	}

out:
	rcu_read_unlock();

	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status(mvm->hw, skb);
	}
}
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	int sta_id, tid, txq, index;
	struct ieee80211_tx_info ba_info = {};
	struct iwl_mvm_ba_notif *ba_notif;
	struct iwl_mvm_tid_data *tid_data;
	struct iwl_mvm_sta *mvmsta;

	ba_info.flags = IEEE80211_TX_STAT_AMPDU;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		struct iwl_mvm_compressed_ba_notif *ba_res =
			(void *)pkt->data;
		u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
		int i;

		sta_id = ba_res->sta_id;
		ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
		ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
		ba_info.status.tx_time =
			(u16)le32_to_cpu(ba_res->wireless_time);
		ba_info.status.status_driver_data[0] =
			(void *)(uintptr_t)ba_res->reduced_txp;

		if (!le16_to_cpu(ba_res->tfd_cnt))
			goto out;

		rcu_read_lock();

		mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
		if (!mvmsta)
			goto out_unlock;

		/* Free per TID */
		for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
			struct iwl_mvm_compressed_ba_tfd *ba_tfd =
				&ba_res->tfd[i];

			tid = ba_tfd->tid;
			if (tid == IWL_MGMT_TID)
				tid = IWL_MAX_TID_COUNT;

			mvmsta->tid_data[i].lq_color = lq_color;
			iwl_mvm_tx_reclaim(mvm, sta_id, tid,
					   (int)(le16_to_cpu(ba_tfd->q_num)),
					   le16_to_cpu(ba_tfd->tfd_index),
					   &ba_info,
					   le32_to_cpu(ba_res->tx_rate));
		}

		iwl_mvm_tx_airtime(mvm, mvmsta,
				   le32_to_cpu(ba_res->wireless_time));
out_unlock:
		rcu_read_unlock();
out:
		IWL_DEBUG_TX_REPLY(mvm,
				   "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
				   sta_id, le32_to_cpu(ba_res->flags),
				   le16_to_cpu(ba_res->txed),
				   le16_to_cpu(ba_res->done));
		return;
	}

	ba_notif = (void *)pkt->data;
	sta_id = ba_notif->sta_id;
	tid = ba_notif->tid;
	/* "flow" corresponds to Tx queue */
	txq = le16_to_cpu(ba_notif->scd_flow);
	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	index = le16_to_cpu(ba_notif->scd_ssn);

	rcu_read_lock();
	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
	if (WARN_ON_ONCE(!mvmsta)) {
		rcu_read_unlock();
		return;
	}

	tid_data = &mvmsta->tid_data[tid];

	ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
	ba_info.status.ampdu_len = ba_notif->txed;
	ba_info.status.tx_time = tid_data->tx_time;
	ba_info.status.status_driver_data[0] =
		(void *)(uintptr_t)ba_notif->reduced_txp;

	rcu_read_unlock();

	iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
			   tid_data->rate_n_flags);

	IWL_DEBUG_TX_REPLY(mvm,
			   "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
			   ba_notif->sta_addr, ba_notif->sta_id);

	IWL_DEBUG_TX_REPLY(mvm,
			   "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
			   ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
			   le64_to_cpu(ba_notif->bitmap), txq, index,
			   ba_notif->txed, ba_notif->txed_2_done);

	IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
			   ba_notif->reduced_txp);
}
/*
 * Note that there are transports that buffer frames before they reach
 * the firmware. This means that after flush_tx_path is called, the
 * queue might not be empty. The race-free way to handle this is to:
 * 1) set the station as draining
 * 2) flush the Tx path
 * 3) wait for the transport queues to be empty
 */
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
{
	int ret;
	struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
		.queues_ctl = cpu_to_le32(tfd_msk),
		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
	};

	WARN_ON(iwl_mvm_has_new_tx_api(mvm));

	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
				   sizeof(flush_cmd), &flush_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
	return ret;
}
int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
			   u16 tids, u32 flags)
{
	int ret;
	struct iwl_tx_path_flush_cmd flush_cmd = {
		.sta_id = cpu_to_le32(sta_id),
		.tid_mask = cpu_to_le16(tids),
	};

	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
				   sizeof(flush_cmd), &flush_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
	return ret;
}
int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
{
	struct iwl_mvm_int_sta *int_sta = sta;
	struct iwl_mvm_sta *mvm_sta = sta;

	/* the sta_id fields must overlap for the void * pun to be safe */
	BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) !=
		     offsetof(struct iwl_mvm_sta, sta_id));

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
					      0xffff, flags);

	if (internal)
		return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
					     flags);

	return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
}