/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
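/*
 * Find the first station ID that has no entry in fw_id_to_mac_id.
 * STA ID 0 is kept for the AP station on a station interface (the
 * d0i3/d3 flows assume it), so it is only handed out for that
 * interface type.
 */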
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}
/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}
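/*
 * Disable a TXQ and release it back to the pool. On the new TX path the
 * transport frees the queue directly; on the DQA path the TID is unmapped
 * first and the SCD disable command is only sent once no TID uses the
 * queue anymore.
 */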
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		iwl_trans_txq_free(mvm->trans, queue);

		return 0;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}
/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}
	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}
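/*
 * Pick an existing DATA queue of this station to share for a new stream.
 * For example, a new VO stream on a station that already has BE and VI
 * queues ends up on the BE queue (priority 1 below).
 */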
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}
/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	txq->stopped = true;

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	txq->stopped = false;

	return ret;
}
static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}
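/*
 * Allocate a queue from the transport (TVQM, new TX path only). Management
 * (tid == IWL_MAX_TID_COUNT) gets the management queue size; data queues
 * get at least IWL_DEFAULT_QUEUE_SIZE entries.
 */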
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
				   u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
				mvm->trans->cfg->min_256_ba_txq_size);

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->cfg->min_txq_size);
	}
	queue = iwl_trans_txq_alloc(mvm->trans,
				    cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
				    sta_id, tid, SCD_QUEUE_CFG, size, timeout);

	if (queue < 0) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
				    sta_id, tid, queue);
		return queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);

	return queue;
}
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}
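/*
 * Record the queue <-> RA/TID mapping in the driver bookkeeping.
 * Returns %true if this was the first TID on the queue, i.e. the queue
 * still needs to be enabled in the SCD by the caller.
 */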
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}
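/*
 * Hand queue "ownership" to any TID still mapped to it, after the owning
 * TID was removed, and tell the firmware via SCD_QUEUE_CFG.
 */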
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}
/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 q_tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}
/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	rcu_read_unlock();

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}
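/*
 * Allocate a queue for a frame of the given AC/TID: prefer a free (or the
 * station's reserved) queue, then try to free an inactive one, and only
 * then fall back to sharing an existing DATA queue.
 */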
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);

	return ret;
}
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}
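/*
 * Worker that serves mac80211 TXQs which could not get a hardware queue
 * in the TX path: allocate a queue for each pending TXQ and then kick
 * its transmit.
 */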
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
		list_del_init(&mvmtxq->list);
		local_bh_disable();
		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}

	mutex_unlock(&mvm->mutex);
}
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}
/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
							 i, wdg);
			/*
			 * on failures, just set it to IWL_MVM_INVALID_QUEUE
			 * to try again later, we have no other good way of
			 * failing here
			 */
			if (txq_id < 0)
				txq_id = IWL_MVM_INVALID_QUEUE;
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so driver will need to update it
			 * internally as well, so it keeps in sync with real val
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);

			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->trans_cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		INIT_LIST_HEAD(&mvmtxq->list);
		atomic_set(&mvmtxq->tx_request, 0);
	}

	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);
	else
		spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);

	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}
/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}
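/* Disable and unmap all queues that are still allocated to the station */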
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
				    0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status))
			return -EINVAL;

		*status = IWL_MVM_QUEUE_FREE;
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}
int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}
static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
		mvm->trans->trans_cfg->base_params->wd_timeout :
		IWL_WATCHDOG_DISABLED;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	WARN_ON(iwl_mvm_has_new_tx_api(mvm));

	iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
}
static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
		mvm->trans->trans_cfg->base_params->wd_timeout :
		IWL_WATCHDOG_DISABLED;

	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

	return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
				       wdg_timeout);
}
static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
					  int maccolor,
					  struct iwl_mvm_int_sta *sta,
					  u16 *queue, int fifo)
{
	int ret;

	/* Map queue to fifo - needs to happen before adding station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);

	ret = iwl_mvm_add_int_sta_common(mvm, sta, NULL, macidx, maccolor);
	if (ret) {
		if (!iwl_mvm_has_new_tx_api(mvm))
			iwl_mvm_disable_txq(mvm, NULL, *queue,
					    IWL_MAX_TID_COUNT, 0);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int txq;

		txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
		if (txq < 0) {
			iwl_mvm_rm_sta_common(mvm, sta->sta_id);
			return txq;
		}

		*queue = txq;
	}

	return 0;
}
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	ret = iwl_mvm_add_int_sta_with_queue(mvm, MAC_INDEX_AUX, 0,
					     &mvm->aux_sta, &mvm->aux_queue,
					     IWL_MVM_TX_FIFO_MCAST);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	return 0;
}
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
					      &mvm->snif_sta, &mvm->snif_queue,
					      IWL_MVM_TX_FIFO_BE);
}
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}
void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}
/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC) {
			queue = mvm->probe_queue;
		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			queue = mvm->p2p_dev_queue;
		} else {
			WARN(1, "Missing required TXQ for adding bcast STA\n");
			return -EINVAL;
		}

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		if (queue < 0) {
			iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
			return queue;
		}

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}
static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queue = mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queue = mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}
/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}
/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}
/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}
/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = vif->type == NL80211_IFTYPE_AP ?
			IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
		.sta_id = msta->sta_id,
		.tid = 0,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -ENOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
				   timeout);
		msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		goto err;

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
						    0, timeout);
		if (queue < 0) {
			ret = queue;
			goto err;
		}
		mvmvif->cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
				   timeout);

	return 0;
err:
	iwl_mvm_dealloc_int_sta(mvm, msta);
	return ret;
}
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	/* This is a valid situation for GTK removal */
	if (sta_id == IWL_MVM_INVALID_STA)
		return 0;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}
/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);

	iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}
#define IWL_MAX_RX_BA_SESSIONS 16
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_rss_sync_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};

	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&entries[j].e.frames);
		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u16 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		timer_setup(&reorder_buf->reorder_timer,
			    iwl_mvm_reorder_timer_expired, 0);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&entries[j].e.frames);
	}
}
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or
		 * if the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
		 */
		baid_data->entries_per_queue =
			reorder_buf_size / sizeof(baid_data->entries[0]);
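
		/*
		 * As an illustrative example (not the real sizes): with
		 * 64-byte cache lines and 16-byte entries, a 64-frame
		 * window gives reorder_buf_size = 1024, which ALIGN()
		 * leaves untouched, so entries_per_queue ends up as 64.
		 */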
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16(buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};
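
/*
 * Both tables follow the standard WMM UP-to-AC mapping: TIDs 1 and 2 map
 * to background, 0 and 3 to best effort, 4 and 5 to video, and 6 and 7 to
 * voice, e.g. tid_to_mac80211_ac[7] is IEEE80211_AC_VO.
 */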
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	u16 txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_has_new_tx_api(mvm)) {
		u8 ac = tid_to_mac80211_ac[tid];

		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
		if (ret)
			return ret;
	}

	spin_lock_bh(&mvmsta->lock);

	/*
	 * Note the possible cases:
	 *  1. An enabled TXQ - TXQ needs to become agg'ed
	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
	 *	it as reserved
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
					      IWL_MVM_DQA_MIN_DATA_QUEUE,
					      IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto out;
		}

		txq_id = ret;

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
		ret = -ENXIO;
		IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
			tid, IWL_MAX_HW_QUEUES - 1);
		goto out;

	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto out;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->trans_cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
		ret = 0;
	}
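
	/*
	 * For example, on gen2 HW an ssn of 0x112 and a next_reclaimed of
	 * 0x12 compare equal after the 8-bit masking above, so the
	 * aggregation session is allowed to start immediately.
	 */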
out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manager, so this function should never be called in this case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is no need to
		 * allocate a queue.
		 * However, if aggregation size is different than the default
		 * size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it is offloaded to firmware.
		 * Note that if SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -ENOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	queue_status = mvm->queue_info[queue].status;

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
}
static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been allocated
	 * through iwl_mvm_enable_txq, so we can just mark it back as free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);
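
		/*
		 * With the station in drain mode, flush its pending frames
		 * and wait for the queue to empty before turning drain mode
		 * back off and tearing the FW agg state down.
		 */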
		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
						   BIT(tid), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}
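
/*
 * For example, in iwl_mvm_set_fw_key_idx() above: if offsets 0 and 1 are
 * both unused with fw_key_deleted[0] = 3 and fw_key_deleted[1] = 7,
 * offset 1 is chosen since it was freed longer ago; each deletion bumps
 * every counter and zeroes the just-freed slot.
 */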
static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
	if (mfp)
		key_flags |= cpu_to_le16(STA_KEY_MFP);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		/* This is a valid situation for IGTK */
		if (sta_id == IWL_MVM_INVALID_STA)
			return 0;

		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
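
		/*
		 * seq.aes_cmac.pn holds the 6-byte IPN most-significant
		 * byte first, so the shifts above assemble it into a u64
		 * in the order the command expects.
		 */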
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;
	bool mfp = false;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->sta_id;
		mfp = sta->mfp;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset,
					   mfp);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
	}

	return ret;
}
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}
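
/*
 * A WEP key is installed twice in iwl_mvm_set_sta_key() (once for the mcast
 * and once for the ucast direction) sharing a single offset, which is why
 * its removal above mirrors that with a second __iwl_mvm_remove_sta_key()
 * call using the inverted 'mcast' flag.
 */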
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}
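
	/*
	 * For example, with cnt = 4 and two TIDs holding 2 and 1 queued
	 * frames, remaining ends up as 1, so only sleep_tx_count = 3
	 * frames are reported to the firmware and more_data stays false.
	 */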
	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}
static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->trans_cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}
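
/*
 * ieee80211_sn_sub() subtracts modulo the 12-bit sequence-number space, so
 * e.g. sn = 5 with next_reclaimed = 4090 correctly reports 11 frames still
 * queued across the wrap-around.
 */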