// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2015, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>

/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);

	return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_STATION_COUNT_MAX > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_INVALID_STA;
}

/* Calculate the ampdu density and max size */
u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta,
			       struct ieee80211_bss_conf *link_conf,
			       u32 *_agg_size)
{
	u32 agg_size = 0, mpdu_dens = 0;

	if (WARN_ON(!link_sta))
		return 0;

	/* Note that we always use only legacy & highest supported PPDUs, so
	 * of Draft P802.11be D.30 Table 10-12a--Fields used for calculating
	 * the maximum A-MPDU size of various PPDU types in different bands,
	 * we only need to worry about the highest supported PPDU type here.
	 */

	if (link_sta->ht_cap.ht_supported) {
		agg_size = link_sta->ht_cap.ampdu_factor;
		mpdu_dens = link_sta->ht_cap.ampdu_density;
	}

	if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
		/* overwrite HT values on 6 GHz */
		mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa,
					  IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
		agg_size = le16_get_bits(link_sta->he_6ghz_capa.capa,
					 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
	} else if (link_sta->vht_cap.vht_supported) {
		/* if VHT supported overwrite HT value */
		agg_size = u32_get_bits(link_sta->vht_cap.cap,
					IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
	}

	/* D6.0 10.12.2 A-MPDU length limit rules
	 * A STA indicates the maximum length of the A-MPDU preEOF padding
	 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
	 * Exponent field in its HT Capabilities, VHT Capabilities,
	 * and HE 6 GHz Band Capabilities elements (if present) and the
	 * Maximum AMPDU Length Exponent Extension field in its HE
	 * Capabilities element
	 */
	if (link_sta->he_cap.has_he)
		agg_size +=
			u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3],
				    IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);

	if (link_sta->eht_cap.has_eht)
		agg_size += u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1],
					IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK);

	/* Limit to max A-MPDU supported by FW */
	agg_size = min_t(u32, agg_size,
			 STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT);

	*_agg_size = agg_size;
	return mpdu_dens;
}
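
/*
 * Worked example (illustrative, not from the original source): with the
 * (V)HT/HE convention that the maximum A-MPDU length is
 * 2^(13 + exponent) - 1 octets, a VHT exponent of 7 (about 1 MiB) plus
 * an HE extension of 2 gives exponent 9, about 4 MiB, which matches the
 * STA_FLG_MAX_AGG_SIZE_4M ceiling enforced by the clamp above.
 */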

u8 iwl_mvm_get_sta_uapsd_acs(struct ieee80211_sta *sta)
{
	u8 uapsd_acs = 0;

	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
		uapsd_acs |= BIT(AC_BK);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
		uapsd_acs |= BIT(AC_BE);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
		uapsd_acs |= BIT(AC_VI);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
		uapsd_acs |= BIT(AC_VO);

	return uapsd_acs | uapsd_acs << 4;
}
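
/*
 * Example (assuming the firmware's AC_BK=0 .. AC_VO=3 numbering): a STA
 * with U-APSD negotiated for AC_BK and AC_VO yields 0x09 in the low
 * nibble, and the return value mirrors it to 0x99 so the same ACs are
 * marked both trigger- and delivery-enabled in the ADD_STA command.
 */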

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->deflink.sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->deflink.bandwidth) {
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_20:
		if (sta->deflink.ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->deflink.rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	default:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->deflink.smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->deflink.ht_cap.ht_supported ||
	    mvm_sta->vif->bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ)
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

	mpdu_dens = iwl_mvm_get_sta_ampdu_dens(&sta->deflink,
					       &mvm_sta->vif->bss_conf,
					       &agg_size);
	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
		add_sta_cmd.uapsd_acs = iwl_mvm_get_sta_uapsd_acs(sta);
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;
	unsigned int sta_id;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta_id = ffs(ba_data->sta_mask) - 1; /* don't care which one */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (IS_ERR_OR_NULL(sta))
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
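
/*
 * The BA session timeout is negotiated in TUs (1 TU = 1024 usec) and the
 * timer polls at twice that interval: e.g. a 5000 TU timeout re-arms the
 * timer to fire about 10.24 s after the last RX before the session is
 * declared expired.
 */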

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->deflink.sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}

static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int sta_id, u16 *queueptr, u8 tid)
{
	int queue = *queueptr;
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		if (mvm->sta_remove_requires_queue_remove) {
			u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
					     SCD_QUEUE_CONFIG_CMD);
			struct iwl_scd_queue_cfg_cmd remove_cmd = {
				.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
				.u.remove.sta_mask = cpu_to_le32(BIT(sta_id)),
			};

			if (tid == IWL_MAX_TID_COUNT)
				tid = IWL_MGMT_TID;

			remove_cmd.u.remove.tid = cpu_to_le32(tid);

			ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
						   sizeof(remove_cmd),
						   &remove_cmd);
		} else {
			ret = 0;
		}

		iwl_trans_txq_free(mvm->trans, queue);
		*queueptr = IWL_MVM_INVALID_QUEUE;

		return ret;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		spin_lock_bh(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_unlock_bh(&mvm->add_stream_lock);
	}

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		spin_lock_bh(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_unlock_bh(&mvm->add_stream_lock);
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	u16 queue_tmp = queue;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, sta_id, &queue_tmp, tid);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}

/* Re-configure the SCD for a queue that has already been configured */
static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
				int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = frame_limit,
		.sta_id = sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = fifo,
		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
		.tid = tid,
	};
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
		 "Trying to reconfig unallocated queue %d\n", queue))
		return -ENXIO;

	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
		  queue, fifo, ret);

	return ret;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

	return ret;
}
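
/*
 * Example of the "flipped" AC comparison: enum ieee80211_ac_numbers has
 * VO=0, VI=1, BE=2, BK=3. A queue currently serving AC_VI (1) that gains
 * a TID mapping to AC_BK (3) hits ac > mac80211_ac, so the queue is
 * redirected to the lower-priority BK FIFO.
 */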

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
		 "max queue %d >= num_of_queues (%d)", maxq,
		 mvm->trans->trans_cfg->base_params->num_of_queues))
		maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

static int iwl_mvm_get_queue_size(struct ieee80211_sta *sta)
{
	int max_size = IWL_DEFAULT_QUEUE_SIZE;
	unsigned int link_id;

	/* this queue isn't used for traffic (cab_queue) */
	if (!sta)
		return IWL_MGMT_QUEUE_SIZE;

	rcu_read_lock();

	for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
		struct ieee80211_link_sta *link =
			rcu_dereference(sta->link[link_id]);

		if (!link)
			continue;

		/* support for 512 ba size */
		if (link->eht_cap.has_eht &&
		    max_size < IWL_DEFAULT_QUEUE_SIZE_EHT)
			max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;

		/* support for 256 ba size */
		if (link->he_cap.has_he &&
		    max_size < IWL_DEFAULT_QUEUE_SIZE_HE)
			max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
	}

	rcu_read_unlock();
	return max_size;
}

int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
			    struct ieee80211_sta *sta,
			    u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size;
	u32 sta_mask = 0;

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->cfg->min_txq_size);
	} else {
		size = iwl_mvm_get_queue_size(sta);
	}

	if (sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		struct ieee80211_link_sta *link_sta;
		unsigned int link_id;

		for_each_sta_active_link(mvmsta->vif, sta, link_sta, link_id) {
			struct iwl_mvm_link_sta *link =
				rcu_dereference_protected(mvmsta->link[link_id],
							  lockdep_is_held(&mvm->mutex));

			if (!link)
				continue;

			sta_mask |= BIT(link->sta_id);
		}
	} else {
		sta_mask |= BIT(sta_id);
	}

	if (!sta_mask)
		return -EINVAL;

	queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
				    tid, size, timeout);

	if (queue >= 0)
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
				    queue, sta_mask, tid);

	return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->deflink.sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, sta, mvmsta->deflink.sta_id,
					tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->deflink.sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}

static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}
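
/*
 * inc_ssn handling: iwl_trans_txq_enable_cfg() returns true when the
 * transport had to move the hardware write pointer one past the given
 * SSN, in which case the SCD config sent to the firmware must carry the
 * incremented SSN as well; the caller additionally bumps the TID
 * sequence number (see iwl_mvm_sta_alloc_queue()).
 */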

static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->deflink.sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	unsigned int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 q_tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}
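
/*
 * Ownership example for the changetid logic above: a TXQ with
 * txq_tid = 4 shared with TIDs 0 and 6 loses TID 4 to inactivity; the
 * remaining bitmap no longer contains the owner bit, so the queue is
 * marked in *changetid_queues and iwl_mvm_change_queue_tid() later hands
 * ownership to the first remaining TID.
 */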

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	rcu_read_unlock();

	if (free_queue >= 0 && alloc_for_sta != IWL_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}
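
/*
 * Queue allocation below falls back in this order: the dedicated MGMT
 * range for tid == IWL_MAX_TID_COUNT, the STA's reserved queue, a free
 * DATA queue, a queue freed by the inactivity check, and finally sharing
 * an existing DATA queue.
 */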
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->deflink.sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
	int queue = -1;
	u16 queue_tmp;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing aggregation at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->deflink.sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	queue_tmp = queue;
	iwl_mvm_disable_txq(mvm, sta, mvmsta->deflink.sta_id, &queue_tmp, tid);

	return ret;
}

int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,
			     struct ieee80211_txq *txq)
{
	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
	int ret = -EINVAL;

	lockdep_assert_held(&mvm->mutex);

	if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
	    !txq->sta) {
		return 0;
	}

	if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		ret = 0;
	}

	local_bh_disable();
	spin_lock(&mvm->add_stream_lock);
	if (!list_empty(&mvmtxq->list))
		list_del_init(&mvmtxq->list);
	spin_unlock(&mvm->add_stream_lock);
	local_bh_enable();

	return ret;
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		/*
		 * We can't really do much here, but if this fails we can't
		 * transmit anyway - so just don't transmit the frame etc.
		 * and let them back up ... we've tried our best to allocate
		 * a queue in the function itself.
		 */
		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
			spin_lock_bh(&mvm->add_stream_lock);
			list_del_init(&mvmtxq->list);
			spin_unlock_bh(&mvm->add_stream_lock);
			continue;
		}

		/* now we're ready, any remaining races/concurrency will be
		 * handled in iwl_mvm_mac_itxq_xmit()
		 */
		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);

		local_bh_disable();
		spin_lock(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		spin_unlock(&mvm->add_stream_lock);

		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->deflink.sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
					  struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->deflink.sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->deflink.sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, sta,
							 mvm_sta->deflink.sta_id,
							 i, wdg);
			/*
			 * on failures, just set it to IWL_MVM_INVALID_QUEUE
			 * to try again later, we have no other good way of
			 * failing here
			 */
			if (txq_id < 0)
				txq_id = IWL_MVM_INVALID_QUEUE;
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so driver will need to update it
			 * internally as well, so it keeps in sync with real val
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->deflink.sta_id, i,
					    txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;

	if (iwl_mvm_has_new_station_api(mvm->fw) &&
	    sta->type == IWL_STA_AUX_ACTIVITY)
		cmd.mac_id_n_color = cpu_to_le32(mac_id);
	else
		cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
								     color));

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

/* Initialize driver data of a new sta */
int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		     struct ieee80211_sta *sta, int sta_id, u8 sta_type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret = 0;

	lockdep_assert_held(&mvm->mutex);

	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;

	/* for MLD sta_id(s) should be allocated for each link before calling
	 * this function
	 */
	if (!mvm->mld_api_is_used) {
		if (WARN_ON(sta_id == IWL_INVALID_STA))
			return -EINVAL;

		mvm_sta->deflink.sta_id = sta_id;
		rcu_assign_pointer(mvm_sta->link[0], &mvm_sta->deflink);

		if (!mvm->trans->trans_cfg->gen2)
			mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
				LINK_QUAL_AGG_FRAME_LIMIT_DEF;
		else
			mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
				LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	}

	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta_type;

	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		INIT_LIST_HEAD(&mvmtxq->list);
		atomic_set(&mvmtxq->tx_request, 0);
	}

	if (iwl_mvm_has_new_rx_api(mvm)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			return ret;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);
	else
		spin_lock_init(&mvm_sta->deflink.lq_sta.rs_drv.pers.lock);

	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

	/* MPDUs are counted only when EMLSR is possible */
	if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
	    !sta->tdls && ieee80211_vif_is_mld(vif)) {
		mvm_sta->mpdu_counters =
			kcalloc(mvm->trans->num_rx_queues,
				sizeof(*mvm_sta->mpdu_counters),
				GFP_KERNEL);
		if (mvm_sta->mpdu_counters)
			for (int q = 0; q < mvm->trans->num_rx_queues; q++)
				spin_lock_init(&mvm_sta->mpdu_counters[q].lock);
	}

	return 0;
}

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->deflink.sta_id;

	if (sta_id == IWL_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/* First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	ret = iwl_mvm_sta_init(mvm, vif, sta, sta_id,
			       sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK);
	if (ret)
		goto err;

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->deflink.ap_sta_id != IWL_INVALID_STA);
			mvmvif->deflink.ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->deflink.ap_sta_id == IWL_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->deflink.sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->deflink.sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->deflink.sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		iwl_mvm_disable_txq(mvm, sta, mvm_sta->deflink.sta_id,
				    &mvm_sta->tid_data[i].txq_id, i);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		spin_lock_bh(&mvm->add_stream_lock);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		spin_unlock_bh(&mvm->add_stream_lock);
	}
}

int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}

/* Execute the common part for both MLD and non-MLD modes.
 * Returns if we're done with removing the station, either
 * with error or success
 */
bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		     struct ieee80211_sta *sta,
		     struct ieee80211_link_sta *link_sta, int *ret)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_vif_link_info *mvm_link =
		mvmvif->link[link_sta->link_id];
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_link_sta *mvm_link_sta;
	u8 sta_id;

	lockdep_assert_held(&mvm->mutex);

	mvm_link_sta =
		rcu_dereference_protected(mvm_sta->link[link_sta->link_id],
					  lockdep_is_held(&mvm->mutex));
	sta_id = mvm_link_sta->sta_id;

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status)) {
			*ret = -EINVAL;
			return true;
		}

		*status = IWL_MVM_QUEUE_FREE;
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvm_link->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->cfg.assoc)
			return true;

		/* first remove remaining keys */
		iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link, 0);

		/* unassoc - go ahead - remove the AP STA now */
		mvm_link->ap_sta_id = IWL_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	return false;
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta->deflink.sta_id,
				mvm_sta->tfd_queue_msk);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, sta);

	if (iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink, &ret))
		return ret;

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->deflink.sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->deflink.sta_id], NULL);

	return ret;
}
int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}
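/*
 * Allocate an internal (driver-only) station: pick a free station ID
 * (reusing the previous one across a HW restart) and record the queue
 * mask and station type.
 */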
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     u8 type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_INVALID_STA;
}
static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout =
		mvm->trans->trans_cfg->base_params->wd_timeout;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	WARN_ON(iwl_mvm_has_new_tx_api(mvm));

	iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
}
static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
{
	unsigned int wdg_timeout =
		mvm->trans->trans_cfg->base_params->wd_timeout;

	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

	return iwl_mvm_tvqm_enable_txq(mvm, NULL, sta_id, IWL_MAX_TID_COUNT,
				       wdg_timeout);
}
static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
					  int maccolor, u8 *addr,
					  struct iwl_mvm_int_sta *sta,
					  u16 *queue, int fifo)
{
	int ret;

	/* Map queue to fifo - needs to happen before adding station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);

	ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
	if (ret) {
		if (!iwl_mvm_has_new_tx_api(mvm))
			iwl_mvm_disable_txq(mvm, NULL, sta->sta_id, queue,
					    IWL_MAX_TID_COUNT);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int txq;

		txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
		if (txq < 0) {
			iwl_mvm_rm_sta_common(mvm, sta->sta_id);
			return txq;
		}

		*queue = txq;
	}

	return 0;
}
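/*
 * Add the auxiliary internal station; on CDB NICs, lmac_id selects
 * which LMAC serves its activity (see the comment in the body below).
 */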
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
{
	int ret;
	u32 qmask = mvm->aux_queue == IWL_MVM_INVALID_QUEUE ? 0 :
		BIT(mvm->aux_queue);

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, qmask,
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/*
	 * In CDB NICs we need to specify which lmac to use for aux activity
	 * using the mac_id argument place to send lmac_id to the function
	 */
	ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
					     &mvm->aux_sta, &mvm->aux_queue,
					     IWL_MVM_TX_FIFO_MCAST);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	return 0;
}
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
					      NULL, &mvm->snif_sta,
					      &mvm->snif_queue,
					      IWL_MVM_TX_FIFO_BE);
}
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, mvm->snif_sta.sta_id,
			    &mvm->snif_queue, IWL_MAX_TID_COUNT);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}
int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, mvm->aux_sta.sta_id,
			    &mvm->aux_queue, IWL_MAX_TID_COUNT);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);

	return ret;
}
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}
/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->deflink.bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC) {
			queue = mvm->probe_queue;
		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			queue = mvm->p2p_dev_queue;
		} else {
			WARN(1, "Missing required TXQ for adding bcast STA\n");
			return -EINVAL;
		}

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		if (queue < 0) {
			iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
			return queue;
		}

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC) {
			/* for queue management */
			mvm->probe_queue = queue;
			/* for use in TX */
			mvmvif->deflink.mgmt_queue = queue;
		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			mvm->p2p_dev_queue = queue;
		}
	} else if (vif->type == NL80211_IFTYPE_AP ||
		   vif->type == NL80211_IFTYPE_ADHOC) {
		/* set it for use in TX */
		mvmvif->deflink.mgmt_queue = mvm->probe_queue;
	}

	return 0;
}
void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u16 *queueptr, queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
			  mvmvif->deflink.bcast_sta.tfd_queue_msk);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queueptr = &mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queueptr = &mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	queue = *queueptr;
	iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.bcast_sta.sta_id,
			    queueptr, IWL_MAX_TID_COUNT);

	if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC)
		mvmvif->deflink.mgmt_queue = mvm->probe_queue;

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->deflink.bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->deflink.bcast_sta.tfd_queue_msk &= ~BIT(queue);
}
/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->deflink.bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}
/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add. */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.bcast_sta);
}
/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}
/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->deflink.mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = vif->type == NL80211_IFTYPE_AP ?
			IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
		.sta_id = msta->sta_id,
		.tid = 0,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -EOPNOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE;

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
				   &cfg, timeout);
		msta->tfd_queue_msk |= BIT(mvmvif->deflink.cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		goto err;

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, msta->sta_id,
						    0, timeout);
		if (queue < 0) {
			ret = queue;
			goto err;
		}
		mvmvif->deflink.cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
				   &cfg, timeout);

	return 0;
err:
	iwl_mvm_dealloc_int_sta(mvm, msta);
	return ret;
}
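/*
 * Send an ADD_STA_KEY command marked STA_KEY_NOT_VALID to remove a key
 * from the firmware's key table for the given station.
 */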
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	/* This is a valid situation for GTK removal */
	if (sta_id == IWL_INVALID_STA)
		return 0;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}
/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, mvmvif->deflink.mcast_sta.sta_id,
			  mvmvif->deflink.mcast_sta.tfd_queue_msk);

	iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id,
			    &mvmvif->deflink.cab_queue, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_data notif = {
		.baid = baid,
	};

	iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
					&notif, sizeof(notif));
}
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < data->buf_size; j++)
			__skb_queue_purge(&entries[j].frames);

		spin_unlock_bh(&reorder_buf->lock);
	}
}
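/*
 * Initialize the per-RX-queue reorder buffers of a new BA session,
 * starting each one at the negotiated SSN.
 */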
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < data->buf_size; j++)
			__skb_queue_head_init(&entries[j].frames);
	}
}
static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
				  struct ieee80211_sta *sta,
				  bool start, int tid, u16 ssn,
				  u16 buf_size)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.sta_id = mvm_sta->deflink.sta_id,
		.add_modify = STA_MODE_MODIFY,
	};
	u32 status;
	int ret;

	if (start) {
		cmd.add_immediate_ba_tid = tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16(buf_size);
		cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
	} else {
		cmd.remove_immediate_ba_tid = tid;
		cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
			    !(status & IWL_ADD_STA_BAID_VALID_MASK)))
			return -EINVAL;
		return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		return -ENOSPC;
	default:
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		return -EIO;
	}
}
static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
				  struct ieee80211_sta *sta,
				  bool start, int tid, u16 ssn,
				  u16 buf_size, int baid)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
				  cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
		.flags = CMD_SEND_IN_RFKILL,
		.len[0] = sizeof(cmd),
		.data[0] = &cmd,
	};
	int ret;

	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));

	if (start) {
		cmd.alloc.sta_id_mask =
			cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
		cmd.alloc.tid = tid;
		cmd.alloc.ssn = cpu_to_le16(ssn);
		cmd.alloc.win_size = cpu_to_le16(buf_size);
		baid = -EIO;
	} else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) {
		cmd.remove_v1.baid = cpu_to_le32(baid);
		BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
	} else {
		cmd.remove.sta_id_mask =
			cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
		cmd.remove.tid = cpu_to_le32(tid);
	}

	ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid);
	if (ret)
		return ret;

	if (!start) {
		/* ignore firmware baid on remove */
		baid = 0;
	}

	IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
		     start ? "start" : "stopp");

	if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
		return -EINVAL;

	return baid;
}
static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			      bool start, int tid, u16 ssn, u16 buf_size,
			      int baid)
{
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
		return iwl_mvm_fw_baid_op_cmd(mvm, sta, start,
					      tid, ssn, buf_size, baid);

	return iwl_mvm_fw_baid_op_sta(mvm, sta, start,
				      tid, ssn, buf_size);
}
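/*
 * Start or stop an RX BA (block-ack) session: allocate (or look up) the
 * BAID in the firmware and, on new-RX-API devices, manage the driver's
 * reorder buffer data for it.
 */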
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret, baid;
	u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
							       IWL_MAX_BAID_OLD;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or
		 * if the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
		 */
		baid_data->entries_per_queue =
			reorder_buf_size / sizeof(baid_data->entries[0]);
	}

	if (iwl_mvm_has_new_rx_api(mvm) && !start) {
		baid = mvm_sta->tid_to_baid[tid];
	} else {
		/* we don't really need it in this case */
		baid = -1;
	}

	/* Don't send command to remove (start=0) BAID during restart */
	if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		baid = iwl_mvm_fw_baid_op(mvm, sta, start, tid, ssn, buf_size,
					  baid);

	if (baid < 0) {
		ret = baid;
		goto out_free;
	}

	if (start) {
		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
		baid_data->buf_size = buf_size;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->deflink.sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		timer_shutdown_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
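/*
 * Tell the firmware that TX aggregation on the given TID is being
 * enabled or disabled, updating the station's queue and TID masks.
 */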
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->deflink.sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	u16 txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_has_new_tx_api(mvm)) {
		u8 ac = tid_to_mac80211_ac[tid];

		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
		if (ret)
			return ret;
	}

	spin_lock_bh(&mvmsta->lock);

	/*
	 * Note the possible cases:
	 *  1. An enabled TXQ - TXQ needs to become agg'ed
	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
	 *	it as reserved
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		ret = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
					      IWL_MVM_DQA_MIN_DATA_QUEUE,
					      IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto out;
		}

		txq_id = ret;

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
		ret = -ENXIO;
		IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
			tid, IWL_MAX_HW_QUEUES - 1);
		goto out;

	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto out;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->deflink.sta_id, tid, txq_id,
			    tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->trans_cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
		ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
	}

out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}
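/*
 * Complete the ADDBA handshake: enable (or reconfigure) the TX queue
 * for the aggregation session and update the rate-scaling limits.
 */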
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->deflink.sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manager, so this function should never be called in this case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is no need to
		 * allocate a queue.
		 * However, if aggregation size is different than the default
		 * size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it is offloaded to firmware.
		 * Note that if SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -EOPNOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	queue_status = mvm->queue_info[queue].status;

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->deflink.sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
		min(mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize,
		    buf_size);
	mvmsta->deflink.lq_sta.rs_drv.lq.agg_frame_cnt_limit =
		mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->deflink.lq_sta.rs_drv.lq);
}
static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been allocated
	 * through iwl_mvm_enable_txq, so we can just mark it back as free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->deflink.sta_id, tid, txq_id,
			    tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->deflink.sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->deflink.sta_id, tid, txq_id,
			    tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->deflink.sta_id,
						   BIT(tid)))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}
static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) {
		u8 sta_id = mvmvif->deflink.ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}
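/*
 * Compare two packet numbers stored with the least-significant byte at
 * index 0; returns >0, 0 or <0 in the style of memcmp().
 */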
static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		if (pn1[i] > pn2[i])
			return 1;
		if (pn1[i] < pn2[i])
			return -1;
	}

	return 0;
}
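/*
 * Build and send an ADD_STA_KEY command for the station, handling the
 * per-cipher key material layout, the v1/v2/v3 command formats and the
 * highest RX PN across all TIDs.
 */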
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
					    new_api ? 2 : 1);

	if (sta_id == IWL_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
		key_flags |= cpu_to_le16(STA_KEY_FLG_AMSDU_SPP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (api_ver >= 2) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (api_ver >= 2)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		fallthrough;
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		fallthrough;
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (api_ver >= 2)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
	if (mfp)
		key_flags |= cpu_to_le16(STA_KEY_MFP);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
		i = 0;
	else
		i = -1;

	for (; i < IEEE80211_NUM_TIDS; i++) {
		struct ieee80211_key_seq seq = {};
		u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;
		int rx_pn_len = 8;
		/* there's a hole at 2/3 in FW format depending on version */
		int hole = api_ver >= 3 ? 0 : 2;

		ieee80211_get_key_rx_seq(key, i, &seq);

		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			rx_pn[0] = seq.tkip.iv16;
			rx_pn[1] = seq.tkip.iv16 >> 8;
			rx_pn[2 + hole] = seq.tkip.iv32;
			rx_pn[3 + hole] = seq.tkip.iv32 >> 8;
			rx_pn[4 + hole] = seq.tkip.iv32 >> 16;
			rx_pn[5 + hole] = seq.tkip.iv32 >> 24;
		} else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {
			rx_pn = seq.hw.seq;
			rx_pn_len = seq.hw.seq_len;
		} else {
			rx_pn[0] = seq.ccmp.pn[0];
			rx_pn[1] = seq.ccmp.pn[1];
			rx_pn[2 + hole] = seq.ccmp.pn[2];
			rx_pn[3 + hole] = seq.ccmp.pn[3];
			rx_pn[4 + hole] = seq.ccmp.pn[4];
			rx_pn[5 + hole] = seq.ccmp.pn[5];
		}

		if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,
				   rx_pn_len) > 0)
			memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,
			       rx_pn_len);
	}

	if (api_ver >= 2) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
		     keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		/* This is a valid situation for IGTK */
		if (sta_id == IWL_INVALID_STA)
			return 0;

		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
		       remove_key ? "removing" : "installing",
		       keyconf->keyidx >= 6 ? "B" : "",
		       keyconf->keyidx, igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) {
		u8 sta_id = mvmvif->deflink.ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
			return NULL;

		return sta->addr;
	}

	return NULL;
}
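/*
 * Resolve the firmware station ID for this key (pairwise, AP multicast
 * or the AP station for client GTKs) and program the key, deriving the
 * TKIP phase-1 key from mac80211 when needed.
 */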
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;
	bool mfp = false;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->deflink.sta_id;
		mfp = sta->mfp;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->deflink.mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		if (!addr) {
			IWL_ERR(mvm, "Failed to find mac address\n");
			return -EINVAL;
		}

		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);

		return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					    seq.tkip.iv32, p1k, 0, key_offset,
					    mfp);
	}

	return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
				    0, NULL, 0, key_offset, mfp);
}
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->deflink.sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->deflink.mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->deflink.sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->deflink.mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->deflink.sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->deflink.sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->deflink.sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_BLOCK_TXQS,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
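/*
 * Handle the firmware's end-of-service-period notification by passing
 * it on to mac80211 for the affected station.
 */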
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta,
				   bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->deflink.sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	if (mvm->mld_api_is_used) {
		if (!iwl_mvm_has_no_host_disable_tx(mvm))
			iwl_mvm_mld_sta_modify_disable_tx(mvm, mvmsta, disable);
		return;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	if (mvm->mld_api_is_used) {
		if (!iwl_mvm_has_no_host_disable_tx(mvm))
			iwl_mvm_mld_sta_modify_disable_tx_ap(mvm, sta, disable);
		return;
	}

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * If sta PS state is handled by mac80211, tell it to start/stop
	 * queuing tx for this station.
	 */
	if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}
static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	if (mvm->mld_api_is_used) {
		if (!iwl_mvm_has_no_host_disable_tx(mvm))
			iwl_mvm_mld_modify_all_sta_disable_tx(mvm, mvmvif,
							      disable);
		return;
	}

	rcu_read_lock();

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	rcu_read_unlock();

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->deflink.mcast_sta.sta_id != IWL_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->deflink.mcast_sta,
						  disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->deflink.bcast_sta,
						  disable);
}
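/*
 * Illustrative sketch (not part of the driver): the loop above selects
 * stations by comparing the packed MAC id/color, so a caller only needs
 * the vif. Hypothetical usage around a quiet period, assuming the
 * surrounding locking rules are already met:
 *
 *	// Block every station of this vif (plus the multicast station).
 *	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
 *
 *	// ... quiet period ends; the FW unblocks the broadcast station
 *	// on its own, the driver unblocks everything else ...
 *	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
 */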
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->deflink.ap_sta_id);

	if (mvmsta)
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->trans_cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}
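/*
 * Worked example for the gen2 masking above (illustrative values): with
 * seq_number = 0x1f50 the full SN is 0x1f5, while next_reclaimed wrapped
 * at 8 bits and holds 0xf0. Without the mask,
 * ieee80211_sn_sub(0x1f5, 0xf0) would report 0x105 (261) queued frames;
 * masking aligns both values to 8 bits so ieee80211_sn_sub(0xf5, 0xf0)
 * yields 5, the real queue depth.
 */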
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
			 u8 *key, u32 key_len,
			 struct ieee80211_key_conf *keyconf)
{
	int ret;
	u16 queue;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif);
	bool mld = iwl_mvm_has_mld_api(mvm->fw);
	u32 type = IWL_STA_LINK;

	if (mld)
		type = STATION_TYPE_PEER;

	ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
				       NL80211_IFTYPE_UNSPECIFIED, type);
	if (ret)
		return ret;

	if (mld)
		ret = iwl_mvm_mld_add_int_sta_with_queue(mvm, sta, addr,
							 mvmvif->deflink.fw_link_id,
							 &queue,
							 IWL_MAX_TID_COUNT,
							 &wdg_timeout);
	else
		ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id,
						     mvmvif->color, addr, sta,
						     &queue,
						     IWL_MVM_TX_FIFO_BE);
	if (ret)
		goto out;

	keyconf->cipher = cipher;
	memcpy(keyconf->key, key, key_len);
	keyconf->keylen = key_len;
	keyconf->flags = IEEE80211_KEY_FLAG_PAIRWISE;

	if (mld) {
		/* The MFP flag is set according to the station mfp field. Since
		 * we don't have a station, set it manually.
		 */
		u32 key_flags =
			iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) |
			IWL_SEC_KEY_FLAG_MFP;
		u32 sta_mask = BIT(sta->sta_id);

		ret = iwl_mvm_mld_send_key(mvm, sta_mask, key_flags, keyconf);
	} else {
		ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
					   0, NULL, 0, 0, true);
	}

out:
	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, sta);
	return ret;
}
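/*
 * Illustrative sketch (not part of the driver): a minimal PASN caller.
 * ieee80211_key_conf ends in a flexible key[] array, so the caller must
 * allocate room for the key material itself. The cipher and buffers are
 * placeholders; a real caller would also keep keyconf around so the key
 * can be removed later.
 *
 *	static int example_add_pasn_peer(struct iwl_mvm *mvm,
 *					 struct ieee80211_vif *vif,
 *					 struct iwl_mvm_int_sta *int_sta,
 *					 u8 *peer_addr, u8 *tk, u32 tk_len)
 *	{
 *		struct ieee80211_key_conf *keyconf;
 *		int ret;
 *
 *		keyconf = kzalloc(sizeof(*keyconf) + tk_len, GFP_KERNEL);
 *		if (!keyconf)
 *			return -ENOMEM;
 *
 *		ret = iwl_mvm_add_pasn_sta(mvm, vif, int_sta, peer_addr,
 *					   WLAN_CIPHER_SUITE_CCMP,
 *					   tk, tk_len, keyconf);
 *		kfree(keyconf);
 *		return ret;
 *	}
 */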
void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   u32 id)
{
	struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
		.id = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm,
				   WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
				   CMD_ASYNC,
				   sizeof(cancel_channel_switch_cmd),
				   &cancel_channel_switch_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to cancel the channel switch\n");
}
static int iwl_mvm_fw_sta_id_to_fw_link_id(struct iwl_mvm_vif *mvmvif,
					   u8 fw_sta_id)
{
	struct ieee80211_link_sta *link_sta =
		rcu_dereference(mvmvif->mvm->fw_id_to_link_sta[fw_sta_id]);
	struct iwl_mvm_vif_link_info *link;

	if (WARN_ON_ONCE(!link_sta))
		return -EINVAL;

	link = mvmvif->link[link_sta->link_id];

	if (WARN_ON_ONCE(!link))
		return -EINVAL;

	return link->fw_link_id;
}
#define IWL_MVM_TPT_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ)
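/*
 * The window length is configured in seconds elsewhere
 * (IWL_MVM_TPT_COUNT_WINDOW_SEC) and converted to jiffies here by
 * multiplying with HZ. For example, with HZ = 250 a 5 second window is
 * 1250 jiffies; time_is_before_jiffies(window_start + window) then
 * becomes true once the window has fully elapsed.
 */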
void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
			bool tx, int queue)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif);
	struct iwl_mvm *mvm = mvmvif->mvm;
	struct iwl_mvm_tpt_counter *queue_counter;
	struct iwl_mvm_mpdu_counter *link_counter;
	u32 total_mpdus = 0;
	int fw_link_id;

	/* Count only for a BSS sta, and only when EMLSR is possible */
	if (!mvm_sta->mpdu_counters)
		return;

	/* Map sta id to link id */
	fw_link_id = iwl_mvm_fw_sta_id_to_fw_link_id(mvmvif, fw_sta_id);
	if (fw_link_id < 0)
		return;

	queue_counter = &mvm_sta->mpdu_counters[queue];
	link_counter = &queue_counter->per_link[fw_link_id];

	spin_lock_bh(&queue_counter->lock);

	if (tx)
		link_counter->tx += count;
	else
		link_counter->rx += count;

	/*
	 * When not in EMLSR, the window and the decision to enter EMLSR are
	 * handled during counting, when in EMLSR - in the statistics flow
	 */
	if (mvmvif->esr_active)
		goto out;

	if (time_is_before_jiffies(queue_counter->window_start +
				   IWL_MVM_TPT_COUNT_WINDOW)) {
		memset(queue_counter->per_link, 0,
		       sizeof(queue_counter->per_link));
		queue_counter->window_start = jiffies;

		IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n");
	}

	for (int i = 0; i < IWL_FW_MAX_LINK_ID; i++)
		total_mpdus += tx ? queue_counter->per_link[i].tx :
				    queue_counter->per_link[i].rx;

	if (total_mpdus > IWL_MVM_ENTER_ESR_TPT_THRESH)
		wiphy_work_queue(mvmvif->mvm->hw->wiphy,
				 &mvmvif->unblock_esr_tpt_wk);

out:
	spin_unlock_bh(&queue_counter->lock);
}
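/*
 * Illustrative sketch (not part of the driver): how the Rx and Tx
 * completion paths might feed the counters above. The call is the real
 * function; the surrounding context is an assumption for illustration.
 *
 *	// On an Rx batch of 'n' MPDUs for this station on 'queue':
 *	iwl_mvm_count_mpdu(mvm_sta, fw_sta_id, n, false, queue);
 *
 *	// On a Tx completion reporting 'n' MPDUs:
 *	iwl_mvm_count_mpdu(mvm_sta, fw_sta_id, n, true, queue);
 */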