/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"
/*
 * New version of ADD_STA_sta command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	return iwl_mvm_has_new_rx_api(mvm) ?
		sizeof(struct iwl_mvm_add_sta_cmd) :
		sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
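
/*
 * Find a free station ID in the driver's station table. ID 0 is reserved
 * for the AP station on a station vif (d0i3/d3 assume it), and
 * IWL_MVM_STATION_COUNT is returned when no free entry exists.
 */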
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_STATION_COUNT;
}
/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (flags & STA_MODIFY_QUEUES)
			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
					  sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
				 struct ieee80211_sta *sta)
{
	unsigned long used_hw_queues;
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
	u32 ac;

	lockdep_assert_held(&mvm->mutex);

	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);

	/* Find available queues, and allocate them to the ACs */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		u8 queue = find_first_zero_bit(&used_hw_queues,
					       mvm->first_agg_queue);

		if (queue >= mvm->first_agg_queue) {
			IWL_ERR(mvm, "Failed to allocate STA queue\n");
			return -EBUSY;
		}

		__set_bit(queue, &used_hw_queues);
		mvmsta->hw_queue[ac] = queue;
	}

	/* Found a place for all queues - enable them */
	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
				      mvmsta->hw_queue[ac],
				      iwl_mvm_ac_to_tx_fifo[ac], 0,
				      wdg_timeout);
		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
	}

	return 0;
}
static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
				    struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long sta_msk;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* disable the TDLS STA-specific queues */
	sta_msk = mvmsta->tfd_queue_msk;
	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}
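
/*
 * Pick and enable a DQA TXQ for this station/TID: management-type frames
 * prefer a MGMT queue, otherwise the station's reserved queue or a free
 * DATA queue is used, and the firmware is updated with STA_MODIFY_QUEUES.
 */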
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	int ssn;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		queue = mvmsta->reserved_queue;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return queue;
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: support shared queues for same RA */

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
			    queue, mvmsta->sta_id, tid);

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
			   wdg_timeout);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
	if (ret)
		goto out_err;

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}
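
/*
 * Worker scanning all stations flagged in sta_deferred_frames and flushing
 * their per-TID deferred frames, allocating DQA queues along the way.
 */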
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int sta_id, tid;

	mutex_lock(&mvm->mutex);

	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}
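
/*
 * Reserve (without enabling) a DQA data queue for a new station, so that
 * a queue is guaranteed to be available once traffic starts flowing.
 */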
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_STATION_COUNT)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;

	/* HW restart, don't assume the memory has been zeroed */
	atomic_set(&mvm->pending_frames[sta_id], 0);
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* allocate new queues for a TDLS station */
	if (sta->tdls) {
		ret = iwl_mvm_tdls_sta_init(mvm, sta);
		if (ret)
			return ret;
	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
	}

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		if (!iwl_mvm_is_dqa_supported(mvm))
			continue;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data),
				   GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		mvm_sta->dup_data = dup_data;
	}

	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	iwl_mvm_tdls_sta_deinit(mvm, sta);
	return ret;
}
int iwl_mvm_update_sta(struct iwl_mvm *mvm,
		       struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
}
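
/*
 * Set or clear the DRAIN_FLOW flag for a station: while set, the firmware
 * drops new frames and drains those already queued for that station.
 */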
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}
/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}
void iwl_mvm_sta_drained_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
	u8 sta_id;

	/*
	 * The mutex is needed because of the SYNC cmd, but not only: if the
	 * work would run concurrently with iwl_mvm_rm_sta, it would run before
	 * iwl_mvm_rm_sta sets the station as busy, and exit. Then
	 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
	 * it.
	 */
	mutex_lock(&mvm->mutex);

	for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
		int ret;
		struct ieee80211_sta *sta =
			rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						  lockdep_is_held(&mvm->mutex));

		/*
		 * This station is in use or RCU-removed; the latter happens in
		 * managed mode, where mac80211 removes the station before we
		 * can remove it from firmware (we can only do that after the
		 * MAC is marked unassociated), and possibly while the deauth
		 * frame to disconnect from the AP is still queued. Then, the
		 * station pointer is -ENOENT when the last skb is reclaimed.
		 */
		if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
			continue;

		if (PTR_ERR(sta) == -EINVAL) {
			IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
				sta_id);
			continue;
		}

		if (!sta) {
			IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
				sta_id);
			continue;
		}

		WARN_ON(PTR_ERR(sta) != -EBUSY);
		/* This station was removed and we waited until it got drained,
		 * we can now proceed and remove it.
		 */
		ret = iwl_mvm_rm_sta_common(mvm, sta_id);
		if (ret) {
			IWL_ERR(mvm,
				"Couldn't remove sta %d after it was drained\n",
				sta_id);
			continue;
		}
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
		clear_bit(sta_id, mvm->sta_drained);

		if (mvm->tfd_drained[sta_id]) {
			unsigned long i, msk = mvm->tfd_drained[sta_id];

			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
				iwl_mvm_disable_txq(mvm, i, i,
						    IWL_MAX_TID_COUNT, 0);

			mvm->tfd_drained[sta_id] = 0;
			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
				       sta_id, msk);
		}
	}

	mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
	}
}
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == mvm_sta->sta_id) {
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
		if (ret)
			return ret;
		/* flush its queues here since we are freeing mvm_sta */
		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
		if (ret)
			return ret;
		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
						    mvm_sta->tfd_queue_msk);
		if (ret)
			return ret;
		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

		/* If DQA is supported - the queues can be disabled now */
		if (iwl_mvm_is_dqa_supported(mvm))
			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

		/* if we are associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	/*
	 * There are frames pending on the AC queues for this station.
	 * We need to wait until all the frames are drained...
	 */
	if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
				   ERR_PTR(-EBUSY));
		spin_unlock_bh(&mvm_sta->lock);

		/* disable TDLS sta queues on drain complete */
		if (sta->tdls) {
			mvm->tfd_drained[mvm_sta->sta_id] =
							mvm_sta->tfd_queue_msk;
			IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
				       mvm_sta->sta_id);
		}

		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	} else {
		spin_unlock_bh(&mvm_sta->lock);

		if (sta->tdls)
			iwl_mvm_tdls_sta_deinit(mvm, sta);

		ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
	}

	return ret;
}
int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}
static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
				    struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_STATION_COUNT;
}
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));

	cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
			      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED);
	if (ret)
		return ret;

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
	return ret;
}
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);
	return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					  mvmvif->id, 0);
}
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}
void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}
/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_is_dqa_supported(mvm)) {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = IWL_MVM_TX_FIFO_VO,
			.sta_id = mvmvif->bcast_sta.sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};
		unsigned int wdg_timeout =
			iwl_mvm_get_wd_timeout(mvm, vif, false, false);
		int queue;

		if ((vif->type == NL80211_IFTYPE_AP) &&
		    (mvmvif->bcast_sta.tfd_queue_msk &
		     BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
			queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
		else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
			 (mvmvif->bcast_sta.tfd_queue_msk &
			  BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
			queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
		else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
			return -EINVAL;

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
				   wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
		return -ENOSPC;

	return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					  mvmvif->id, mvmvif->color);
}
/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 qmask = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_is_dqa_supported(mvm))
		qmask = iwl_mvm_mac_get_queues_mask(vif);

	if (vif->type == NL80211_IFTYPE_AP) {
		/*
		 * The firmware defines the TFD queue mask to only be relevant
		 * for *unicast* queues, so the multicast (CAB) queue shouldn't
		 * be included.
		 */
		qmask &= ~BIT(vif->cab_queue);

		if (iwl_mvm_is_dqa_supported(mvm))
			qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
	} else if (iwl_mvm_is_dqa_supported(mvm) &&
		   vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
	}

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
					ieee80211_vif_type_p2p(vif));
}
/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add. */
int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}
void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}
/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}
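
/* Driver limit on concurrent RX BA (aggregation) sessions */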
#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}
static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&reorder_buf->entries[j]);
		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}
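
/*
 * Initialize the per-RX-queue reorder buffers of a new BA session:
 * head SN, window size, expiry timer and one skb queue per slot.
 */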
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					u32 sta_id,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u8 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		reorder_buf->reorder_timer.function =
			iwl_mvm_reorder_timer_expired;
		reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
		init_timer(&reorder_buf->reorder_timer);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->sta_id = sta_id;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&reorder_buf->entries[j]);
	}
}
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    sizeof(baid_data->reorder_buf[0]),
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
			       start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		init_timer(&baid_data->session_timer);
		baid_data->session_timer.function =
			iwl_mvm_rx_agg_session_expired;
		baid_data->session_timer.data =
			(unsigned long)&mvm->baid_map[baid];
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
					    baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else if (mvm->rx_ba_sessions > 0) {
		u8 baid = mvm_sta->tid_to_baid[tid];

		/* check that restart flow didn't zero the counter */
		mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			      int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		if (!iwl_mvm_is_dqa_supported(mvm))
			mvm_sta->tfd_queue_msk &= ~BIT(queue);
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}
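
/* Standard 802.11 TID to AC mapping, for mac80211 and (below) the uCode */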
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
	 *  2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
	 *	one and mark it as reserved
	 *  3. In DQA mode, but no traffic yet on this TID: same treatment as in
	 *	non-DQA mode, since the TXQ hasn't yet been allocated
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (!iwl_mvm_is_dqa_supported(mvm) ||
	    mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
						 mvm->last_agg_queue);
		if (txq_id < 0) {
			ret = txq_id;
			spin_unlock_bh(&mvm->queue_info_lock);
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	}
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	if (tid_data->ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;

release_locks:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	/* In DQA mode, the existing queue might need to be reconfigured */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		spin_lock_bh(&mvm->queue_info_lock);
		/* Maybe there is no need to even alloc a queue... */
		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
			alloc_queue = false;
		spin_unlock_bh(&mvm->queue_info_lock);

		/*
		 * Only reconfig the SCD for the queue if the window size has
		 * changed from current (become smaller)
		 */
		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
			/*
			 * If reconfiguring an existing queue, it first must be
			 * drained
			 */
			ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
							    BIT(queue));
			if (ret) {
				IWL_ERR(mvm,
					"Error draining queue before reconfig\n");
				return ret;
			}

			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
						   mvmsta->sta_id, tid,
						   buf_size, ssn);
			if (ret) {
				IWL_ERR(mvm,
					"Error reconfiguring TXQ #%d\n", queue);
				return ret;
			}
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
	if (ret)
		return -EIO;

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been
	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		/* There are still packets for this RA / TID in the HW */
		if (tid_data->ssn != tid_data->next_reclaimed) {
			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
			err = 0;
			break;
		}

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
		}
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been
	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
		iwl_trans_wait_tx_queue_empty(mvm->trans,
					      mvmsta->tfd_queue_msk);
		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
					    tid, 0);
		}
	}

	return 0;
}
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}
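
/*
 * Resolve which station a key operation applies to: the given station if
 * any, otherwise (on a station vif) the AP station, which holds the GTKs.
 */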
static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *keyconf, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	int i;
	u8 sta_id = mvm_sta->sta_id;

	keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		cmd.tkip_rx_tsc_byte2 = tkip_iv32;
		for (i = 0; i < 5; i++)
			cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_offset = key_offset;
	cmd.key_flags = key_flags;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
					   sizeof(cmd), &cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
						  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
		    (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_flags = key_flags;
	cmd.key_offset = keyconf->hw_key_idx;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
					  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use.  In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0).  In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

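/*
 * Example (illustrative only, not part of the driver): hardware key slots
 * are tracked in the mvm->fw_key_table bitmap.  An offset is picked by
 * finding a clear bit, but is only committed with __set_bit() once the
 * firmware has accepted the key, so a failed upload leaves the slot free.
 * A minimal userspace sketch of that allocate-then-commit pattern;
 * demo_find_free_slot is a hypothetical stand-in for
 * iwl_mvm_set_fw_key_idx:
 */
#if 0
#include <stdint.h>

#define DEMO_KEY_SLOTS		16
#define DEMO_IDX_INVALID	255	/* stands in for STA_KEY_IDX_INVALID */

static uint32_t demo_key_table;		/* one bit per hardware key slot */

static uint8_t demo_find_free_slot(void)
{
	uint8_t i;

	for (i = 0; i < DEMO_KEY_SLOTS; i++)
		if (!(demo_key_table & (1u << i)))
			return i;
	return DEMO_IDX_INVALID;	/* table full */
}

static void demo_commit_slot(uint8_t idx)
{
	/* mark the slot used only after the firmware accepted the key */
	demo_key_table |= 1u << idx;
}
#endif
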
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_STATION_COUNT;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (!mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	sta_id = mvm_sta->sta_id;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

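/*
 * Example (illustrative only, not part of the driver): fw_key_deleted
 * implements a "least recently deleted" age per key slot - each deletion
 * ages every slot by one (saturating at U8_MAX) and resets the just-freed
 * slot to zero, so the slot with the highest count was freed longest ago.
 * A standalone sketch of the same aging scheme:
 */
#if 0
#include <stdint.h>

#define DEMO_KEY_SLOTS	16

static uint8_t demo_key_deleted[DEMO_KEY_SLOTS];

static void demo_note_key_deleted(uint8_t idx)
{
	uint8_t i;

	for (i = 0; i < DEMO_KEY_SLOTS; i++)
		if (demo_key_deleted[i] < UINT8_MAX)
			demo_key_deleted[i]++;	/* age every slot */
	demo_key_deleted[idx] = 0;		/* this slot was freed just now */
}
#endif
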
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check if
	 * all the queues combined that we're releasing frames from have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

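/*
 * Example (illustrative only, not part of the driver): the TIDs come in
 * as a u16 bitmap and each set bit is folded into a bitmap of access
 * categories, as in the for_each_set_bit() loop above.  A standalone
 * sketch of the same conversion; demo_tid_to_ac is a hypothetical
 * stand-in for tid_to_ucode_ac (the real AC numbering is defined by the
 * firmware API):
 */
#if 0
#include <stdint.h>

#define DEMO_MAX_TID	8

/* hypothetical TID -> AC table; 0=BK 1=BE 2=VI 3=VO for this sketch */
static const uint8_t demo_tid_to_ac[DEMO_MAX_TID] = {
	1, 0, 0, 1, 2, 2, 3, 3
};

static uint8_t demo_tids_to_acs(uint16_t tids)
{
	uint8_t acs = 0;
	int tid;

	for (tid = 0; tid < DEMO_MAX_TID; tid++)
		if (tids & (1u << tid))		/* for each set TID bit... */
			acs |= 1u << demo_tid_to_ac[tid]; /* ...set its AC */
	return acs;
}
#endif
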
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

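/*
 * Example (illustrative only, not part of the driver): ADD_STA
 * modifications are expressed as a value/mask pair - station_flags
 * carries the new bit values and station_flags_msk selects which bits to
 * touch, leaving all other flags unchanged.  A standalone sketch of how
 * such a masked update is conventionally applied on the receiving side:
 */
#if 0
#include <stdint.h>

static uint32_t demo_apply_masked_flags(uint32_t current_flags,
					uint32_t flags, uint32_t msk)
{
	/* bits outside msk are preserved, bits inside are replaced */
	return (current_flags & ~msk) | (flags & msk);
}
#endif
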
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}

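/*
 * Example (illustrative only, not part of the driver): stations are
 * matched to their interface by comparing mac_id_n_color, which packs an
 * id and a "color" into a single u32 so one comparison covers both.  A
 * standalone sketch of that packing; the DEMO_* shift values are
 * hypothetical stand-ins for the layout behind FW_CMD_ID_AND_COLOR:
 */
#if 0
#include <stdint.h>

#define DEMO_ID_POS	0	/* hypothetical id bit position */
#define DEMO_COLOR_POS	8	/* hypothetical color bit position */

static uint32_t demo_id_and_color(uint8_t id, uint8_t color)
{
	/* pack id and color so equality checks compare both at once */
	return ((uint32_t)id << DEMO_ID_POS) |
	       ((uint32_t)color << DEMO_COLOR_POS);
}
#endif
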
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}