// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2014 Intel Mobile Communications GmbH
 * Copyright (C) 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2020 Intel Corporation
 */
#include <linux/etherdevice.h>
#include "mvm.h"
#include "time-event.h"

/* 1 TU (time unit) is 1024 usec */
#define TU_TO_US(x) (x * 1024)
#define TU_TO_MS(x) (TU_TO_US(x) / 1000)
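/*
 * Worked example of the TU arithmetic used throughout this file: with
 * dtim_period = 2 and beacon_int = 100 TU, one DTIM interval is
 * TU_TO_MS(2 * 100) = (200 * 1024) / 1000 = 204 ms.
 */

/*
 * iwl_mvm_teardown_tdls_peers - ask mac80211 to tear down all TDLS peers
 *
 * Walks the firmware station table and requests a TDLS teardown for every
 * TDLS peer found there. Caller must hold mvm->mutex.
 */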
void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
					    NL80211_TDLS_TEARDOWN,
					    WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
					    GFP_KERNEL);
	}
}
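/*
 * iwl_mvm_tdls_sta_count - count TDLS stations in the firmware station table
 *
 * Returns the number of TDLS peers. If @vif is non-NULL, only peers that
 * belong to that interface are counted. Caller must hold mvm->mutex.
 */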
int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	int count = 0;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (!sta || IS_ERR(sta) || !sta->tdls)
			continue;

		if (vif) {
			mvmsta = iwl_mvm_sta_from_mac80211(sta);
			if (mvmsta->vif != vif)
				continue;
		}

		count++;
	}

	return count;
}
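/*
 * Send the TDLS_CONFIG_CMD to the firmware: the MAC id/color of the
 * interface plus one entry per TDLS peer (station id, TID/SSN towards the
 * peer and whether the peer is the TDLS initiator). The response is only
 * length-checked; its contents are not used.
 */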
static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_rx_packet *pkt;
	struct iwl_tdls_config_res *resp;
	struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
	struct iwl_host_cmd cmd = {
		.id = TDLS_CONFIG_CMD,
		.flags = CMD_WANT_SKB,
		.data = { &tdls_cfg_cmd, },
		.len = { sizeof(struct iwl_tdls_config_cmd), },
	};
	struct ieee80211_sta *sta;
	int ret, i, cnt;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	tdls_cfg_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
	tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */

	/* for now the Tx cmd is empty and unused */

	/* populate TDLS peer data */
	cnt = 0;
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
			continue;

		tdls_cfg_cmd.sta_info[cnt].sta_id = i;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
						IWL_MVM_TDLS_FW_TID;
		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
		tdls_cfg_cmd.sta_info[cnt].is_initiator =
				cpu_to_le32(sta->tdls_initiator ? 1 : 0);

		cnt++;
	}

	tdls_cfg_cmd.tdls_peer_count = cnt;
	IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (WARN_ON_ONCE(ret))
		return;

	pkt = cmd.resp_pkt;

	WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));

	/* we don't really care about the response at this point */

	iwl_free_resp(&cmd);
}
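/*
 * Recalculate TDLS-related driver state after a peer was added or removed:
 * update power management around the first/last peer and, if the firmware
 * supports TDLS channel switching, refresh its TDLS configuration.
 */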
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool sta_added)
{
	int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);

	/* when the first peer joins, send a power update first */
	if (tdls_sta_cnt == 1 && sta_added)
		iwl_mvm_power_update_mac(mvm);

	/* Configure the FW with TDLS peer info only if TDLS channel switch
	 * capability is set.
	 * TDLS config data is used currently only in TDLS channel switch code.
	 * Supposed to serve also TDLS buffer station which is not implemented
	 * yet in FW.
	 */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH))
		iwl_mvm_tdls_config(mvm, vif);

	/* when the last peer leaves, send a power update last */
	if (tdls_sta_cnt == 0 && !sta_added)
		iwl_mvm_power_update_mac(mvm);
}
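/*
 * Protect the session for two DTIM intervals (expressed in TU) so the TDLS
 * setup response sent by the peer on the base channel is not missed.
 */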
void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;

	/* Protect the session to hear the TDLS setup response on the channel */

	mutex_lock(&mvm->mutex);
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
		iwl_mvm_schedule_session_protection(mvm, vif, duration,
						    duration, true);
	else
		iwl_mvm_protect_session(mvm, vif, duration,
					duration, 100, true);
	mutex_unlock(&mvm->mutex);
}
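/*
 * TDLS channel-switch state machine kept in mvm->tdls_cs, roughly:
 * IDLE -> REQ_SENT/REQ_RCVD -> RESP_RCVD -> ACTIVE -> IDLE.
 * The helpers below stringify and update that state.
 */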
static const char *
iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
{
	switch (state) {
	case IWL_MVM_TDLS_SW_IDLE:
		return "IDLE";
	case IWL_MVM_TDLS_SW_REQ_SENT:
		return "REQ SENT";
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		return "RESP RECEIVED";
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		return "REQ RECEIVED";
	case IWL_MVM_TDLS_SW_ACTIVE:
		return "ACTIVE";
	}

	return NULL;
}
static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
					 enum iwl_mvm_tdls_cs_state state)
{
	if (mvm->tdls_cs.state == state)
		return;

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
		       iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
		       iwl_mvm_tdls_cs_state_str(state));
	mvm->tdls_cs.state = state;

	/* we only send requests to our switching peer - update sent time */
	if (state == IWL_MVM_TDLS_SW_REQ_SENT)
		mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm);

	if (state == IWL_MVM_TDLS_SW_IDLE)
		mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
}
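/*
 * Handle the firmware's TDLS channel-switch notification. A zero status
 * means the switch failed, so go back to IDLE; otherwise the switch is
 * ACTIVE and the delayed work is re-armed to switch again after the next
 * DTIM interval.
 */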
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	unsigned int delay;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	lockdep_assert_held(&mvm->mutex);

	/* can fail sometimes */
	if (!le32_to_cpu(notif->status)) {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
		return;
	}

	if (WARN_ON(sta_id >= mvm->fw->ucode_capa.num_stations))
		return;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;

	/*
	 * Update state and possibly switch again after this is over (DTIM).
	 * Also convert TU to msec.
	 */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
}
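/*
 * Validate a requested channel-switch action against the current state and
 * the currently switching peer. Returns 0 if the action is allowed,
 * -EBUSY or -EINVAL otherwise.
 */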
static int
iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
			  enum iwl_tdls_channel_switch_type type,
			  const u8 *peer, bool peer_initiator, u32 timestamp)
{
	bool same_peer = false;
	int ret = 0;

	/* get the existing peer if it's there */
	if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
		struct ieee80211_sta *sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (!IS_ERR_OR_NULL(sta))
			same_peer = ether_addr_equal(peer, sta->addr);
	}

	switch (mvm->tdls_cs.state) {
	case IWL_MVM_TDLS_SW_IDLE:
		/*
		 * might be spurious packet from the peer after the switch is
		 * already done
		 */
		if (type == TDLS_MOVE_CH)
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_REQ_SENT:
		/* only allow requests from the same peer */
		if (!same_peer)
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
			 !peer_initiator)
			/*
			 * We received a ch-switch request while an outgoing
			 * one is pending. Allow it if the peer is the link
			 * initiator.
			 */
			ret = -EBUSY;
		else if (type == TDLS_SEND_CHAN_SW_REQ)
			/* wait for idle before sending another request */
			ret = -EBUSY;
		else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
			/* we got a stale response - ignore it */
			ret = -EINVAL;
		break;
	case IWL_MVM_TDLS_SW_RESP_RCVD:
		/*
		 * we are waiting for the FW to give an "active" notification,
		 * so ignore requests in the meantime
		 */
		ret = -EBUSY;
		break;
	case IWL_MVM_TDLS_SW_REQ_RCVD:
		/* as above, allow the link initiator to proceed */
		if (type == TDLS_SEND_CHAN_SW_REQ) {
			if (!same_peer)
				ret = -EBUSY;
			else if (peer_initiator) /* they are the initiator */
				ret = -EBUSY;
		} else if (type == TDLS_MOVE_CH) {
			ret = -EINVAL;
		}
		break;
	case IWL_MVM_TDLS_SW_ACTIVE:
		/*
		 * the only valid request when active is a request to return
		 * to the base channel by the current off-channel peer
		 */
		if (type != TDLS_MOVE_CH || !same_peer)
			ret = -EBUSY;
		break;
	}

	if (ret)
		IWL_DEBUG_TDLS(mvm,
			       "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
			       type, mvm->tdls_cs.state, peer, same_peer,
			       peer_initiator);

	return ret;
}
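/*
 * Build and send the TDLS_CHANNEL_SWITCH_CMD: switch type and timing, the
 * target channel (the peer's channel, or the base channel when returning)
 * and the channel-switch template frame together with its Tx command. On
 * success the state machine is advanced; on failure it drops back to IDLE.
 */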
static int
iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   enum iwl_tdls_channel_switch_type type,
				   const u8 *peer, bool peer_initiator,
				   u8 oper_class,
				   struct cfg80211_chan_def *chandef,
				   u32 timestamp, u16 switch_time,
				   u16 switch_timeout, struct sk_buff *skb,
				   u32 ch_sw_tm_ie)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct iwl_tdls_channel_switch_cmd cmd = {0};
	struct iwl_tdls_channel_switch_cmd_tail *tail =
		iwl_mvm_chan_info_cmd_tail(mvm, &cmd.ci);
	u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
					timestamp);
	if (ret)
		return ret;

	if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	cmd.switch_type = type;
	tail->timing.frame_timestamp = cpu_to_le32(timestamp);
	tail->timing.switch_time = cpu_to_le32(switch_time);
	tail->timing.switch_timeout = cpu_to_le32(switch_timeout);

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, peer);
	if (!sta) {
		rcu_read_unlock();
		ret = -ENOENT;
		goto out;
	}
	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);

	if (!chandef) {
		if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
		    mvm->tdls_cs.peer.chandef.chan) {
			/* actually moving to the channel */
			chandef = &mvm->tdls_cs.peer.chandef;
		} else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
			   type == TDLS_MOVE_CH) {
			/* we need to return to base channel */
			struct ieee80211_chanctx_conf *chanctx =
					rcu_dereference(vif->chanctx_conf);

			if (WARN_ON_ONCE(!chanctx)) {
				rcu_read_unlock();
				goto out;
			}

			chandef = &chanctx->def;
		}
	}

	if (chandef)
		iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef);

	/* keep quota calculation simple for now - 50% of DTIM for TDLS */
	tail->timing.max_offchan_duration =
			cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
					     vif->bss_conf.beacon_int) / 2);

	/* Switch time is the first element in the switch-timing IE. */
	tail->frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);

	info = IEEE80211_SKB_CB(skb);
	hdr = (void *)skb->data;
	if (info->control.hw_key) {
		if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
			rcu_read_unlock();
			ret = -EINVAL;
			goto out;
		}
		iwl_mvm_set_tx_cmd_ccmp(info, &tail->frame.tx_cmd);
	}

	iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info,
			   mvmsta->sta_id);

	iwl_mvm_set_tx_cmd_rate(mvm, &tail->frame.tx_cmd, info, sta,
				hdr->frame_control);
	rcu_read_unlock();

	memcpy(tail->frame.data, skb->data, skb->len);

	ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, len, &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
			ret);
		goto out;
	}

	/* channel switch has started, update state */
	if (type != TDLS_MOVE_CH) {
		mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
		iwl_mvm_tdls_update_cs_state(mvm,
					     type == TDLS_SEND_CHAN_SW_REQ ?
					     IWL_MVM_TDLS_SW_REQ_SENT :
					     IWL_MVM_TDLS_SW_REQ_RCVD);
	} else {
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
	}

out:

	/* channel switch failed - we are idle */
	if (ret)
		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	return ret;
}
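/*
 * Delayed work that runs after a channel switch has finished or timed out.
 * It returns the state machine to IDLE and, if a switching peer is still
 * configured, sends a new switch request and re-arms itself one DTIM
 * interval later.
 */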
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
{
	struct iwl_mvm *mvm;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_vif *vif;
	unsigned int delay;
	int ret;

	mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
	mutex_lock(&mvm->mutex);

	/* called after an active channel switch has finished or timed-out */
	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);

	/* station might be gone, in that case do nothing */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
		goto out;

	sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* the station may not be here, but if it is, it must be a TDLS peer */
	if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	vif = mvmsta->vif;
	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr,
						 mvm->tdls_cs.peer.initiator,
						 mvm->tdls_cs.peer.op_class,
						 &mvm->tdls_cs.peer.chandef,
						 0, 0, 0,
						 mvm->tdls_cs.peer.skb,
						 mvm->tdls_cs.peer.ch_sw_tm_ie);
	if (ret)
		IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);

	/* retry after a DTIM if we failed sending now */
	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
	schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
out:
	mutex_unlock(&mvm->mutex);
}
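/*
 * mac80211 callback to start TDLS channel switching with a peer. Only a
 * single switching peer is supported; its parameters and a copy of the
 * template frame are stored in mvm->tdls_cs.peer for the periodic
 * re-switch work.
 */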
int
iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u8 oper_class,
			    struct cfg80211_chan_def *chandef,
			    struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta;
	unsigned int delay;
	int ret;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
		       sta->addr, chandef->chan->center_freq, chandef->width);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
		IWL_DEBUG_TDLS(mvm,
			       "Existing peer. Can't start switch with %pM\n",
			       sta->addr);
		ret = -EBUSY;
		goto out;
	}

	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
						 TDLS_SEND_CHAN_SW_REQ,
						 sta->addr, sta->tdls_initiator,
						 oper_class, chandef, 0, 0, 0,
						 tmpl_skb, ch_sw_tm_ie);
	if (ret)
		goto out;

	/*
	 * Mark the peer as "in tdls switch" for this vif. We only allow a
	 * single such peer per vif.
	 */
	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
	if (!mvm->tdls_cs.peer.skb) {
		ret = -ENOMEM;
		goto out;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
	mvm->tdls_cs.peer.chandef = *chandef;
	mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
	mvm->tdls_cs.peer.op_class = oper_class;
	mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;

	/*
	 * Wait for 2 DTIM periods before attempting the next switch. The next
	 * switch will be made sooner if the current one completes before that.
	 */
	delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
			     vif->bss_conf.beacon_int);
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));

out:
	mutex_unlock(&mvm->mutex);
	return ret;
}
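/*
 * mac80211 callback to stop TDLS channel switching with a peer. Clears the
 * stored peer state and, if a switch with that peer is in progress, waits
 * one DTIM interval for the PHY to return to the base channel before
 * flushing the delayed work.
 */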
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *cur_sta;
	bool wait_for_phy = false;

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);

	/* we only support a single peer for channel switching */
	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
		IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
		goto out;
	}

	cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
				lockdep_is_held(&mvm->mutex));
	/* make sure it's the same peer */
	if (cur_sta != sta)
		goto out;

	/*
	 * If we're currently in a switch because of the now canceled peer,
	 * wait a DTIM here to make sure the phy is back on the base channel.
	 * We can't otherwise force it.
	 */
	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
	    mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
		wait_for_phy = true;

	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
	dev_kfree_skb(mvm->tdls_cs.peer.skb);
	mvm->tdls_cs.peer.skb = NULL;

out:
	mutex_unlock(&mvm->mutex);

	/* make sure the phy is on the base channel */
	if (wait_for_phy)
		msleep(TU_TO_MS(vif->bss_conf.dtim_period *
				vif->bss_conf.beacon_int));

	/* flush the channel switch state */
	flush_delayed_work(&mvm->tdls_cs.dwork);

	IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
}
void
iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_tdls_ch_sw_params *params)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	enum iwl_tdls_channel_switch_type type;
	unsigned int delay;
	const char *action_str =
		params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
		"REQ" : "RESP";

	mutex_lock(&mvm->mutex);

	IWL_DEBUG_TDLS(mvm,
		       "Received TDLS ch switch action %s from %pM status %d\n",
		       action_str, params->sta->addr, params->status);

	/*
	 * we got a non-zero status from a peer we were switching to - move to
	 * the idle state and retry again later
	 */
	if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
	    params->status != 0 &&
	    mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
		struct ieee80211_sta *cur_sta;

		/* make sure it's the same peer */
		cur_sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
				lockdep_is_held(&mvm->mutex));
		if (cur_sta == params->sta) {
			iwl_mvm_tdls_update_cs_state(mvm,
						     IWL_MVM_TDLS_SW_IDLE);
			goto retry;
		}
	}

	type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
	       TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;

	iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
					   params->sta->tdls_initiator, 0,
					   params->chandef, params->timestamp,
					   params->switch_time,
					   params->switch_timeout,
					   params->tmpl_skb,
					   params->ch_sw_tm_ie);

retry:
	/* register a timeout in case we don't succeed in switching */
	delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
		1024 / 1000;
	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
			 msecs_to_jiffies(delay));
	mutex_unlock(&mvm->mutex);
}