/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/jiffies.h>
#include <net/mac80211.h>

#include "fw/notif-wait.h"
#include "iwl-trans.h"
#include "fw-api.h"
#include "time-event.h"
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"

/*
 * For the high priority TE use a time event type that has similar priority to
 * the FW's action scan priority.
 */
#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC

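/*
 * Clear a time event slot so it can be reused: unlink it from whichever
 * list it is on and reset the fields that are used to match firmware
 * notifications (uid, id, vif). Callers must hold mvm->time_event_lock.
 */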
void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
			   struct iwl_mvm_time_event_data *te_data)
{
	lockdep_assert_held(&mvm->time_event_lock);

	if (!te_data || !te_data->vif)
		return;

	list_del(&te_data->list);
	te_data->running = false;
	te_data->uid = 0;
	te_data->id = TE_MAX;
	te_data->vif = NULL;
}

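/*
 * Worker scheduled once a remain-on-channel (ROC) time event is over:
 * it makes the TX path drop new off-channel frames and flushes whatever
 * was already queued for the aux and P2P-Device stations.
 */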
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);

	/*
	 * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit.
	 * This will cause the TX path to drop offchannel transmissions.
	 * That would also be done by mac80211, but it is racy, in particular
	 * in the case that the time event actually completed in the firmware
	 * (which is handled in iwl_mvm_te_handle_notif).
	 */
	clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
	clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);

	synchronize_net();

	/*
	 * Flush the offchannel queue -- this is called when the time
	 * event finishes or is canceled, so that frames queued for it
	 * won't get stuck on the queue and be transmitted in the next
	 * time event.
	 * We have to send the command asynchronously since this cannot
	 * be under the mutex for locking reasons, but that's not an
	 * issue as it will have to complete before the next command is
	 * executed, and a new time event means a new command.
	 */
	iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);

	/* Do the same for the P2P device queue (STA) */
	if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
		struct iwl_mvm_vif *mvmvif;

		/*
		 * NB: access to this pointer would be racy, but the flush bit
		 * can only be set when we had a P2P-Device VIF, and we have a
		 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
		 * not really racy.
		 */

		if (!WARN_ON(!mvm->p2p_device_vif)) {
			mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
			iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
					  CMD_ASYNC);
		}
	}
}

static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
{
	/*
	 * Of course, our status bit is just as racy as mac80211, so in
	 * addition, fire off the work struct which will drop all frames
	 * from the hardware queues that made it through the race. First
	 * it will of course synchronize the TX path to make sure that
	 * any *new* TX will be rejected.
	 */
	schedule_work(&mvm->roc_done_wk);
}

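/*
 * Start of the channel-switch NoA time event on an AP/GO interface: if
 * mac80211 reports the CSA countdown as complete, finalize the switch;
 * otherwise warn, keep beaconing on the current channel and let the
 * switch happen on the last TBTT.
 */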
static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
{
	struct ieee80211_vif *csa_vif;

	rcu_read_lock();

	csa_vif = rcu_dereference(mvm->csa_vif);
	if (!csa_vif || !csa_vif->csa_active)
		goto out_unlock;

	IWL_DEBUG_TE(mvm, "CSA NOA started\n");

	/*
	 * CSA NoA is started but we still have beacons to
	 * transmit on the current channel.
	 * So we just do nothing here and the switch
	 * will be performed on the last TBTT.
	 */
	if (!ieee80211_csa_is_complete(csa_vif)) {
		IWL_WARN(mvm, "CSA NOA started too early\n");
		goto out_unlock;
	}

	ieee80211_csa_finish(csa_vif);

	rcu_read_unlock();

	RCU_INIT_POINTER(mvm->csa_vif, NULL);

	return;

out_unlock:
	rcu_read_unlock();
}

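/*
 * Returns true (and reports a connection loss) when a station interface
 * ran out of protection time too early, i.e. it is not associated yet,
 * does not know the DTIM period, or is still waiting for the first beacon
 * after a channel switch; returns false otherwise.
 */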
static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					const char *errmsg)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (vif->type != NL80211_IFTYPE_STATION)
		return false;

	if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc &&
	    vif->bss_conf.dtim_period)
		return false;
	if (errmsg)
		IWL_ERR(mvm, "%s\n", errmsg);

	iwl_mvm_connection_loss(mvm, vif, errmsg);
	return true;
}

static void
iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
			     struct iwl_mvm_time_event_data *te_data,
			     struct iwl_time_event_notif *notif)
{
	struct ieee80211_vif *vif = te_data->vif;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (!notif->status)
		IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");

	switch (te_data->vif->type) {
	case NL80211_IFTYPE_AP:
		if (!notif->status)
			mvmvif->csa_failed = true;
		iwl_mvm_csa_noa_start(mvm);
		break;
	case NL80211_IFTYPE_STATION:
		if (!notif->status) {
			iwl_mvm_connection_loss(mvm, vif,
						"CSA TE failed to start");
			break;
		}
		iwl_mvm_csa_client_absent(mvm, te_data->vif);
		cancel_delayed_work(&mvmvif->csa_work);
		ieee80211_chswitch_done(te_data->vif, true);
		break;
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		break;
	}

	/* we don't need it anymore */
	iwl_mvm_te_clear_data(mvm, te_data);
}

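/*
 * If a firmware debug trigger of type TIME_EVENT is configured for this
 * vif and matches the notification (by time event id, action bitmap and
 * status bitmap), collect firmware debug data.
 */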
static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
				     struct iwl_time_event_notif *notif,
				     struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_time_event *te_trig;
	int i;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
				     ieee80211_vif_to_wdev(te_data->vif),
				     FW_DBG_TRIGGER_TIME_EVENT);
	if (!trig)
		return;

	te_trig = (void *)trig->data;

	for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
		u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
		u32 trig_action_bitmap =
			le32_to_cpu(te_trig->time_events[i].action_bitmap);
		u32 trig_status_bitmap =
			le32_to_cpu(te_trig->time_events[i].status_bitmap);

		if (trig_te_id != te_data->id ||
		    !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
		    !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"Time event %d Action 0x%x received status: %d",
					te_data->id,
					le32_to_cpu(notif->action),
					le32_to_cpu(notif->status));
		break;
	}
}

/*
 * Handles a FW notification for an event that is known to the driver.
 *
 * @mvm: the mvm component
 * @te_data: the time event data
 * @notif: the notification data corresponding to the time event data.
 */
static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
				    struct iwl_mvm_time_event_data *te_data,
				    struct iwl_time_event_notif *notif)
{
	lockdep_assert_held(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	iwl_mvm_te_check_trigger(mvm, notif, te_data);

	/*
	 * The FW sends the start/end time event notifications even for events
	 * that it fails to schedule. This is indicated in the status field of
	 * the notification. This happens in cases that the scheduler cannot
	 * find a schedule that can handle the event (for example requesting a
	 * P2P Device discoverability, while there are other higher priority
	 * events in the system).
	 */
	if (!le32_to_cpu(notif->status)) {
		const char *msg;

		if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
			msg = "Time Event start notification failure";
		else
			msg = "Time Event end notification failure";

		IWL_DEBUG_TE(mvm, "%s\n", msg);

		if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
			iwl_mvm_te_clear_data(mvm, te_data);
			return;
		}
	}

	if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
		IWL_DEBUG_TE(mvm,
			     "TE ended - current time %lu, estimated end %lu\n",
			     jiffies, te_data->end_jiffies);

		switch (te_data->vif->type) {
		case NL80211_IFTYPE_P2P_DEVICE:
			ieee80211_remain_on_channel_expired(mvm->hw);
			set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
			iwl_mvm_roc_finished(mvm);
			break;
		case NL80211_IFTYPE_STATION:
			/*
			 * By now, we should have finished association
			 * and know the dtim period.
			 */
			iwl_mvm_te_check_disconnect(mvm, te_data->vif,
				"No beacon heard and the time event is over already...");
			break;
		default:
			break;
		}

		iwl_mvm_te_clear_data(mvm, te_data);
	} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
		te_data->running = true;
		te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);

		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
			ieee80211_ready_on_channel(mvm->hw);
		} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
			iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
		}
	} else {
		IWL_WARN(mvm, "Got TE with unknown action\n");
	}
}

/*
 * Handle an Aux ROC time event
 */
static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
					   struct iwl_time_event_notif *notif)
{
	struct iwl_mvm_time_event_data *te_data, *tmp;
	bool aux_roc_te = false;

	list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid) {
			aux_roc_te = true;
			break;
		}
	}
	if (!aux_roc_te) /* Not an Aux ROC time event */
		return -EINVAL;

	iwl_mvm_te_check_trigger(mvm, notif, te_data);

	IWL_DEBUG_TE(mvm,
		     "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action), le32_to_cpu(notif->status));

	if (!le32_to_cpu(notif->status) ||
	    le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
		/* End TE, notify mac80211 */
		ieee80211_remain_on_channel_expired(mvm->hw);
		iwl_mvm_roc_finished(mvm); /* flush aux queue */
		list_del(&te_data->list); /* remove from list */
		te_data->running = false;
		te_data->vif = NULL;
		te_data->uid = 0;
		te_data->id = TE_MAX;
	} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
		set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
		te_data->running = true;
		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
	} else {
		IWL_DEBUG_TE(mvm,
			     "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
			     le32_to_cpu(notif->action));
		return -EINVAL;
	}

	return 0;
}

/*
 * The Rx handler for time event notifications
 */
void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_time_event_notif *notif = (void *)pkt->data;
	struct iwl_mvm_time_event_data *te_data, *tmp;

	IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	spin_lock_bh(&mvm->time_event_lock);
	/* This time event is triggered for Aux ROC request */
	if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
		goto unlock;

	list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid)
			iwl_mvm_te_handle_notif(mvm, te_data, notif);
	}
unlock:
	spin_unlock_bh(&mvm->time_event_lock);
}

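/*
 * Notification-wait callback used by iwl_mvm_protect_session(): returns
 * true (waiter done) once the TIME_EVENT_NOTIFICATION carrying our UID
 * arrives or the packet is malformed, false to keep waiting.
 */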
static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
			     struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_notif *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* te_data->uid is already set in the TIME_EVENT_CMD response */
	if (le32_to_cpu(resp->unique_id) != te_data->uid)
		return false;

	IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
		     te_data->uid);
	if (!resp->status)
		IWL_ERR(mvm,
			"TIME_EVENT_NOTIFICATION received but not executed\n");

	return true;
}

static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
					struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_resp *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* we should never get a response to another TIME_EVENT_CMD here */
	if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
		return false;

	te_data->uid = le32_to_cpu(resp->unique_id);
	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
		     te_data->uid);
	return true;
}

static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_time_event_data *te_data,
				       struct iwl_time_event_cmd *te_cmd)
{
	static const u16 time_event_response[] = { TIME_EVENT_CMD };
	struct iwl_notification_wait wait_time_event;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
		     le32_to_cpu(te_cmd->duration));

	spin_lock_bh(&mvm->time_event_lock);
	if (WARN_ON(te_data->id != TE_MAX)) {
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}
	te_data->vif = vif;
	te_data->duration = le32_to_cpu(te_cmd->duration);
	te_data->id = le32_to_cpu(te_cmd->id);
	list_add_tail(&te_data->list, &mvm->time_event_list);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
				   time_event_response,
				   ARRAY_SIZE(time_event_response),
				   iwl_mvm_time_event_response, te_data);

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(*te_cmd), te_cmd);
	if (ret) {
		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
		goto out_clear_te;
	}

	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
	/* should never fail */
	WARN_ON_ONCE(ret);

	if (ret) {
 out_clear_te:
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);
		spin_unlock_bh(&mvm->time_event_lock);
	}
	return ret;
}

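/*
 * Schedule (or extend) the TE_BSS_STA_AGGRESSIVE_ASSOC time event that
 * keeps the device available on the connection's channel during the
 * association flow. Nothing is sent if the running time event already
 * covers min_duration.
 */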
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     u32 duration, u32 min_duration,
			     u32 max_delay, bool wait_for_notif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
	struct iwl_notification_wait wait_te_notif;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		return;
	}

	if (te_data->running) {
		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
			     te_data->uid,
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		/*
		 * we don't have enough time
		 * cancel the current TE and issue a new one
		 * Of course it would be better to remove the old one only
		 * when the new one is added, but we don't care if we are off
		 * channel for a bit. All we need to do, is not to return
		 * before we actually begin to be on the channel.
		 */
		iwl_mvm_stop_session_protection(mvm, vif);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

	time_cmd.apply_time = cpu_to_le32(0);

	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.max_delay = cpu_to_le32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      TE_V2_START_IMMEDIATELY);

	if (!wait_for_notif) {
		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
		return;
	}

	/*
	 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
	 * right after we send the time event
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
				   te_notif_response,
				   ARRAY_SIZE(te_notif_response),
				   iwl_mvm_te_notif, te_data);

	/* If TE was sent OK - wait for the notification that started */
	if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
		IWL_ERR(mvm, "Failed to add TE to protect session\n");
		iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
					 TU_TO_JIFFIES(max_delay))) {
		IWL_ERR(mvm, "Failed to protect session until TE\n");
	}
}

static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
					struct iwl_mvm_time_event_data *te_data,
					u32 *uid)
{
	u32 id;

	/*
	 * It is possible that by the time we got to this point the time
	 * event was already removed.
	 */
	spin_lock_bh(&mvm->time_event_lock);

	/* Save time event uid before clearing its data */
	*uid = te_data->uid;
	id = te_data->id;

	/*
	 * The clear_data function handles time events that were already removed
	 */
	iwl_mvm_te_clear_data(mvm, te_data);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * It is possible that by the time we try to remove it, the time event
	 * has already ended and removed. In such a case there is no need to
	 * send a removal command.
	 */
	if (id == TE_MAX) {
		IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
		return false;
	}

	return true;
}

/*
 * Explicit request to remove an aux ROC time event. The removal of a time
 * event needs to be synchronized with the flow of a time event's end
 * notification, which also removes the time event from the op mode
 * data structures.
 */
static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
				      struct iwl_mvm_vif *mvmvif,
				      struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_hs20_roc_req aux_cmd = {};
	u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm);

	u32 uid;
	int ret;

	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
		return;

	aux_cmd.event_unique_id = cpu_to_le32(uid);
	aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	aux_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
		     le32_to_cpu(aux_cmd.event_unique_id));
	ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
				   len, &aux_cmd);

	if (WARN_ON(ret))
		return;
}

/*
 * Explicit request to remove a time event. The removal of a time event needs to
 * be synchronized with the flow of a time event's end notification, which also
 * removes the time event from the op mode data structures.
 */
void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
			       struct iwl_mvm_vif *mvmvif,
			       struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_time_event_cmd time_cmd = {};
	u32 uid;
	int ret;

	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
		return;

	/* When we remove a TE, the UID is to be set in the id field */
	time_cmd.id = cpu_to_le32(uid);
	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(time_cmd), &time_cmd);
	if (WARN_ON(ret))
		return;
}

/*
 * When the firmware supports the session protection API,
 * this is not needed since it'll automatically remove the
 * session protection after association + beacon reception.
 */
void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	u32 id;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);
	id = te_data->id;
	spin_unlock_bh(&mvm->time_event_lock);

	if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
		IWL_DEBUG_TE(mvm,
			     "don't remove TE with id=%u (not session protection)\n",
			     id);
		return;
	}

	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}

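/*
 * Rx handler for SESSION_PROTECTION_NOTIF. For non-P2P-Device interfaces
 * it updates the vif's time_event_data (and may declare a connection
 * loss); for a P2P Device it maps the start/end indication onto the ROC
 * status bits and mac80211's ready-on-channel/expired callbacks.
 */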
void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
				      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
	struct ieee80211_vif *vif;

	rcu_read_lock();
	vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id),
					     true);

	if (!vif)
		goto out_unlock;

	/* The vif is not a P2P_DEVICE, maintain its time_event_data */
	if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
		struct iwl_mvm_time_event_data *te_data =
			&mvmvif->time_event_data;

		if (!le32_to_cpu(notif->status)) {
			iwl_mvm_te_check_disconnect(mvm, vif,
						    "Session protection failure");
			spin_lock_bh(&mvm->time_event_lock);
			iwl_mvm_te_clear_data(mvm, te_data);
			spin_unlock_bh(&mvm->time_event_lock);
		}

		if (le32_to_cpu(notif->start)) {
			spin_lock_bh(&mvm->time_event_lock);
			te_data->running = le32_to_cpu(notif->start);
			te_data->end_jiffies =
				TU_TO_EXP_TIME(te_data->duration);
			spin_unlock_bh(&mvm->time_event_lock);
		} else {
			/*
			 * By now, we should have finished association
			 * and know the dtim period.
			 */
			iwl_mvm_te_check_disconnect(mvm, vif,
						    "No beacon heard and the session protection is over already...");
			spin_lock_bh(&mvm->time_event_lock);
			iwl_mvm_te_clear_data(mvm, te_data);
			spin_unlock_bh(&mvm->time_event_lock);
		}

		goto out_unlock;
	}

	if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
		/* End TE, notify mac80211 */
		ieee80211_remain_on_channel_expired(mvm->hw);
		set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
		iwl_mvm_roc_finished(mvm);
	} else if (le32_to_cpu(notif->start)) {
		set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
	}

 out_unlock:
	rcu_read_unlock();
}

static int
iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 int duration,
					 enum ieee80211_roc_type type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_session_prot_cmd cmd = {
		.id_and_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
	};

	lockdep_assert_held(&mvm->mutex);

	switch (type) {
	case IEEE80211_ROC_TYPE_NORMAL:
		cmd.conf_id =
			cpu_to_le32(SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV);
		break;
	case IEEE80211_ROC_TYPE_MGMT_TX:
		cmd.conf_id =
			cpu_to_le32(SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION);
		break;
	default:
		WARN_ONCE(1, "Got an invalid ROC type\n");
		return -EINVAL;
	}

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
						    MAC_CONF_GROUP, 0),
				    0, sizeof(cmd), &cmd);
}

int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  int duration, enum ieee80211_roc_type type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);
	if (te_data->running) {
		IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
		return -EBUSY;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
		return iwl_mvm_start_p2p_roc_session_protection(mvm, vif,
								 duration,
								 type);

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	switch (type) {
	case IEEE80211_ROC_TYPE_NORMAL:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
		break;
	case IEEE80211_ROC_TYPE_MGMT_TX:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
		break;
	default:
		WARN_ONCE(1, "Got an invalid ROC type\n");
		return -EINVAL;
	}

	time_cmd.apply_time = cpu_to_le32(0);
	time_cmd.interval = cpu_to_le32(1);

	/*
	 * The P2P Device TEs can have lower priority than other events
	 * that are being scheduled by the driver/fw, and thus it might not be
	 * scheduled. To improve the chances of it being scheduled, allow them
	 * to be fragmented, and in addition allow them to be delayed.
	 */
	time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      TE_V2_START_IMMEDIATELY);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}

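/*
 * Find the time event data of the currently active ROC: the entry of a
 * P2P Device interface if there is one, otherwise the AUX ROC entry.
 * Returns NULL when no ROC time event exists.
 */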
static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
{
	struct iwl_mvm_time_event_data *te_data;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);

	/*
	 * Iterate over the list of time events and find the time event that is
	 * associated with a P2P_DEVICE interface.
	 * This assumes that a P2P_DEVICE interface can have only a single time
	 * event at any given time and this time event corresponds to a ROC
	 * request
	 */
	list_for_each_entry(te_data, &mvm->time_event_list, list) {
		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
			goto out;
	}

	/* There can only be at most one AUX ROC time event, we just use the
	 * list to simplify/unify code. Remove it if it exists.
	 */
	te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
					   struct iwl_mvm_time_event_data,
					   list);
out:
	spin_unlock_bh(&mvm->time_event_lock);
	return te_data;
}

void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
{
	struct iwl_mvm_time_event_data *te_data;
	u32 uid;

	te_data = iwl_mvm_get_roc_te(mvm);
	if (te_data)
		__iwl_mvm_remove_time_event(mvm, te_data, &uid);
}

static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif)
{
	struct iwl_mvm_session_prot_cmd cmd = {
		.id_and_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
		.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
						   MAC_CONF_GROUP, 0),
				   0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm,
			"Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
}

void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif;
	struct iwl_mvm_time_event_data *te_data;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
		mvmvif = iwl_mvm_vif_from_mac80211(vif);

		iwl_mvm_cancel_session_protection(mvm, mvmvif);

		if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);

		iwl_mvm_roc_finished(mvm);

		return;
	}

	te_data = iwl_mvm_get_roc_te(mvm);
	if (!te_data) {
		IWL_WARN(mvm, "No remain on channel event\n");
		return;
	}

	mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);

	if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
		set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
	} else {
		iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
	}

	iwl_mvm_roc_finished(mvm);
}

int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
				struct ieee80211_vif *vif,
				u32 duration, u32 apply_time)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running) {
		u32 id;

		spin_lock_bh(&mvm->time_event_lock);
		id = te_data->id;
		spin_unlock_bh(&mvm->time_event_lock);

		if (id == TE_CHANNEL_SWITCH_PERIOD) {
			IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
			return -EBUSY;
		}

		/*
		 * Remove the session protection time event to allow the
		 * channel switch. If we got here, we just heard a beacon so
		 * the session protection is not needed anymore anyway.
		 */
		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
	time_cmd.apply_time = cpu_to_le32(apply_time);
	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_ABSENCE);
	if (!apply_time)
		time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}

static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
				       struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_session_prot_notif *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
		    pkt->hdr.group_id != MAC_CONF_GROUP))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
		return true;
	}

	resp = (void *)pkt->data;

	if (!resp->status)
		IWL_ERR(mvm,
			"TIME_EVENT_NOTIFICATION received but not executed\n");

	return true;
}

void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 u32 duration, u32 min_duration,
					 bool wait_for_notif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
					 MAC_CONF_GROUP, 0) };
	struct iwl_notification_wait wait_notif;
	struct iwl_mvm_session_prot_cmd cmd = {
		.id_and_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
		.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
	};

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);
	if (te_data->running &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		spin_unlock_bh(&mvm->time_event_lock);

		return;
	}

	iwl_mvm_te_clear_data(mvm, te_data);
	te_data->duration = le32_to_cpu(cmd.duration_tu);
	spin_unlock_bh(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
		     le32_to_cpu(cmd.duration_tu));

	if (!wait_for_notif) {
		if (iwl_mvm_send_cmd_pdu(mvm,
					 iwl_cmd_id(SESSION_PROTECTION_CMD,
						    MAC_CONF_GROUP, 0),
					 0, sizeof(cmd), &cmd)) {
			IWL_ERR(mvm,
				"Couldn't send the SESSION_PROTECTION_CMD\n");
			spin_lock_bh(&mvm->time_event_lock);
			iwl_mvm_te_clear_data(mvm, te_data);
			spin_unlock_bh(&mvm->time_event_lock);
		}

		return;
	}

	iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
				   notif, ARRAY_SIZE(notif),
				   iwl_mvm_session_prot_notif, NULL);

	if (iwl_mvm_send_cmd_pdu(mvm,
				 iwl_cmd_id(SESSION_PROTECTION_CMD,
					    MAC_CONF_GROUP, 0),
				 0, sizeof(cmd), &cmd)) {
		IWL_ERR(mvm,
			"Couldn't send the SESSION_PROTECTION_CMD\n");
		iwl_remove_notification(&mvm->notif_wait, &wait_notif);
	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
					 TU_TO_JIFFIES(100))) {
		IWL_ERR(mvm,
			"Failed to protect session until session protection\n");
	}
}