/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/jiffies.h>
#include <net/mac80211.h>

#include "iwl-notif-wait.h"
#include "iwl-trans.h"
#include "fw-api.h"
#include "time-event.h"
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"
/* A TimeUnit is 1024 microseconds */
#define MSEC_TO_TU(_msec)	((_msec) * 1000 / 1024)
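/* e.g. a 500 ms request becomes MSEC_TO_TU(500) = 500 * 1000 / 1024 = 488 TU */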
/*
 * For the high priority TE use a time event type that has similar priority to
 * the FW's action scan priority.
 */
#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
			   struct iwl_mvm_time_event_data *te_data)
{
	lockdep_assert_held(&mvm->time_event_lock);

	if (te_data->id == TE_MAX)
		return;

	list_del(&te_data->list);
	te_data->running = false;
	te_data->uid = 0;
	te_data->id = TE_MAX;
	te_data->vif = NULL;
}
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);

	synchronize_net();

	/*
	 * Flush the offchannel queue -- this is called when the time
	 * event finishes or is cancelled, so that frames queued for it
	 * won't get stuck on the queue and be transmitted in the next
	 * time event.
	 * We have to send the command asynchronously since this cannot
	 * be under the mutex for locking reasons, but that's not an
	 * issue as it will have to complete before the next command is
	 * executed, and a new time event means a new command.
	 */
	iwl_mvm_flush_tx_path(mvm, BIT(IWL_MVM_OFFCHANNEL_QUEUE), false);
}
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
{
	/*
	 * First, clear the ROC_RUNNING status bit. This will cause the TX
	 * path to drop offchannel transmissions. That would also be done
	 * by mac80211, but it is racy, in particular in the case that the
	 * time event actually completed in the firmware (which is handled
	 * in iwl_mvm_te_handle_notif).
	 */
	clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);

	/*
	 * Of course, our status bit is just as racy as mac80211, so in
	 * addition, fire off the work struct which will drop all frames
	 * from the hardware queues that made it through the race. First
	 * it will of course synchronize the TX path to make sure that
	 * any *new* TX will be rejected.
	 */
	schedule_work(&mvm->roc_done_wk);
}
static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					const char *errmsg)
{
	if (vif->type != NL80211_IFTYPE_STATION)
		return false;
	if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
		return false;
	if (errmsg)
		IWL_ERR(mvm, "%s\n", errmsg);
	ieee80211_connection_loss(vif);
	return true;
}
/*
 * Handles a FW notification for an event that is known to the driver.
 *
 * @mvm: the mvm component
 * @te_data: the time event data
 * @notif: the notification data corresponding to the time event data.
 */
static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
				    struct iwl_mvm_time_event_data *te_data,
				    struct iwl_time_event_notif *notif)
{
	lockdep_assert_held(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	/*
	 * The FW sends the start/end time event notifications even for events
	 * that it fails to schedule. This is indicated in the status field of
	 * the notification. This happens in cases where the scheduler cannot
	 * find a schedule that can handle the event (for example requesting
	 * P2P Device discoverability, while there are other higher priority
	 * events in the system).
	 */
	if (!le32_to_cpu(notif->status)) {
		bool start = le32_to_cpu(notif->action) &
				TE_V2_NOTIF_HOST_EVENT_START;
		IWL_WARN(mvm, "Time Event %s notification failure\n",
			 start ? "start" : "end");
		if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
			iwl_mvm_te_clear_data(mvm, te_data);
			return;
		}
	}

	if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
		IWL_DEBUG_TE(mvm,
			     "TE ended - current time %lu, estimated end %lu\n",
			     jiffies, te_data->end_jiffies);

		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			ieee80211_remain_on_channel_expired(mvm->hw);
			iwl_mvm_roc_finished(mvm);
		}

		/*
		 * By now, we should have finished association
		 * and know the dtim period.
		 */
		iwl_mvm_te_check_disconnect(mvm, te_data->vif,
			"No association and the time event is over already...");
		iwl_mvm_te_clear_data(mvm, te_data);
	} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
		te_data->running = true;
		te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);

		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
			ieee80211_ready_on_channel(mvm->hw);
		}
	} else {
		IWL_WARN(mvm, "Got TE with unknown action\n");
	}
}
/*
 * The Rx handler for time event notifications
 */
int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
				struct iwl_rx_cmd_buffer *rxb,
				struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_time_event_notif *notif = (void *)pkt->data;
	struct iwl_mvm_time_event_data *te_data, *tmp;

	IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	spin_lock_bh(&mvm->time_event_lock);
	list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid)
			iwl_mvm_te_handle_notif(mvm, te_data, notif);
	}
	spin_unlock_bh(&mvm->time_event_lock);

	return 0;
}
static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
					struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_resp *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* we should never get a response to another TIME_EVENT_CMD here */
	if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
		return false;

	te_data->uid = le32_to_cpu(resp->unique_id);
	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
		     te_data->uid);
	return true;
}
/* used to convert from time event API v2 to v1 */
#define TE_V2_DEP_POLICY_MSK (TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\
			      TE_V2_EVENT_SOCIOPATHIC)
static inline u16 te_v2_get_notify(__le16 policy)
{
	return le16_to_cpu(policy) & TE_V2_NOTIF_MSK;
}

static inline u16 te_v2_get_dep_policy(__le16 policy)
{
	return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >>
		TE_V2_PLACEMENT_POS;
}

static inline u16 te_v2_get_absence(__le16 policy)
{
	return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS;
}

static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2,
				struct iwl_time_event_cmd_v1 *cmd_v1)
{
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat);
	cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy));
}
static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm,
				       const struct iwl_time_event_cmd_v2 *cmd)
{
	struct iwl_time_event_cmd_v1 cmd_v1;

	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
		return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
					    sizeof(*cmd), cmd);

	iwl_mvm_te_v2_to_v1(cmd, &cmd_v1);
	return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
				    sizeof(cmd_v1), &cmd_v1);
}
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_time_event_data *te_data,
				       struct iwl_time_event_cmd_v2 *te_cmd)
{
	static const u8 time_event_response[] = { TIME_EVENT_CMD };
	struct iwl_notification_wait wait_time_event;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
		     le32_to_cpu(te_cmd->duration));

	spin_lock_bh(&mvm->time_event_lock);
	if (WARN_ON(te_data->id != TE_MAX)) {
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}
	te_data->vif = vif;
	te_data->duration = le32_to_cpu(te_cmd->duration);
	te_data->id = le32_to_cpu(te_cmd->id);
	list_add_tail(&te_data->list, &mvm->time_event_list);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
				   time_event_response,
				   ARRAY_SIZE(time_event_response),
				   iwl_mvm_time_event_response, te_data);

	ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd);
	if (ret) {
		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
		goto out_clear_te;
	}

	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
	/* should never fail */
	WARN_ON_ONCE(ret);

	if (ret) {
 out_clear_te:
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);
		spin_unlock_bh(&mvm->time_event_lock);
	}
	return ret;
}
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     u32 duration, u32 min_duration,
			     u32 max_delay)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd_v2 time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		return;
	}

	if (te_data->running) {
		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
			     te_data->uid,
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		/*
		 * we don't have enough time
		 * cancel the current TE and issue a new one
		 * Of course it would be better to remove the old one only
		 * when the new one is added, but we don't care if we are off
		 * channel for a bit. All we need to do, is not to return
		 * before we actually begin to be on the channel.
		 */
		iwl_mvm_stop_session_protection(mvm, vif);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

	time_cmd.apply_time =
		cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));

	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.max_delay = cpu_to_le32(max_delay);
	/* TODO: why do we need interval = bi if it is not periodic? */
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END);

	iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
/*
 * Explicit request to remove a time event. The removal of a time event needs to
 * be synchronized with the flow of a time event's end notification, which also
 * removes the time event from the op mode data structures.
 */
void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
			       struct iwl_mvm_vif *mvmvif,
			       struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_time_event_cmd_v2 time_cmd = {};
	u32 id, uid;
	int ret;

	/*
	 * It is possible that by the time we got to this point the time
	 * event was already removed.
	 */
	spin_lock_bh(&mvm->time_event_lock);

	/* Save time event uid before clearing its data */
	uid = te_data->uid;
	id = te_data->id;

	/*
	 * The clear_data function handles time events that were already removed
	 */
	iwl_mvm_te_clear_data(mvm, te_data);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * It is possible that by the time we try to remove it, the time event
	 * has already ended and been removed. In such a case there is no need
	 * to send a removal command.
	 */
	if (id == TE_MAX) {
		IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", uid);
		return;
	}

	/* When we remove a TE, the UID is to be set in the id field */
	time_cmd.id = cpu_to_le32(uid);
	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
	ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd);
	if (WARN_ON(ret))
		return;
}
void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;

	lockdep_assert_held(&mvm->mutex);
	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  int duration, enum ieee80211_roc_type type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd_v2 time_cmd = {};

	lockdep_assert_held(&mvm->mutex);
	if (te_data->running) {
		IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
		return -EBUSY;
	}

	/*
	 * Flush the done work, just in case it's still pending, so that
	 * the work it does can complete and we can accept new frames.
	 */
	flush_work(&mvm->roc_done_wk);

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	switch (type) {
	case IEEE80211_ROC_TYPE_NORMAL:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
		break;
	case IEEE80211_ROC_TYPE_MGMT_TX:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
		break;
	default:
		WARN_ONCE(1, "Got an invalid ROC type\n");
		return -EINVAL;
	}

	time_cmd.apply_time = cpu_to_le32(0);
	time_cmd.interval = cpu_to_le32(1);

	/*
	 * The P2P Device TEs can have lower priority than other events
	 * that are being scheduled by the driver/fw, and thus it might not be
	 * scheduled. To improve the chances of it being scheduled, allow them
	 * to be fragmented, and in addition allow them to be delayed.
	 */
	time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
	time_cmd.repeat = 1;
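	/*
	 * Worked example: a 500 ms ROC request gives duration = MSEC_TO_TU(500)
	 * = 488 TU and max_delay = MSEC_TO_TU(250) = 244 TU; assuming
	 * TE_V2_FRAG_ENDLESS is the "no limit" sentinel, max_frags becomes
	 * min(488 / 50, TE_V2_FRAG_ENDLESS) = 9 fragments of roughly 50 TU each.
	 */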
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm)
{
	struct iwl_mvm_vif *mvmvif;
	struct iwl_mvm_time_event_data *te_data;

	lockdep_assert_held(&mvm->mutex);

	/*
	 * Iterate over the list of time events and find the time event that is
	 * associated with a P2P_DEVICE interface.
	 * This assumes that a P2P_DEVICE interface can have only a single time
	 * event at any given time and this time event corresponds to a ROC
	 * request.
	 */
	mvmvif = NULL;
	spin_lock_bh(&mvm->time_event_lock);
	list_for_each_entry(te_data, &mvm->time_event_list, list) {
		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
			break;
		}
	}
	spin_unlock_bh(&mvm->time_event_lock);

	if (!mvmvif) {
		IWL_WARN(mvm, "P2P_DEVICE no remain on channel event\n");
		return;
	}

	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);

	iwl_mvm_roc_finished(mvm);
}