/* net/mac80211/offchannel.c (Linux 5.7.7) */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Off-channel operation helpers
 *
 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
 * Copyright 2004, Instant802 Networks, Inc.
 * Copyright 2005, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 * Copyright (C) 2019 Intel Corporation
 */
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"

/*
 * Tell our hardware to disable PS.
 * Optionally inform AP that we will go to sleep so that it will buffer
 * the frames while we are doing off-channel work. This is optional
 * because we *may* be doing work on-operating channel, and want our
 * hardware unconditionally awake, but still let the AP send us normal frames.
 */
static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;

        local->offchannel_ps_enabled = false;

        /* FIXME: what to do when local->pspolling is true? */

        del_timer_sync(&local->dynamic_ps_timer);
        del_timer_sync(&ifmgd->bcn_mon_timer);
        del_timer_sync(&ifmgd->conn_mon_timer);

        cancel_work_sync(&local->dynamic_ps_enable_work);

        if (local->hw.conf.flags & IEEE80211_CONF_PS) {
                local->offchannel_ps_enabled = true;
                local->hw.conf.flags &= ~IEEE80211_CONF_PS;
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
        }

        if (!local->offchannel_ps_enabled ||
            !ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
                /*
                 * If power save was enabled, no need to send a nullfunc
                 * frame because AP knows that we are sleeping. But if the
                 * hardware is creating the nullfunc frame for power save
                 * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
                 * enabled) and power save was enabled, the firmware just
                 * sent a null frame with power save disabled. So we need
                 * to send a new nullfunc frame to inform the AP that we
                 * are again sleeping.
                 */
                ieee80211_send_nullfunc(local, sdata, true);
}

/* inform AP that we are awake again, unless power save is enabled */
static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_local *local = sdata->local;

        if (!local->ps_sdata)
                ieee80211_send_nullfunc(local, sdata, false);
        else if (local->offchannel_ps_enabled) {
                /*
                 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
                 * will send a nullfunc frame with the powersave bit set
                 * even though the AP already knows that we are sleeping.
                 * This could be avoided by sending a null frame with power
                 * save bit disabled before enabling the power save, but
                 * this doesn't gain anything.
                 *
                 * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
                 * to send a nullfunc frame because AP already knows that
                 * we are sleeping, let's just enable power save mode in
                 * hardware.
                 */
                /* TODO: Only set hardware if CONF_PS changed?
                 * TODO: Should we set offchannel_ps_enabled to false?
                 */
                local->hw.conf.flags |= IEEE80211_CONF_PS;
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
        } else if (local->hw.conf.dynamic_ps_timeout > 0) {
                /*
                 * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
                 * had been running before leaving the operating channel,
                 * restart the timer now and send a nullfunc frame to inform
                 * the AP that we are awake.
                 */
                ieee80211_send_nullfunc(local, sdata, false);
                mod_timer(&local->dynamic_ps_timer, jiffies +
                          msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
        }

        ieee80211_sta_reset_beacon_monitor(sdata);
        ieee80211_sta_reset_conn_monitor(sdata);
}
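
/*
 * Called before leaving the operating channel (no-chanctx drivers only):
 * flush pending frames, mark interfaces off-channel, pause beaconing and
 * let associated STA interfaces tell their AP we are going to sleep.
 */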
void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
{
        struct ieee80211_sub_if_data *sdata;

        if (WARN_ON(local->use_chanctx))
                return;

        /*
         * notify the AP about us leaving the channel and stop all
         * STA interfaces.
         */

        /*
         * Stop queues and transmit all frames queued by the driver
         * before sending nullfunc to enable powersave at the AP.
         */
        ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
                                        false);
        ieee80211_flush_queues(local, NULL, false);

        mutex_lock(&local->iflist_mtx);
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
                        continue;

                if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
                    sdata->vif.type == NL80211_IFTYPE_NAN)
                        continue;

                if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
                        set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);

                /* Check to see if we should disable beaconing. */
                if (sdata->vif.bss_conf.enable_beacon) {
                        set_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
                                &sdata->state);
                        sdata->vif.bss_conf.enable_beacon = false;
                        ieee80211_bss_info_change_notify(
                                sdata, BSS_CHANGED_BEACON_ENABLED);
                }

                if (sdata->vif.type == NL80211_IFTYPE_STATION &&
                    sdata->u.mgd.associated)
                        ieee80211_offchannel_ps_enable(sdata);
        }
        mutex_unlock(&local->iflist_mtx);
}
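
/*
 * Undo ieee80211_offchannel_stop_vifs(): clear the off-channel state,
 * tell the AP that we are awake again, re-enable beaconing where it was
 * stopped and wake the queues that were stopped for going off-channel.
 */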
void ieee80211_offchannel_return(struct ieee80211_local *local)
{
        struct ieee80211_sub_if_data *sdata;

        if (WARN_ON(local->use_chanctx))
                return;

        mutex_lock(&local->iflist_mtx);
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
                        continue;

                if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
                        clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);

                if (!ieee80211_sdata_running(sdata))
                        continue;

                /* Tell AP we're back */
                if (sdata->vif.type == NL80211_IFTYPE_STATION &&
                    sdata->u.mgd.associated)
                        ieee80211_offchannel_ps_disable(sdata);

                if (test_and_clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
                                       &sdata->state)) {
                        sdata->vif.bss_conf.enable_beacon = true;
                        ieee80211_bss_info_change_notify(
                                sdata, BSS_CHANGED_BEACON_ENABLED);
                }
        }
        mutex_unlock(&local->iflist_mtx);

        ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
                                        false);
}
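
/*
 * Report a ROC item as finished to cfg80211 (TX status for a frame that was
 * never transmitted, then the remain-on-channel/mgmt-TX expiration), unlink
 * it from the list and free it.
 */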
static void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
{
        /* was never transmitted */
        if (roc->frame) {
                cfg80211_mgmt_tx_status(&roc->sdata->wdev, roc->mgmt_tx_cookie,
                                        roc->frame->data, roc->frame->len,
                                        false, GFP_KERNEL);
                ieee80211_free_txskb(&roc->sdata->local->hw, roc->frame);
        }

        if (!roc->mgmt_tx_cookie)
                cfg80211_remain_on_channel_expired(&roc->sdata->wdev,
                                                   roc->cookie, roc->chan,
                                                   GFP_KERNEL);
        else
                cfg80211_tx_mgmt_expired(&roc->sdata->wdev,
                                         roc->mgmt_tx_cookie,
                                         roc->chan, GFP_KERNEL);

        list_del(&roc->list);
        kfree(roc);
}
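
/*
 * Destroy every started ROC entry that is done (aborted, already begun in
 * hardware, or past its requested duration) and return the shortest time
 * remaining among the others, or LONG_MAX if none are left running.
 */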
static unsigned long ieee80211_end_finished_rocs(struct ieee80211_local *local,
                                                 unsigned long now)
{
        struct ieee80211_roc_work *roc, *tmp;
        long remaining_dur_min = LONG_MAX;

        lockdep_assert_held(&local->mtx);

        list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
                long remaining;

                if (!roc->started)
                        break;

                remaining = roc->start_time +
                            msecs_to_jiffies(roc->duration) -
                            now;

                /* In case of HW ROC, it is possible that the HW finished the
                 * ROC session before the actual requested time. In such a case
                 * end the ROC session (disregarding the remaining time).
                 */
                if (roc->abort || roc->hw_begun || remaining <= 0)
                        ieee80211_roc_notify_destroy(roc);
                else
                        remaining_dur_min = min(remaining_dur_min, remaining);
        }

        return remaining_dur_min;
}
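
/*
 * Clean up finished software ROCs and, if any are still running, reschedule
 * the ROC work for the shortest remaining duration. Returns false when
 * there is nothing left to wait for.
 */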
static bool ieee80211_recalc_sw_work(struct ieee80211_local *local,
                                     unsigned long now)
{
        long dur = ieee80211_end_finished_rocs(local, now);

        if (dur == LONG_MAX)
                return false;

        mod_delayed_work(local->workqueue, &local->roc_work, dur);
        return true;
}
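
/*
 * Mark a ROC entry as started: transmit the queued management frame in the
 * mgmt-TX case, otherwise tell cfg80211 that we are now on the channel.
 */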
static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
                                         unsigned long start_time)
{
        if (WARN_ON(roc->notified))
                return;

        roc->start_time = start_time;
        roc->started = true;

        if (roc->mgmt_tx_cookie) {
                if (!WARN_ON(!roc->frame)) {
                        ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7,
                                                  roc->chan->band, 0);
                        roc->frame = NULL;
                }
        } else {
                cfg80211_ready_on_channel(&roc->sdata->wdev, roc->cookie,
                                          roc->chan, roc->req_duration,
                                          GFP_KERNEL);
        }

        roc->notified = true;
}
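
/*
 * Work item queued from ieee80211_ready_on_channel(): mark the started
 * entries as begun in hardware and deliver their start notifications.
 */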
static void ieee80211_hw_roc_start(struct work_struct *work)
{
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local, hw_roc_start);
        struct ieee80211_roc_work *roc;

        mutex_lock(&local->mtx);

        list_for_each_entry(roc, &local->roc_list, list) {
                if (!roc->started)
                        break;

                roc->hw_begun = true;
                ieee80211_handle_roc_started(roc, local->hw_roc_start_time);
        }

        mutex_unlock(&local->mtx);
}
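
/*
 * Driver API: called by drivers implementing remain_on_channel() once the
 * device is actually on the requested channel. It only records the start
 * time and defers the rest to the hw_roc_start work item. A minimal,
 * purely illustrative driver sketch ("mydrv" names are invented here):
 *
 *      static void mydrv_roc_started_event(struct mydrv_priv *priv)
 *      {
 *              ieee80211_ready_on_channel(priv->hw);
 *      }
 */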
void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
{
        struct ieee80211_local *local = hw_to_local(hw);

        local->hw_roc_start_time = jiffies;

        trace_api_ready_on_channel(local);

        ieee80211_queue_work(hw, &local->hw_roc_start);
}
EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
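
/*
 * Start the first pending ROC. Consecutive entries for the same interface
 * and channel are combined: hardware ROC is requested for the longest
 * duration (and highest type), software ROC schedules the ROC work after
 * the shortest duration so the list is re-evaluated as requests expire.
 */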
static void _ieee80211_start_next_roc(struct ieee80211_local *local)
{
        struct ieee80211_roc_work *roc, *tmp;
        enum ieee80211_roc_type type;
        u32 min_dur, max_dur;

        lockdep_assert_held(&local->mtx);

        if (WARN_ON(list_empty(&local->roc_list)))
                return;

        roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
                               list);

        if (WARN_ON(roc->started))
                return;

        min_dur = roc->duration;
        max_dur = roc->duration;
        type = roc->type;

        list_for_each_entry(tmp, &local->roc_list, list) {
                if (tmp == roc)
                        continue;
                if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
                        break;
                max_dur = max(tmp->duration, max_dur);
                min_dur = min(tmp->duration, min_dur);
                type = max(tmp->type, type);
        }

        if (local->ops->remain_on_channel) {
                int ret = drv_remain_on_channel(local, roc->sdata, roc->chan,
                                                max_dur, type);

                if (ret) {
                        wiphy_warn(local->hw.wiphy,
                                   "failed to start next HW ROC (%d)\n", ret);
                        /*
                         * queue the work struct again to avoid recursion
                         * when multiple failures occur
                         */
                        list_for_each_entry(tmp, &local->roc_list, list) {
                                if (tmp->sdata != roc->sdata ||
                                    tmp->chan != roc->chan)
                                        break;
                                tmp->started = true;
                                tmp->abort = true;
                        }
                        ieee80211_queue_work(&local->hw, &local->hw_roc_done);
                        return;
                }

                /* we'll notify about the start once the HW calls back */
                list_for_each_entry(tmp, &local->roc_list, list) {
                        if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
                                break;
                        tmp->started = true;
                }
        } else {
                /* If actually operating on the desired channel (with at least
                 * 20 MHz channel width) don't stop all the operations but still
                 * treat it as though the ROC operation started properly, so
                 * other ROC operations won't interfere with this one.
                 */
                roc->on_channel = roc->chan == local->_oper_chandef.chan &&
                                  local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
                                  local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;

                /* start this ROC */
                ieee80211_recalc_idle(local);

                if (!roc->on_channel) {
                        ieee80211_offchannel_stop_vifs(local);

                        local->tmp_channel = roc->chan;
                        ieee80211_hw_config(local, 0);
                }

                ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
                                             msecs_to_jiffies(min_dur));

                /* tell userspace or send frame(s) */
                list_for_each_entry(tmp, &local->roc_list, list) {
                        if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
                                break;

                        tmp->on_channel = roc->on_channel;
                        ieee80211_handle_roc_started(tmp, jiffies);
                }
        }
}
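
/*
 * Kick off the next pending ROC, or run the deferred scan when the list is
 * empty. Nothing is started while the driver is being reconfigured; the
 * software (non-HW) case is started slightly delayed via the ROC work.
 */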
void ieee80211_start_next_roc(struct ieee80211_local *local)
{
        struct ieee80211_roc_work *roc;

        lockdep_assert_held(&local->mtx);

        if (list_empty(&local->roc_list)) {
                ieee80211_run_deferred_scan(local);
                return;
        }

        /* defer roc if driver is not started (i.e. during reconfig) */
        if (local->in_reconfig)
                return;

        roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
                               list);

        if (WARN_ON_ONCE(roc->started))
                return;

        if (local->ops->remain_on_channel) {
                _ieee80211_start_next_roc(local);
        } else {
                /* delay it a bit */
                ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
                                             round_jiffies_relative(HZ/2));
        }
}
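
/*
 * Software ROC state machine, run from the ROC work with local->mtx held:
 * start the first pending entry, or clean up the entries that finished and
 * return to the operating channel when nothing is left to wait for.
 */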
static void __ieee80211_roc_work(struct ieee80211_local *local)
{
        struct ieee80211_roc_work *roc;
        bool on_channel;

        lockdep_assert_held(&local->mtx);

        if (WARN_ON(local->ops->remain_on_channel))
                return;

        roc = list_first_entry_or_null(&local->roc_list,
                                       struct ieee80211_roc_work, list);
        if (!roc)
                return;

        if (!roc->started) {
                WARN_ON(local->use_chanctx);
                _ieee80211_start_next_roc(local);
        } else {
                on_channel = roc->on_channel;
                if (ieee80211_recalc_sw_work(local, jiffies))
                        return;

                /* careful - roc pointer became invalid during recalc */

                if (!on_channel) {
                        ieee80211_flush_queues(local, NULL, false);

                        local->tmp_channel = NULL;
                        ieee80211_hw_config(local, 0);

                        ieee80211_offchannel_return(local);
                }

                ieee80211_recalc_idle(local);
                ieee80211_start_next_roc(local);
        }
}

static void ieee80211_roc_work(struct work_struct *work)
{
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local, roc_work.work);

        mutex_lock(&local->mtx);
        __ieee80211_roc_work(local);
        mutex_unlock(&local->mtx);
}
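
/*
 * Work item run after the driver reports that the hardware ROC has ended:
 * destroy the finished entries and start the next pending ROC, if any.
 */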
static void ieee80211_hw_roc_done(struct work_struct *work)
{
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local, hw_roc_done);

        mutex_lock(&local->mtx);

        ieee80211_end_finished_rocs(local, jiffies);

        /* if there's another roc, start it now */
        ieee80211_start_next_roc(local);

        mutex_unlock(&local->mtx);
}
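
/*
 * Driver API: called by the driver when its remain-on-channel period has
 * ended; the actual cleanup is deferred to the hw_roc_done work item.
 */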
void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
{
        struct ieee80211_local *local = hw_to_local(hw);

        trace_api_remain_on_channel_expired(local);

        ieee80211_queue_work(hw, &local->hw_roc_done);
}
EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
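
/*
 * Try to fold a new request into an already-started hardware ROC; returns
 * true if the new entry could be queued right behind the current one.
 */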
static bool
ieee80211_coalesce_hw_started_roc(struct ieee80211_local *local,
                                  struct ieee80211_roc_work *new_roc,
                                  struct ieee80211_roc_work *cur_roc)
{
        unsigned long now = jiffies;
        unsigned long remaining;

        if (WARN_ON(!cur_roc->started))
                return false;

        /* if it was scheduled in the hardware, but not started yet,
         * we can only combine if the older one had a longer duration
         */
        if (!cur_roc->hw_begun && new_roc->duration > cur_roc->duration)
                return false;

        remaining = cur_roc->start_time +
                    msecs_to_jiffies(cur_roc->duration) -
                    now;

        /* if it doesn't fit entirely, schedule a new one */
        if (new_roc->duration > jiffies_to_msecs(remaining))
                return false;

        /* add just after the current one so we combine their finish later */
        list_add(&new_roc->list, &cur_roc->list);

        /* if the existing one has already begun then let this one also
         * begin, otherwise they'll both be marked properly by the work
         * struct that runs once the driver notifies us of the beginning
         */
        if (cur_roc->hw_begun) {
                new_roc->hw_begun = true;
                ieee80211_handle_roc_started(new_roc, now);
        }

        return true;
}
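
/*
 * Common entry point for remain-on-channel and off-channel mgmt-TX:
 * allocate a ROC entry, start it right away when nothing else is pending,
 * otherwise queue it and try to combine it with existing requests for the
 * same interface and channel.
 */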
static int ieee80211_start_roc_work(struct ieee80211_local *local,
                                    struct ieee80211_sub_if_data *sdata,
                                    struct ieee80211_channel *channel,
                                    unsigned int duration, u64 *cookie,
                                    struct sk_buff *txskb,
                                    enum ieee80211_roc_type type)
{
        struct ieee80211_roc_work *roc, *tmp;
        bool queued = false, combine_started = true;
        int ret;

        lockdep_assert_held(&local->mtx);

        if (local->use_chanctx && !local->ops->remain_on_channel)
                return -EOPNOTSUPP;

        roc = kzalloc(sizeof(*roc), GFP_KERNEL);
        if (!roc)
                return -ENOMEM;

        /*
         * If the duration is zero, then the driver
         * wouldn't actually do anything. Set it to
         * 10 for now.
         *
         * TODO: cancel the off-channel operation
         *       when we get the SKB's TX status and
         *       the wait time was zero before.
         */
        if (!duration)
                duration = 10;

        roc->chan = channel;
        roc->duration = duration;
        roc->req_duration = duration;
        roc->frame = txskb;
        roc->type = type;
        roc->sdata = sdata;

        /*
         * cookie is either the roc cookie (for normal roc)
         * or the SKB (for mgmt TX)
         */
        if (!txskb) {
                roc->cookie = ieee80211_mgmt_tx_cookie(local);
                *cookie = roc->cookie;
        } else {
                roc->mgmt_tx_cookie = *cookie;
        }

        /* if there's no need to queue, handle it immediately */
        if (list_empty(&local->roc_list) &&
            !local->scanning && !ieee80211_is_radar_required(local)) {
                /* if not HW assist, just queue & schedule work */
                if (!local->ops->remain_on_channel) {
                        list_add_tail(&roc->list, &local->roc_list);
                        ieee80211_queue_delayed_work(&local->hw,
                                                     &local->roc_work, 0);
                } else {
                        /* otherwise actually kick it off here
                         * (for error handling)
                         */
                        ret = drv_remain_on_channel(local, sdata, channel,
                                                    duration, type);
                        if (ret) {
                                kfree(roc);
                                return ret;
                        }
                        roc->started = true;
                        list_add_tail(&roc->list, &local->roc_list);
                }

                return 0;
        }

        /* otherwise handle queueing */

        list_for_each_entry(tmp, &local->roc_list, list) {
                if (tmp->chan != channel || tmp->sdata != sdata)
                        continue;

                /*
                 * Extend this ROC if possible: If it hasn't started, add
                 * just after the new one to combine.
                 */
                if (!tmp->started) {
                        list_add(&roc->list, &tmp->list);
                        queued = true;
                        break;
                }

                if (!combine_started)
                        continue;

                if (!local->ops->remain_on_channel) {
                        /* If there's no hardware remain-on-channel, and
                         * doing so won't push us over the maximum r-o-c
                         * we allow, then we can just add the new one to
                         * the list and mark it as having started now.
                         * If it would push over the limit, don't try to
                         * combine with other started ones (that haven't
                         * been running as long) but potentially sort it
                         * with others that had the same fate.
                         */
                        unsigned long now = jiffies;
                        u32 elapsed = jiffies_to_msecs(now - tmp->start_time);
                        struct wiphy *wiphy = local->hw.wiphy;
                        u32 max_roc = wiphy->max_remain_on_channel_duration;

                        if (elapsed + roc->duration > max_roc) {
                                combine_started = false;
                                continue;
                        }

                        list_add(&roc->list, &tmp->list);
                        queued = true;
                        roc->on_channel = tmp->on_channel;
                        ieee80211_handle_roc_started(roc, now);
                        ieee80211_recalc_sw_work(local, now);
                        break;
                }

                queued = ieee80211_coalesce_hw_started_roc(local, roc, tmp);
                if (queued)
                        break;
                /* if it wasn't queued, perhaps it can be combined with
                 * another that also couldn't get combined previously,
                 * but no need to check for already started ones, since
                 * that can't work.
                 */
                combine_started = false;
        }

        if (!queued)
                list_add_tail(&roc->list, &local->roc_list);

        return 0;
}
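
/* cfg80211 remain_on_channel handler, wrapping ieee80211_start_roc_work() */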
int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
                                struct ieee80211_channel *chan,
                                unsigned int duration, u64 *cookie)
{
        struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
        struct ieee80211_local *local = sdata->local;
        int ret;

        mutex_lock(&local->mtx);
        ret = ieee80211_start_roc_work(local, sdata, chan,
                                       duration, cookie, NULL,
                                       IEEE80211_ROC_TYPE_NORMAL);
        mutex_unlock(&local->mtx);

        return ret;
}
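
/*
 * Cancel a ROC identified by its cookie (ROC cookie or mgmt-TX cookie).
 * A not-yet-started entry is destroyed directly; a started hardware ROC is
 * cancelled in the driver, a software one is aborted via the ROC work.
 */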
static int ieee80211_cancel_roc(struct ieee80211_local *local,
                                u64 cookie, bool mgmt_tx)
{
        struct ieee80211_roc_work *roc, *tmp, *found = NULL;
        int ret;

        if (!cookie)
                return -ENOENT;

        flush_work(&local->hw_roc_start);

        mutex_lock(&local->mtx);
        list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
                if (!mgmt_tx && roc->cookie != cookie)
                        continue;
                else if (mgmt_tx && roc->mgmt_tx_cookie != cookie)
                        continue;

                found = roc;
                break;
        }

        if (!found) {
                mutex_unlock(&local->mtx);
                return -ENOENT;
        }

        if (!found->started) {
                ieee80211_roc_notify_destroy(found);
                goto out_unlock;
        }

        if (local->ops->remain_on_channel) {
                ret = drv_cancel_remain_on_channel(local, roc->sdata);
                if (WARN_ON_ONCE(ret)) {
                        mutex_unlock(&local->mtx);
                        return ret;
                }

                /* TODO:
                 * if multiple items were combined here then we really shouldn't
                 * cancel them all - we should wait for as much time as needed
                 * for the longest remaining one, and only then cancel ...
                 */
                list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
                        if (!roc->started)
                                break;
                        if (roc == found)
                                found = NULL;
                        ieee80211_roc_notify_destroy(roc);
                }

                /* that really must not happen - it was started */
                WARN_ON(found);

                ieee80211_start_next_roc(local);
        } else {
                /* go through work struct to return to the operating channel */
                found->abort = true;
                mod_delayed_work(local->workqueue, &local->roc_work, 0);
        }

 out_unlock:
        mutex_unlock(&local->mtx);

        return 0;
}

int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
                                       struct wireless_dev *wdev, u64 cookie)
{
        struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
        struct ieee80211_local *local = sdata->local;

        return ieee80211_cancel_roc(local, cookie, false);
}
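
/*
 * cfg80211 mgmt_tx handler: transmit a management frame, going off-channel
 * through the ROC machinery when the requested channel is not the one the
 * interface is currently operating on.
 */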
int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                      struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
        struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
        struct sta_info *sta;
        const struct ieee80211_mgmt *mgmt = (void *)params->buf;
        bool need_offchan = false;
        u32 flags;
        int ret;
        u8 *data;

        if (params->dont_wait_for_ack)
                flags = IEEE80211_TX_CTL_NO_ACK;
        else
                flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
                        IEEE80211_TX_CTL_REQ_TX_STATUS;

        if (params->no_cck)
                flags |= IEEE80211_TX_CTL_NO_CCK_RATE;

        switch (sdata->vif.type) {
        case NL80211_IFTYPE_ADHOC:
                if (!sdata->vif.bss_conf.ibss_joined)
                        need_offchan = true;
#ifdef CONFIG_MAC80211_MESH
                /* fall through */
        case NL80211_IFTYPE_MESH_POINT:
                if (ieee80211_vif_is_mesh(&sdata->vif) &&
                    !sdata->u.mesh.mesh_id_len)
                        need_offchan = true;
#endif
                /* fall through */
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_P2P_GO:
                if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
                    !ieee80211_vif_is_mesh(&sdata->vif) &&
                    !rcu_access_pointer(sdata->bss->beacon))
                        need_offchan = true;
                if (!ieee80211_is_action(mgmt->frame_control) ||
                    mgmt->u.action.category == WLAN_CATEGORY_PUBLIC ||
                    mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED ||
                    mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT)
                        break;
                rcu_read_lock();
                sta = sta_info_get_bss(sdata, mgmt->da);
                rcu_read_unlock();
                if (!sta)
                        return -ENOLINK;
                break;
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
                sdata_lock(sdata);
                if (!sdata->u.mgd.associated ||
                    (params->offchan && params->wait &&
                     local->ops->remain_on_channel &&
                     memcmp(sdata->u.mgd.associated->bssid,
                            mgmt->bssid, ETH_ALEN)))
                        need_offchan = true;
                sdata_unlock(sdata);
                break;
        case NL80211_IFTYPE_P2P_DEVICE:
                need_offchan = true;
                break;
        case NL80211_IFTYPE_NAN:
        default:
                return -EOPNOTSUPP;
        }

        /* configurations requiring offchan cannot work if no channel has been
         * specified
         */
        if (need_offchan && !params->chan)
                return -EINVAL;

        mutex_lock(&local->mtx);

        /* Check if the operating channel is the requested channel */
        if (!need_offchan) {
                struct ieee80211_chanctx_conf *chanctx_conf;

                rcu_read_lock();
                chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);

                if (chanctx_conf) {
                        need_offchan = params->chan &&
                                       (params->chan !=
                                        chanctx_conf->def.chan);
                } else if (!params->chan) {
                        ret = -EINVAL;
                        rcu_read_unlock();
                        goto out_unlock;
                } else {
                        need_offchan = true;
                }
                rcu_read_unlock();
        }

        if (need_offchan && !params->offchan) {
                ret = -EBUSY;
                goto out_unlock;
        }

        skb = dev_alloc_skb(local->hw.extra_tx_headroom + params->len);
        if (!skb) {
                ret = -ENOMEM;
                goto out_unlock;
        }
        skb_reserve(skb, local->hw.extra_tx_headroom);

        data = skb_put_data(skb, params->buf, params->len);

        /* Update CSA counters */
        if (sdata->vif.csa_active &&
            (sdata->vif.type == NL80211_IFTYPE_AP ||
             sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
             sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
            params->n_csa_offsets) {
                int i;
                struct beacon_data *beacon = NULL;

                rcu_read_lock();

                if (sdata->vif.type == NL80211_IFTYPE_AP)
                        beacon = rcu_dereference(sdata->u.ap.beacon);
                else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
                        beacon = rcu_dereference(sdata->u.ibss.presp);
                else if (ieee80211_vif_is_mesh(&sdata->vif))
                        beacon = rcu_dereference(sdata->u.mesh.beacon);

                if (beacon)
                        for (i = 0; i < params->n_csa_offsets; i++)
                                data[params->csa_offsets[i]] =
                                        beacon->csa_current_counter;

                rcu_read_unlock();
        }

        IEEE80211_SKB_CB(skb)->flags = flags;

        skb->dev = sdata->dev;

        if (!params->dont_wait_for_ack) {
                /* make a copy to preserve the frame contents
                 * in case of encryption.
                 */
                ret = ieee80211_attach_ack_skb(local, skb, cookie, GFP_KERNEL);
                if (ret) {
                        kfree_skb(skb);
                        goto out_unlock;
                }
        } else {
                /* Assign a dummy non-zero cookie, it's not sent to
                 * userspace in this case but we rely on its value
                 * internally in the need_offchan case to distinguish
                 * mgmt-tx from remain-on-channel.
                 */
                *cookie = 0xffffffff;
        }

        if (!need_offchan) {
                ieee80211_tx_skb(sdata, skb);
                ret = 0;
                goto out_unlock;
        }

        IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN |
                                        IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
        if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
                IEEE80211_SKB_CB(skb)->hw_queue =
                        local->hw.offchannel_tx_hw_queue;

        /* This will handle all kinds of coalescing and immediate TX */
        ret = ieee80211_start_roc_work(local, sdata, params->chan,
                                       params->wait, cookie, skb,
                                       IEEE80211_ROC_TYPE_MGMT_TX);
        if (ret)
                ieee80211_free_txskb(&local->hw, skb);
 out_unlock:
        mutex_unlock(&local->mtx);
        return ret;
}

int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
                                  struct wireless_dev *wdev, u64 cookie)
{
        struct ieee80211_local *local = wiphy_priv(wiphy);

        return ieee80211_cancel_roc(local, cookie, true);
}
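
/* one-time setup of the ROC work items and list */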
void ieee80211_roc_setup(struct ieee80211_local *local)
{
        INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
        INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
        INIT_DELAYED_WORK(&local->roc_work, ieee80211_roc_work);
        INIT_LIST_HEAD(&local->roc_list);
}
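
/*
 * Remove all ROC items belonging to the given interface (or to any interface
 * when sdata is NULL), cancelling or aborting the ones already running.
 */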
void ieee80211_roc_purge(struct ieee80211_local *local,
                         struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_roc_work *roc, *tmp;
        bool work_to_do = false;

        mutex_lock(&local->mtx);
        list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
                if (sdata && roc->sdata != sdata)
                        continue;

                if (roc->started) {
                        if (local->ops->remain_on_channel) {
                                /* can race, so ignore return value */
                                drv_cancel_remain_on_channel(local, sdata);
                                ieee80211_roc_notify_destroy(roc);
                        } else {
                                roc->abort = true;
                                work_to_do = true;
                        }
                } else {
                        ieee80211_roc_notify_destroy(roc);
                }
        }
        if (work_to_do)
                __ieee80211_roc_work(local);
        mutex_unlock(&local->mtx);
}