// SPDX-License-Identifier: GPL-2.0-only
/*
 * Off-channel operation helpers
 *
 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
 * Copyright 2004, Instant802 Networks, Inc.
 * Copyright 2005, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 */
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
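
/*
 * Remain-on-channel ("ROC") requests are tracked as struct ieee80211_roc_work
 * items on local->roc_list.  If the driver implements ops->remain_on_channel,
 * requests are handed to the hardware and the driver reports back through
 * ieee80211_ready_on_channel() and ieee80211_remain_on_channel_expired().
 * Otherwise mac80211 switches to the temporary channel itself and times the
 * operation with the local->roc_work delayed work.
 */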

/*
 * Tell our hardware to disable PS.
 * Optionally inform AP that we will go to sleep so that it will buffer
 * the frames while we are doing off-channel work. This is optional
 * because we *may* be doing work on-operating channel, and want our
 * hardware unconditionally awake, but still let the AP send us normal frames.
 */
static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;

        local->offchannel_ps_enabled = false;

        /* FIXME: what to do when local->pspolling is true? */

        del_timer_sync(&local->dynamic_ps_timer);
        del_timer_sync(&ifmgd->bcn_mon_timer);
        del_timer_sync(&ifmgd->conn_mon_timer);

        cancel_work_sync(&local->dynamic_ps_enable_work);

        if (local->hw.conf.flags & IEEE80211_CONF_PS) {
                local->offchannel_ps_enabled = true;
                local->hw.conf.flags &= ~IEEE80211_CONF_PS;
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
        }

        if (!local->offchannel_ps_enabled ||
            !ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
                /*
                 * If power save was enabled, no need to send a nullfunc
                 * frame because AP knows that we are sleeping. But if the
                 * hardware is creating the nullfunc frame for power save
                 * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
                 * enabled) and power save was enabled, the firmware just
                 * sent a null frame with power save disabled. So we need
                 * to send a new nullfunc frame to inform the AP that we
                 * are again sleeping.
                 */
                ieee80211_send_nullfunc(local, sdata, true);
}

/* inform AP that we are awake again, unless power save is enabled */
static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_local *local = sdata->local;

        if (!local->ps_sdata)
                ieee80211_send_nullfunc(local, sdata, false);
        else if (local->offchannel_ps_enabled) {
                /*
                 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
                 * will send a nullfunc frame with the powersave bit set
                 * even though the AP already knows that we are sleeping.
                 * This could be avoided by sending a null frame with power
                 * save bit disabled before enabling the power save, but
                 * this doesn't gain anything.
                 *
                 * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
                 * to send a nullfunc frame because AP already knows that
                 * we are sleeping, let's just enable power save mode in
                 * hardware.
                 */
                /* TODO: Only set hardware if CONF_PS changed?
                 * TODO: Should we set offchannel_ps_enabled to false?
                 */
                local->hw.conf.flags |= IEEE80211_CONF_PS;
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
        } else if (local->hw.conf.dynamic_ps_timeout > 0) {
                /*
                 * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
                 * had been running before leaving the operating channel,
                 * restart the timer now and send a nullfunc frame to inform
                 * the AP that we are awake.
                 */
                ieee80211_send_nullfunc(local, sdata, false);
                mod_timer(&local->dynamic_ps_timer, jiffies +
                          msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
        }

        ieee80211_sta_reset_beacon_monitor(sdata);
        ieee80211_sta_reset_conn_monitor(sdata);
}

void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
{
        struct ieee80211_sub_if_data *sdata;

        if (WARN_ON(local->use_chanctx))
                return;

        /*
         * notify the AP about us leaving the channel and stop all
         * STA interfaces.
         */

        /*
         * Stop queues and transmit all frames queued by the driver
         * before sending nullfunc to enable powersave at the AP.
         */
        ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
                                        false);
        ieee80211_flush_queues(local, NULL, false);

        mutex_lock(&local->iflist_mtx);
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
                        continue;

                if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
                    sdata->vif.type == NL80211_IFTYPE_NAN)
                        continue;

                if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
                        set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);

                /* Check to see if we should disable beaconing. */
                if (sdata->vif.bss_conf.enable_beacon) {
                        set_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
                                &sdata->state);
                        sdata->vif.bss_conf.enable_beacon = false;
                        ieee80211_bss_info_change_notify(
                                sdata, BSS_CHANGED_BEACON_ENABLED);
                }

                if (sdata->vif.type == NL80211_IFTYPE_STATION &&
                    sdata->u.mgd.associated)
                        ieee80211_offchannel_ps_enable(sdata);
        }
        mutex_unlock(&local->iflist_mtx);
}

void ieee80211_offchannel_return(struct ieee80211_local *local)
{
        struct ieee80211_sub_if_data *sdata;

        if (WARN_ON(local->use_chanctx))
                return;

        mutex_lock(&local->iflist_mtx);
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
                        continue;

                if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
                        clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);

                if (!ieee80211_sdata_running(sdata))
                        continue;

                /* Tell AP we're back */
                if (sdata->vif.type == NL80211_IFTYPE_STATION &&
                    sdata->u.mgd.associated)
                        ieee80211_offchannel_ps_disable(sdata);

                if (test_and_clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
                                       &sdata->state)) {
                        sdata->vif.bss_conf.enable_beacon = true;
                        ieee80211_bss_info_change_notify(
                                sdata, BSS_CHANGED_BEACON_ENABLED);
                }
        }
        mutex_unlock(&local->iflist_mtx);

        ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
                                        false);
}
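
/*
 * Report the end of a ROC item to cfg80211: TX status (not acked) for a
 * frame that was never transmitted, plus the ROC/mgmt-TX expired event,
 * then unlink the item from the list and free it.
 */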

static void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
{
        /* was never transmitted */
        if (roc->frame) {
                cfg80211_mgmt_tx_status(&roc->sdata->wdev, roc->mgmt_tx_cookie,
                                        roc->frame->data, roc->frame->len,
                                        false, GFP_KERNEL);
                ieee80211_free_txskb(&roc->sdata->local->hw, roc->frame);
        }

        if (!roc->mgmt_tx_cookie)
                cfg80211_remain_on_channel_expired(&roc->sdata->wdev,
                                                   roc->cookie, roc->chan,
                                                   GFP_KERNEL);
        else
                cfg80211_tx_mgmt_expired(&roc->sdata->wdev,
                                         roc->mgmt_tx_cookie,
                                         roc->chan, GFP_KERNEL);

        list_del(&roc->list);
        kfree(roc);
}
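
/*
 * End every started ROC item that was aborted, has begun in hardware or
 * has run past its duration, and return the shortest remaining time
 * among the rest (LONG_MAX if none remain).
 */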

static unsigned long ieee80211_end_finished_rocs(struct ieee80211_local *local,
                                                 unsigned long now)
{
        struct ieee80211_roc_work *roc, *tmp;
        long remaining_dur_min = LONG_MAX;

        lockdep_assert_held(&local->mtx);

        list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
                long remaining;

                if (!roc->started)
                        break;

                remaining = roc->start_time +
                            msecs_to_jiffies(roc->duration) -
                            now;

                /* In case of HW ROC, it is possible that the HW finished the
                 * ROC session before the actual requested time. In such a case
                 * end the ROC session (disregarding the remaining time).
                 */
                if (roc->abort || roc->hw_begun || remaining <= 0)
                        ieee80211_roc_notify_destroy(roc);
                else
                        remaining_dur_min = min(remaining_dur_min, remaining);
        }

        return remaining_dur_min;
}

static bool ieee80211_recalc_sw_work(struct ieee80211_local *local,
                                     unsigned long now)
{
        long dur = ieee80211_end_finished_rocs(local, now);

        if (dur == LONG_MAX)
                return false;

        mod_delayed_work(local->workqueue, &local->roc_work, dur);
        return true;
}
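
/*
 * Mark a ROC item as started: record its start time and either transmit
 * the pending management frame or tell cfg80211 that the channel is
 * ready.
 */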

static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
                                         unsigned long start_time)
{
        if (WARN_ON(roc->notified))
                return;

        roc->start_time = start_time;
        roc->started = true;

        if (roc->mgmt_tx_cookie) {
                if (!WARN_ON(!roc->frame)) {
                        ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7,
                                                  roc->chan->band, 0);
                        roc->frame = NULL;
                }
        } else {
                cfg80211_ready_on_channel(&roc->sdata->wdev, roc->cookie,
                                          roc->chan, roc->req_duration,
                                          GFP_KERNEL);
        }

        roc->notified = true;
}

static void ieee80211_hw_roc_start(struct work_struct *work)
{
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local, hw_roc_start);
        struct ieee80211_roc_work *roc;

        mutex_lock(&local->mtx);

        list_for_each_entry(roc, &local->roc_list, list) {
                if (!roc->started)
                        break;

                roc->hw_begun = true;
                ieee80211_handle_roc_started(roc, local->hw_roc_start_time);
        }

        mutex_unlock(&local->mtx);
}

void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
{
        struct ieee80211_local *local = hw_to_local(hw);

        local->hw_roc_start_time = jiffies;

        trace_api_ready_on_channel(local);

        ieee80211_queue_work(hw, &local->hw_roc_start);
}
EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
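
/*
 * Start the first queued ROC item, folding in the durations and types of
 * the immediately following items for the same interface and channel.
 * With hardware ROC support this hands the request to the driver; in the
 * software case the interfaces are stopped (unless we already operate on
 * the requested channel), tmp_channel is set and the roc_work timer is
 * armed for the shortest duration.
 */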

static void _ieee80211_start_next_roc(struct ieee80211_local *local)
{
        struct ieee80211_roc_work *roc, *tmp;
        enum ieee80211_roc_type type;
        u32 min_dur, max_dur;

        lockdep_assert_held(&local->mtx);

        if (WARN_ON(list_empty(&local->roc_list)))
                return;

        roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
                               list);

        if (WARN_ON(roc->started))
                return;

        min_dur = roc->duration;
        max_dur = roc->duration;
        type = roc->type;

        list_for_each_entry(tmp, &local->roc_list, list) {
                if (tmp == roc)
                        continue;
                if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
                        break;
                max_dur = max(tmp->duration, max_dur);
                min_dur = min(tmp->duration, min_dur);
                type = max(tmp->type, type);
        }

        if (local->ops->remain_on_channel) {
                int ret = drv_remain_on_channel(local, roc->sdata, roc->chan,
                                                max_dur, type);

                if (ret) {
                        wiphy_warn(local->hw.wiphy,
                                   "failed to start next HW ROC (%d)\n", ret);
                        /*
                         * queue the work struct again to avoid recursion
                         * when multiple failures occur
                         */
                        list_for_each_entry(tmp, &local->roc_list, list) {
                                if (tmp->sdata != roc->sdata ||
                                    tmp->chan != roc->chan)
                                        break;
                                tmp->started = true;
                                tmp->abort = true;
                        }
                        ieee80211_queue_work(&local->hw, &local->hw_roc_done);
                        return;
                }

                /* we'll notify about the start once the HW calls back */
                list_for_each_entry(tmp, &local->roc_list, list) {
                        if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
                                break;
                        tmp->started = true;
                }
        } else {
                /* If actually operating on the desired channel (with at least
                 * 20 MHz channel width) don't stop all the operations but still
                 * treat it as though the ROC operation started properly, so
                 * other ROC operations won't interfere with this one.
                 */
                roc->on_channel = roc->chan == local->_oper_chandef.chan &&
                                  local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
                                  local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;

                /* start this ROC */
                ieee80211_recalc_idle(local);

                if (!roc->on_channel) {
                        ieee80211_offchannel_stop_vifs(local);

                        local->tmp_channel = roc->chan;
                        ieee80211_hw_config(local, 0);
                }

                ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
                                             msecs_to_jiffies(min_dur));

                /* tell userspace or send frame(s) */
                list_for_each_entry(tmp, &local->roc_list, list) {
                        if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
                                break;

                        tmp->on_channel = roc->on_channel;
                        ieee80211_handle_roc_started(tmp, jiffies);
                }
        }
}

void ieee80211_start_next_roc(struct ieee80211_local *local)
{
        struct ieee80211_roc_work *roc;

        lockdep_assert_held(&local->mtx);

        if (list_empty(&local->roc_list)) {
                ieee80211_run_deferred_scan(local);
                return;
        }

        /* defer roc if driver is not started (i.e. during reconfig) */
        if (local->in_reconfig)
                return;

        roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
                               list);

        if (WARN_ON_ONCE(roc->started))
                return;

        if (local->ops->remain_on_channel) {
                _ieee80211_start_next_roc(local);
        } else {
                /* delay it a bit */
                ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
                                             round_jiffies_relative(HZ/2));
        }
}
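
/*
 * Software ROC handling, called with local->mtx held: start the first
 * item if nothing is running yet, otherwise end expired items and, once
 * none are left running, return to the operating channel and start the
 * next request.
 */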

static void __ieee80211_roc_work(struct ieee80211_local *local)
{
        struct ieee80211_roc_work *roc;
        bool on_channel;

        lockdep_assert_held(&local->mtx);

        if (WARN_ON(local->ops->remain_on_channel))
                return;

        roc = list_first_entry_or_null(&local->roc_list,
                                       struct ieee80211_roc_work, list);
        if (!roc)
                return;

        if (!roc->started) {
                WARN_ON(local->use_chanctx);
                _ieee80211_start_next_roc(local);
        } else {
                on_channel = roc->on_channel;
                if (ieee80211_recalc_sw_work(local, jiffies))
                        return;

                /* careful - roc pointer became invalid during recalc */

                if (!on_channel) {
                        ieee80211_flush_queues(local, NULL, false);

                        local->tmp_channel = NULL;
                        ieee80211_hw_config(local, 0);

                        ieee80211_offchannel_return(local);
                }

                ieee80211_recalc_idle(local);
                ieee80211_start_next_roc(local);
        }
}

static void ieee80211_roc_work(struct work_struct *work)
{
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local, roc_work.work);

        mutex_lock(&local->mtx);
        __ieee80211_roc_work(local);
        mutex_unlock(&local->mtx);
}

static void ieee80211_hw_roc_done(struct work_struct *work)
{
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local, hw_roc_done);

        mutex_lock(&local->mtx);

        ieee80211_end_finished_rocs(local, jiffies);

        /* if there's another roc, start it now */
        ieee80211_start_next_roc(local);

        mutex_unlock(&local->mtx);
}

void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
{
        struct ieee80211_local *local = hw_to_local(hw);

        trace_api_remain_on_channel_expired(local);

        ieee80211_queue_work(hw, &local->hw_roc_done);
}
EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
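
/*
 * Try to fold a new request into a hardware ROC that has already been
 * scheduled or begun; returns true if the new item could be queued
 * behind it.
 */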

static bool
ieee80211_coalesce_hw_started_roc(struct ieee80211_local *local,
                                  struct ieee80211_roc_work *new_roc,
                                  struct ieee80211_roc_work *cur_roc)
{
        unsigned long now = jiffies;
        unsigned long remaining;

        if (WARN_ON(!cur_roc->started))
                return false;

        /* if it was scheduled in the hardware, but not started yet,
         * we can only combine if the older one had a longer duration
         */
        if (!cur_roc->hw_begun && new_roc->duration > cur_roc->duration)
                return false;

        remaining = cur_roc->start_time +
                    msecs_to_jiffies(cur_roc->duration) -
                    now;

        /* if it doesn't fit entirely, schedule a new one */
        if (new_roc->duration > jiffies_to_msecs(remaining))
                return false;

        /* add just after the current one so we combine their finish later */
        list_add(&new_roc->list, &cur_roc->list);

        /* if the existing one has already begun then let this one also
         * begin, otherwise they'll both be marked properly by the work
         * struct that runs once the driver notifies us of the beginning
         */
        if (cur_roc->hw_begun) {
                new_roc->hw_begun = true;
                ieee80211_handle_roc_started(new_roc, now);
        }

        return true;
}
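
/*
 * Common entry point for remain-on-channel and off-channel mgmt-TX
 * requests: allocate the ROC item, assign its cookie, and either start
 * it immediately (empty list, no scan or radar detection) or queue it,
 * combining it with compatible pending or running items where possible.
 */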

static int ieee80211_start_roc_work(struct ieee80211_local *local,
                                    struct ieee80211_sub_if_data *sdata,
                                    struct ieee80211_channel *channel,
                                    unsigned int duration, u64 *cookie,
                                    struct sk_buff *txskb,
                                    enum ieee80211_roc_type type)
{
        struct ieee80211_roc_work *roc, *tmp;
        bool queued = false, combine_started = true;
        int ret;

        lockdep_assert_held(&local->mtx);

        if (local->use_chanctx && !local->ops->remain_on_channel)
                return -EOPNOTSUPP;

        roc = kzalloc(sizeof(*roc), GFP_KERNEL);
        if (!roc)
                return -ENOMEM;

        /*
         * If the duration is zero, then the driver
         * wouldn't actually do anything. Set it to
         * 10 for now.
         *
         * TODO: cancel the off-channel operation
         *       when we get the SKB's TX status and
         *       the wait time was zero before.
         */
        if (!duration)
                duration = 10;

        roc->chan = channel;
        roc->duration = duration;
        roc->req_duration = duration;
        roc->frame = txskb;
        roc->type = type;
        roc->sdata = sdata;

        /*
         * cookie is either the roc cookie (for normal roc)
         * or the SKB (for mgmt TX)
         */
        if (!txskb) {
                roc->cookie = ieee80211_mgmt_tx_cookie(local);
                *cookie = roc->cookie;
        } else {
                roc->mgmt_tx_cookie = *cookie;
        }

        /* if there's no need to queue, handle it immediately */
        if (list_empty(&local->roc_list) &&
            !local->scanning && !ieee80211_is_radar_required(local)) {
                /* if not HW assist, just queue & schedule work */
                if (!local->ops->remain_on_channel) {
                        list_add_tail(&roc->list, &local->roc_list);
                        ieee80211_queue_delayed_work(&local->hw,
                                                     &local->roc_work, 0);
                } else {
                        /* otherwise actually kick it off here
                         * (for error handling)
                         */
                        ret = drv_remain_on_channel(local, sdata, channel,
                                                    duration, type);
                        if (ret) {
                                kfree(roc);
                                return ret;
                        }
                        roc->started = true;
                        list_add_tail(&roc->list, &local->roc_list);
                }

                return 0;
        }

        /* otherwise handle queueing */

        list_for_each_entry(tmp, &local->roc_list, list) {
                if (tmp->chan != channel || tmp->sdata != sdata)
                        continue;

                /*
                 * Extend this ROC if possible: If it hasn't started, add
                 * just after the new one to combine.
                 */
                if (!tmp->started) {
                        list_add(&roc->list, &tmp->list);
                        queued = true;
                        break;
                }

                if (!combine_started)
                        continue;

                if (!local->ops->remain_on_channel) {
                        /* If there's no hardware remain-on-channel, and
                         * doing so won't push us over the maximum r-o-c
                         * we allow, then we can just add the new one to
                         * the list and mark it as having started now.
                         * If it would push over the limit, don't try to
                         * combine with other started ones (that haven't
                         * been running as long) but potentially sort it
                         * with others that had the same fate.
                         */
                        unsigned long now = jiffies;
                        u32 elapsed = jiffies_to_msecs(now - tmp->start_time);
                        struct wiphy *wiphy = local->hw.wiphy;
                        u32 max_roc = wiphy->max_remain_on_channel_duration;

                        if (elapsed + roc->duration > max_roc) {
                                combine_started = false;
                                continue;
                        }

                        list_add(&roc->list, &tmp->list);
                        queued = true;
                        roc->on_channel = tmp->on_channel;
                        ieee80211_handle_roc_started(roc, now);
                        ieee80211_recalc_sw_work(local, now);
                        break;
                }

                queued = ieee80211_coalesce_hw_started_roc(local, roc, tmp);
                if (queued)
                        break;

                /* if it wasn't queued, perhaps it can be combined with
                 * another that also couldn't get combined previously,
                 * but no need to check for already started ones, since
                 * that can't work.
                 */
                combine_started = false;
        }

        if (!queued)
                list_add_tail(&roc->list, &local->roc_list);

        return 0;
}

int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
                                struct ieee80211_channel *chan,
                                unsigned int duration, u64 *cookie)
{
        struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
        struct ieee80211_local *local = sdata->local;
        int ret;

        mutex_lock(&local->mtx);
        ret = ieee80211_start_roc_work(local, sdata, chan,
                                       duration, cookie, NULL,
                                       IEEE80211_ROC_TYPE_NORMAL);
        mutex_unlock(&local->mtx);

        return ret;
}
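
/*
 * Cancel a ROC item identified by its cookie (the ROC cookie or the
 * mgmt-TX cookie).  For hardware ROC the driver is asked to cancel and
 * all started items are destroyed; for software ROC the item is marked
 * aborted and the roc_work is scheduled to run immediately.
 */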

static int ieee80211_cancel_roc(struct ieee80211_local *local,
                                u64 cookie, bool mgmt_tx)
{
        struct ieee80211_roc_work *roc, *tmp, *found = NULL;
        int ret;

        if (!cookie)
                return -ENOENT;

        flush_work(&local->hw_roc_start);

        mutex_lock(&local->mtx);
        list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
                if (!mgmt_tx && roc->cookie != cookie)
                        continue;
                else if (mgmt_tx && roc->mgmt_tx_cookie != cookie)
                        continue;

                found = roc;
                break;
        }

        if (!found) {
                mutex_unlock(&local->mtx);
                return -ENOENT;
        }

        if (!found->started) {
                ieee80211_roc_notify_destroy(found);
                goto out_unlock;
        }

        if (local->ops->remain_on_channel) {
                ret = drv_cancel_remain_on_channel(local);
                if (WARN_ON_ONCE(ret)) {
                        mutex_unlock(&local->mtx);
                        return ret;
                }

                /* TODO:
                 * if multiple items were combined here then we really shouldn't
                 * cancel them all - we should wait for as much time as needed
                 * for the longest remaining one, and only then cancel ...
                 */
                list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
                        if (!roc->started)
                                break;
                        if (roc == found)
                                found = NULL;
                        ieee80211_roc_notify_destroy(roc);
                }

                /* that really must not happen - it was started */
                WARN_ON(found);

                ieee80211_start_next_roc(local);
        } else {
                /* go through work struct to return to the operating channel */
                found->abort = true;
                mod_delayed_work(local->workqueue, &local->roc_work, 0);
        }

 out_unlock:
        mutex_unlock(&local->mtx);

        return 0;
}

int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
                                       struct wireless_dev *wdev, u64 cookie)
{
        struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
        struct ieee80211_local *local = sdata->local;

        return ieee80211_cancel_roc(local, cookie, false);
}

int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                      struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
        struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
        struct sta_info *sta;
        const struct ieee80211_mgmt *mgmt = (void *)params->buf;
        bool need_offchan = false;
        u32 flags;
        int ret;
        u8 *data;

        if (params->dont_wait_for_ack)
                flags = IEEE80211_TX_CTL_NO_ACK;
        else
                flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
                        IEEE80211_TX_CTL_REQ_TX_STATUS;

        if (params->no_cck)
                flags |= IEEE80211_TX_CTL_NO_CCK_RATE;

        switch (sdata->vif.type) {
        case NL80211_IFTYPE_ADHOC:
                if (!sdata->vif.bss_conf.ibss_joined)
                        need_offchan = true;
#ifdef CONFIG_MAC80211_MESH
                /* fall through */
        case NL80211_IFTYPE_MESH_POINT:
                if (ieee80211_vif_is_mesh(&sdata->vif) &&
                    !sdata->u.mesh.mesh_id_len)
                        need_offchan = true;
#endif
                /* fall through */
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_P2P_GO:
                if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
                    !ieee80211_vif_is_mesh(&sdata->vif) &&
                    !rcu_access_pointer(sdata->bss->beacon))
                        need_offchan = true;
                if (!ieee80211_is_action(mgmt->frame_control) ||
                    mgmt->u.action.category == WLAN_CATEGORY_PUBLIC ||
                    mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED ||
                    mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT)
                        break;
                rcu_read_lock();
                sta = sta_info_get_bss(sdata, mgmt->da);
                rcu_read_unlock();
                if (!sta)
                        return -ENOLINK;
                break;
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
                sdata_lock(sdata);
                if (!sdata->u.mgd.associated ||
                    (params->offchan && params->wait &&
                     local->ops->remain_on_channel &&
                     memcmp(sdata->u.mgd.associated->bssid,
                            mgmt->bssid, ETH_ALEN)))
                        need_offchan = true;
                sdata_unlock(sdata);
                break;
        case NL80211_IFTYPE_P2P_DEVICE:
                need_offchan = true;
                break;
        case NL80211_IFTYPE_NAN:
        default:
                return -EOPNOTSUPP;
        }

        /* configurations requiring offchan cannot work if no channel has been
         * specified
         */
        if (need_offchan && !params->chan)
                return -EINVAL;

        mutex_lock(&local->mtx);

        /* Check if the operating channel is the requested channel */
        if (!need_offchan) {
                struct ieee80211_chanctx_conf *chanctx_conf;

                rcu_read_lock();
                chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);

                if (chanctx_conf) {
                        need_offchan = params->chan &&
                                       (params->chan !=
                                        chanctx_conf->def.chan);
                } else if (!params->chan) {
                        ret = -EINVAL;
                        rcu_read_unlock();
                        goto out_unlock;
                } else {
                        need_offchan = true;
                }
                rcu_read_unlock();
        }

        if (need_offchan && !params->offchan) {
                ret = -EBUSY;
                goto out_unlock;
        }

        skb = dev_alloc_skb(local->hw.extra_tx_headroom + params->len);
        if (!skb) {
                ret = -ENOMEM;
                goto out_unlock;
        }
        skb_reserve(skb, local->hw.extra_tx_headroom);

        data = skb_put_data(skb, params->buf, params->len);

        /* Update CSA counters */
        if (sdata->vif.csa_active &&
            (sdata->vif.type == NL80211_IFTYPE_AP ||
             sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
             sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
            params->n_csa_offsets) {
                int i;
                struct beacon_data *beacon = NULL;

                rcu_read_lock();

                if (sdata->vif.type == NL80211_IFTYPE_AP)
                        beacon = rcu_dereference(sdata->u.ap.beacon);
                else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
                        beacon = rcu_dereference(sdata->u.ibss.presp);
                else if (ieee80211_vif_is_mesh(&sdata->vif))
                        beacon = rcu_dereference(sdata->u.mesh.beacon);

                if (beacon)
                        for (i = 0; i < params->n_csa_offsets; i++)
                                data[params->csa_offsets[i]] =
                                        beacon->csa_current_counter;

                rcu_read_unlock();
        }

        IEEE80211_SKB_CB(skb)->flags = flags;

        skb->dev = sdata->dev;

        if (!params->dont_wait_for_ack) {
                /* make a copy to preserve the frame contents
                 * in case of encryption.
                 */
                ret = ieee80211_attach_ack_skb(local, skb, cookie, GFP_KERNEL);
                if (ret) {
                        kfree_skb(skb);
                        goto out_unlock;
                }
        } else {
                /* Assign a dummy non-zero cookie, it's not sent to
                 * userspace in this case but we rely on its value
                 * internally in the need_offchan case to distinguish
                 * mgmt-tx from remain-on-channel.
                 */
                *cookie = 0xffffffff;
        }

        if (!need_offchan) {
                ieee80211_tx_skb(sdata, skb);
                ret = 0;
                goto out_unlock;
        }

        IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN |
                                        IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
        if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
                IEEE80211_SKB_CB(skb)->hw_queue =
                        local->hw.offchannel_tx_hw_queue;

        /* This will handle all kinds of coalescing and immediate TX */
        ret = ieee80211_start_roc_work(local, sdata, params->chan,
                                       params->wait, cookie, skb,
                                       IEEE80211_ROC_TYPE_MGMT_TX);
        if (ret)
                ieee80211_free_txskb(&local->hw, skb);
 out_unlock:
        mutex_unlock(&local->mtx);
        return ret;
}

int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
                                  struct wireless_dev *wdev, u64 cookie)
{
        struct ieee80211_local *local = wiphy_priv(wiphy);

        return ieee80211_cancel_roc(local, cookie, true);
}

void ieee80211_roc_setup(struct ieee80211_local *local)
{
        INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
        INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
        INIT_DELAYED_WORK(&local->roc_work, ieee80211_roc_work);
        INIT_LIST_HEAD(&local->roc_list);
}
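
/*
 * Remove all ROC items, or only those belonging to the given sdata if
 * one is passed; started software items are aborted and completed
 * synchronously via __ieee80211_roc_work(), started hardware items are
 * cancelled in the driver.
 */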

void ieee80211_roc_purge(struct ieee80211_local *local,
                         struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_roc_work *roc, *tmp;
        bool work_to_do = false;

        mutex_lock(&local->mtx);
        list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
                if (sdata && roc->sdata != sdata)
                        continue;

                if (roc->started) {
                        if (local->ops->remain_on_channel) {
                                /* can race, so ignore return value */
                                drv_cancel_remain_on_channel(local);
                                ieee80211_roc_notify_destroy(roc);
                        } else {
                                roc->abort = true;
                                work_to_do = true;
                        }
                } else {
                        ieee80211_roc_notify_destroy(roc);
                }
        }
        if (work_to_do)
                __ieee80211_roc_work(local);
        mutex_unlock(&local->mtx);
}