Linux 4.19.133
[linux/fpc-iii.git] / drivers/net/wireless/ti/wlcore/main.c
blob 2ca5658bbc2abef5a3699b0d191ed3dd05df3777
1 /*
2 * This file is part of wlcore
4 * Copyright (C) 2008-2010 Nokia Corporation
5 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29 #include <linux/pm_runtime.h>
31 #include "wlcore.h"
32 #include "debug.h"
33 #include "wl12xx_80211.h"
34 #include "io.h"
35 #include "tx.h"
36 #include "ps.h"
37 #include "init.h"
38 #include "debugfs.h"
39 #include "testmode.h"
40 #include "vendor_cmd.h"
41 #include "scan.h"
42 #include "hw_ops.h"
43 #include "sysfs.h"
45 #define WL1271_BOOT_RETRIES 3
46 #define WL1271_SUSPEND_SLEEP 100
47 #define WL1271_WAKEUP_TIMEOUT 500
49 static char *fwlog_param;
50 static int fwlog_mem_blocks = -1;
51 static int bug_on_recovery = -1;
52 static int no_recovery = -1;
54 static void __wl1271_op_remove_interface(struct wl1271 *wl,
55 struct ieee80211_vif *vif,
56 bool reset_tx_queues);
57 static void wlcore_op_stop_locked(struct wl1271 *wl);
58 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
60 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
62 int ret;
64 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
65 return -EINVAL;
67 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
68 return 0;
70 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
71 return 0;
73 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
74 if (ret < 0)
75 return ret;
77 wl1271_info("Association completed.");
78 return 0;
81 static void wl1271_reg_notify(struct wiphy *wiphy,
82 struct regulatory_request *request)
84 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
85 struct wl1271 *wl = hw->priv;
87 /* copy the current dfs region */
88 if (request)
89 wl->dfs_region = request->dfs_region;
91 wlcore_regdomain_config(wl);
94 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
95 bool enable)
97 int ret = 0;
99 /* we should hold wl->mutex */
100 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
101 if (ret < 0)
102 goto out;
104 if (enable)
105 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
106 else
107 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
108 out:
109 return ret;
113 * this function is called when the rx_streaming interval
114 * has been changed or rx_streaming should be disabled
116 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
118 int ret = 0;
119 int period = wl->conf.rx_streaming.interval;
121 /* don't reconfigure if rx_streaming is disabled */
122 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
123 goto out;
125 /* reconfigure/disable according to new streaming_period */
126 if (period &&
127 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
128 (wl->conf.rx_streaming.always ||
129 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
130 ret = wl1271_set_rx_streaming(wl, wlvif, true);
131 else {
132 ret = wl1271_set_rx_streaming(wl, wlvif, false);
133 /* don't cancel_work_sync since we might deadlock */
134 del_timer_sync(&wlvif->rx_streaming_timer);
136 out:
137 return ret;
140 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
142 int ret;
143 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
144 rx_streaming_enable_work);
145 struct wl1271 *wl = wlvif->wl;
147 mutex_lock(&wl->mutex);
149 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
150 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
151 (!wl->conf.rx_streaming.always &&
152 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
153 goto out;
155 if (!wl->conf.rx_streaming.interval)
156 goto out;
158 ret = pm_runtime_get_sync(wl->dev);
159 if (ret < 0) {
160 pm_runtime_put_noidle(wl->dev);
161 goto out;
164 ret = wl1271_set_rx_streaming(wl, wlvif, true);
165 if (ret < 0)
166 goto out_sleep;
168 /* stop it after some time of inactivity */
169 mod_timer(&wlvif->rx_streaming_timer,
170 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
172 out_sleep:
173 pm_runtime_mark_last_busy(wl->dev);
174 pm_runtime_put_autosuspend(wl->dev);
175 out:
176 mutex_unlock(&wl->mutex);
179 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
181 int ret;
182 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
183 rx_streaming_disable_work);
184 struct wl1271 *wl = wlvif->wl;
186 mutex_lock(&wl->mutex);
188 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
189 goto out;
191 ret = pm_runtime_get_sync(wl->dev);
192 if (ret < 0) {
193 pm_runtime_put_noidle(wl->dev);
194 goto out;
197 ret = wl1271_set_rx_streaming(wl, wlvif, false);
198 if (ret)
199 goto out_sleep;
201 out_sleep:
202 pm_runtime_mark_last_busy(wl->dev);
203 pm_runtime_put_autosuspend(wl->dev);
204 out:
205 mutex_unlock(&wl->mutex);
208 static void wl1271_rx_streaming_timer(struct timer_list *t)
210 struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
211 struct wl1271 *wl = wlvif->wl;
212 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
215 /* wl->mutex must be taken */
216 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
218 /* if the watchdog is not armed, don't do anything */
219 if (wl->tx_allocated_blocks == 0)
220 return;
222 cancel_delayed_work(&wl->tx_watchdog_work);
223 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
224 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
227 static void wlcore_rc_update_work(struct work_struct *work)
229 int ret;
230 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
231 rc_update_work);
232 struct wl1271 *wl = wlvif->wl;
233 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
235 mutex_lock(&wl->mutex);
237 if (unlikely(wl->state != WLCORE_STATE_ON))
238 goto out;
240 ret = pm_runtime_get_sync(wl->dev);
241 if (ret < 0) {
242 pm_runtime_put_noidle(wl->dev);
243 goto out;
246 if (ieee80211_vif_is_mesh(vif)) {
247 ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
248 true, wlvif->sta.hlid);
249 if (ret < 0)
250 goto out_sleep;
251 } else {
252 wlcore_hw_sta_rc_update(wl, wlvif);
255 out_sleep:
256 pm_runtime_mark_last_busy(wl->dev);
257 pm_runtime_put_autosuspend(wl->dev);
258 out:
259 mutex_unlock(&wl->mutex);
262 static void wl12xx_tx_watchdog_work(struct work_struct *work)
264 struct delayed_work *dwork;
265 struct wl1271 *wl;
267 dwork = to_delayed_work(work);
268 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
270 mutex_lock(&wl->mutex);
272 if (unlikely(wl->state != WLCORE_STATE_ON))
273 goto out;
275 /* Tx went out in the meantime - everything is ok */
276 if (unlikely(wl->tx_allocated_blocks == 0))
277 goto out;
280 * if a ROC is in progress, we might not have any Tx for a long
281 * time (e.g. pending Tx on the non-ROC channels)
283 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
284 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
285 wl->conf.tx.tx_watchdog_timeout);
286 wl12xx_rearm_tx_watchdog_locked(wl);
287 goto out;
291 * if a scan is in progress, we might not have any Tx for a long
292 * time
294 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
295 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
296 wl->conf.tx.tx_watchdog_timeout);
297 wl12xx_rearm_tx_watchdog_locked(wl);
298 goto out;
302 * AP might cache a frame for a long time for a sleeping station,
303 * so rearm the timer if there's an AP interface with stations. If
304 * Tx is genuinely stuck, we will hopefully discover it when all
305 * stations are removed due to inactivity.
307 if (wl->active_sta_count) {
308 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
309 " %d stations",
310 wl->conf.tx.tx_watchdog_timeout,
311 wl->active_sta_count);
312 wl12xx_rearm_tx_watchdog_locked(wl);
313 goto out;
316 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
317 wl->conf.tx.tx_watchdog_timeout);
318 wl12xx_queue_recovery_work(wl);
320 out:
321 mutex_unlock(&wl->mutex);
324 static void wlcore_adjust_conf(struct wl1271 *wl)
327 if (fwlog_param) {
328 if (!strcmp(fwlog_param, "continuous")) {
329 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
330 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
331 } else if (!strcmp(fwlog_param, "dbgpins")) {
332 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
333 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
334 } else if (!strcmp(fwlog_param, "disable")) {
335 wl->conf.fwlog.mem_blocks = 0;
336 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
337 } else {
338 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
342 if (bug_on_recovery != -1)
343 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
345 if (no_recovery != -1)
346 wl->conf.recovery.no_recovery = (u8) no_recovery;
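/*
 * Illustration of wlcore_adjust_conf() above, assuming these variables are
 * registered as module parameters later in this file (e.g. "fwlog",
 * "bug_on_recovery" and "no_recovery"); the mapping could then be driven
 * at load time with something like:
 *
 *   modprobe wlcore fwlog=continuous bug_on_recovery=0 no_recovery=0
 *
 * "continuous" routes the FW log to the host, "dbgpins" routes it to the
 * debug pins, "disable" turns the FW logger off, and the -1 defaults leave
 * the platform configuration untouched.
 */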
349 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
350 struct wl12xx_vif *wlvif,
351 u8 hlid, u8 tx_pkts)
353 bool fw_ps;
355 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
358 * Wake up from high-level PS if the STA is asleep with too few
359 * packets in FW or if the STA is awake.
361 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
362 wl12xx_ps_link_end(wl, wlvif, hlid);
365 * Start high-level PS if the STA is asleep with enough blocks in FW.
366 * Make an exception if this is the only connected link. In this
367 * case FW-memory congestion is less of a problem.
368 * Note that a single connected STA means 2*ap_count + 1 active links,
369 * since we must account for the global and broadcast AP links
370 * for each AP. The "fw_ps" check assures us the other link is a STA
371 * connected to the AP. Otherwise the FW would not set the PSM bit.
373 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
374 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
375 wl12xx_ps_link_start(wl, wlvif, hlid, true);
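/*
 * Worked example for the check above: with a single AP interface
 * (ap_count == 1), the global and broadcast AP links plus one connected
 * STA give 2 * 1 + 1 = 3 active links. High-level PS is therefore only
 * started once active_link_count exceeds 3, i.e. when more than one peer
 * is connected and FW memory congestion is actually a concern.
 */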
378 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
379 struct wl12xx_vif *wlvif,
380 struct wl_fw_status *status)
382 unsigned long cur_fw_ps_map;
383 u8 hlid;
385 cur_fw_ps_map = status->link_ps_bitmap;
386 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
387 wl1271_debug(DEBUG_PSM,
388 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
389 wl->ap_fw_ps_map, cur_fw_ps_map,
390 wl->ap_fw_ps_map ^ cur_fw_ps_map);
392 wl->ap_fw_ps_map = cur_fw_ps_map;
395 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
396 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
397 wl->links[hlid].allocated_pkts);
400 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
402 struct wl12xx_vif *wlvif;
403 u32 old_tx_blk_count = wl->tx_blocks_available;
404 int avail, freed_blocks;
405 int i;
406 int ret;
407 struct wl1271_link *lnk;
409 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
410 wl->raw_fw_status,
411 wl->fw_status_len, false);
412 if (ret < 0)
413 return ret;
415 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
417 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
418 "drv_rx_counter = %d, tx_results_counter = %d)",
419 status->intr,
420 status->fw_rx_counter,
421 status->drv_rx_counter,
422 status->tx_results_counter);
424 for (i = 0; i < NUM_TX_QUEUES; i++) {
425 /* prevent wrap-around in freed-packets counter */
426 wl->tx_allocated_pkts[i] -=
427 (status->counters.tx_released_pkts[i] -
428 wl->tx_pkts_freed[i]) & 0xff;
430 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
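/*
 * Worked example of the wrap-around handling above: the freed-packets
 * counters wrap at 8 bits (hence the & 0xff), so if the last seen value
 * was 0xf0 and the FW now reports 0x05, (0x05 - 0xf0) & 0xff = 0x15 = 21
 * packets were freed even though the raw subtraction is negative. The
 * per-link loop below uses the same masking, and the total-blocks path
 * uses a 32-bit variant.
 */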
434 for_each_set_bit(i, wl->links_map, wl->num_links) {
435 u8 diff;
436 lnk = &wl->links[i];
438 /* prevent wrap-around in freed-packets counter */
439 diff = (status->counters.tx_lnk_free_pkts[i] -
440 lnk->prev_freed_pkts) & 0xff;
442 if (diff == 0)
443 continue;
445 lnk->allocated_pkts -= diff;
446 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
448 /* accumulate the prev_freed_pkts counter */
449 lnk->total_freed_pkts += diff;
452 /* prevent wrap-around in total blocks counter */
453 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
454 freed_blocks = status->total_released_blks -
455 wl->tx_blocks_freed;
456 else
457 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
458 status->total_released_blks;
460 wl->tx_blocks_freed = status->total_released_blks;
462 wl->tx_allocated_blocks -= freed_blocks;
465 * If the FW freed some blocks:
466 * If we still have allocated blocks - re-arm the timer, Tx is
467 * not stuck. Otherwise, cancel the timer (no Tx currently).
469 if (freed_blocks) {
470 if (wl->tx_allocated_blocks)
471 wl12xx_rearm_tx_watchdog_locked(wl);
472 else
473 cancel_delayed_work(&wl->tx_watchdog_work);
476 avail = status->tx_total - wl->tx_allocated_blocks;
479 * The FW might change the total number of TX memblocks before
480 * we get a notification about blocks being released. Thus, the
481 * available blocks calculation might yield a temporary result
482 * which is lower than the actual available blocks. Keeping in
483 * mind that only blocks that were allocated can be moved from
484 * TX to RX, tx_blocks_available should never decrease here.
486 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
487 avail);
489 /* if more blocks are available now, tx work can be scheduled */
490 if (wl->tx_blocks_available > old_tx_blk_count)
491 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
493 /* for AP update num of allocated TX blocks per link and ps status */
494 wl12xx_for_each_wlvif_ap(wl, wlvif) {
495 wl12xx_irq_update_links_status(wl, wlvif, status);
498 /* update the host-chipset time offset */
499 wl->time_offset = (ktime_get_boot_ns() >> 10) -
500 (s64)(status->fw_localtime);
502 wl->fw_fast_lnk_map = status->link_fast_bitmap;
504 return 0;
507 static void wl1271_flush_deferred_work(struct wl1271 *wl)
509 struct sk_buff *skb;
511 /* Pass all received frames to the network stack */
512 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
513 ieee80211_rx_ni(wl->hw, skb);
515 /* Return sent skbs to the network stack */
516 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
517 ieee80211_tx_status_ni(wl->hw, skb);
520 static void wl1271_netstack_work(struct work_struct *work)
522 struct wl1271 *wl =
523 container_of(work, struct wl1271, netstack_work);
525 do {
526 wl1271_flush_deferred_work(wl);
527 } while (skb_queue_len(&wl->deferred_rx_queue));
530 #define WL1271_IRQ_MAX_LOOPS 256
532 static int wlcore_irq_locked(struct wl1271 *wl)
534 int ret = 0;
535 u32 intr;
536 int loopcount = WL1271_IRQ_MAX_LOOPS;
537 bool done = false;
538 unsigned int defer_count;
539 unsigned long flags;
542 * In case an edge-triggered interrupt must be used, we cannot iterate
543 * more than once without introducing race conditions with the hardirq.
545 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
546 loopcount = 1;
548 wl1271_debug(DEBUG_IRQ, "IRQ work");
550 if (unlikely(wl->state != WLCORE_STATE_ON))
551 goto out;
553 ret = pm_runtime_get_sync(wl->dev);
554 if (ret < 0) {
555 pm_runtime_put_noidle(wl->dev);
556 goto out;
559 while (!done && loopcount--) {
561 * In order to avoid a race with the hardirq, clear the flag
562 * before acknowledging the chip.
564 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
565 smp_mb__after_atomic();
567 ret = wlcore_fw_status(wl, wl->fw_status);
568 if (ret < 0)
569 goto out;
571 wlcore_hw_tx_immediate_compl(wl);
573 intr = wl->fw_status->intr;
574 intr &= WLCORE_ALL_INTR_MASK;
575 if (!intr) {
576 done = true;
577 continue;
580 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
581 wl1271_error("HW watchdog interrupt received! starting recovery.");
582 wl->watchdog_recovery = true;
583 ret = -EIO;
585 /* restarting the chip. ignore any other interrupt. */
586 goto out;
589 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
590 wl1271_error("SW watchdog interrupt received! "
591 "starting recovery.");
592 wl->watchdog_recovery = true;
593 ret = -EIO;
595 /* restarting the chip. ignore any other interrupt. */
596 goto out;
599 if (likely(intr & WL1271_ACX_INTR_DATA)) {
600 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
602 ret = wlcore_rx(wl, wl->fw_status);
603 if (ret < 0)
604 goto out;
606 /* Check if any tx blocks were freed */
607 spin_lock_irqsave(&wl->wl_lock, flags);
608 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
609 wl1271_tx_total_queue_count(wl) > 0) {
610 spin_unlock_irqrestore(&wl->wl_lock, flags);
612 * In order to avoid starvation of the TX path,
613 * call the work function directly.
615 ret = wlcore_tx_work_locked(wl);
616 if (ret < 0)
617 goto out;
618 } else {
619 spin_unlock_irqrestore(&wl->wl_lock, flags);
622 /* check for tx results */
623 ret = wlcore_hw_tx_delayed_compl(wl);
624 if (ret < 0)
625 goto out;
627 /* Make sure the deferred queues don't get too long */
628 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
629 skb_queue_len(&wl->deferred_rx_queue);
630 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
631 wl1271_flush_deferred_work(wl);
634 if (intr & WL1271_ACX_INTR_EVENT_A) {
635 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
636 ret = wl1271_event_handle(wl, 0);
637 if (ret < 0)
638 goto out;
641 if (intr & WL1271_ACX_INTR_EVENT_B) {
642 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
643 ret = wl1271_event_handle(wl, 1);
644 if (ret < 0)
645 goto out;
648 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
649 wl1271_debug(DEBUG_IRQ,
650 "WL1271_ACX_INTR_INIT_COMPLETE");
652 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
653 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
656 pm_runtime_mark_last_busy(wl->dev);
657 pm_runtime_put_autosuspend(wl->dev);
659 out:
660 return ret;
663 static irqreturn_t wlcore_irq(int irq, void *cookie)
665 int ret;
666 unsigned long flags;
667 struct wl1271 *wl = cookie;
669 /* complete the ELP completion */
670 spin_lock_irqsave(&wl->wl_lock, flags);
671 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
672 if (wl->elp_compl) {
673 complete(wl->elp_compl);
674 wl->elp_compl = NULL;
677 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
678 /* don't enqueue a work right now. mark it as pending */
679 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
680 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
681 disable_irq_nosync(wl->irq);
682 pm_wakeup_event(wl->dev, 0);
683 spin_unlock_irqrestore(&wl->wl_lock, flags);
684 return IRQ_HANDLED;
686 spin_unlock_irqrestore(&wl->wl_lock, flags);
688 /* TX might be handled here, avoid redundant work */
689 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
690 cancel_work_sync(&wl->tx_work);
692 mutex_lock(&wl->mutex);
694 ret = wlcore_irq_locked(wl);
695 if (ret)
696 wl12xx_queue_recovery_work(wl);
698 spin_lock_irqsave(&wl->wl_lock, flags);
699 /* In case TX was not handled here, queue TX work */
700 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
701 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
702 wl1271_tx_total_queue_count(wl) > 0)
703 ieee80211_queue_work(wl->hw, &wl->tx_work);
704 spin_unlock_irqrestore(&wl->wl_lock, flags);
706 mutex_unlock(&wl->mutex);
708 return IRQ_HANDLED;
711 struct vif_counter_data {
712 u8 counter;
714 struct ieee80211_vif *cur_vif;
715 bool cur_vif_running;
718 static void wl12xx_vif_count_iter(void *data, u8 *mac,
719 struct ieee80211_vif *vif)
721 struct vif_counter_data *counter = data;
723 counter->counter++;
724 if (counter->cur_vif == vif)
725 counter->cur_vif_running = true;
728 /* caller must not hold wl->mutex, as it might deadlock */
729 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
730 struct ieee80211_vif *cur_vif,
731 struct vif_counter_data *data)
733 memset(data, 0, sizeof(*data));
734 data->cur_vif = cur_vif;
736 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
737 wl12xx_vif_count_iter, data);
740 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
742 const struct firmware *fw;
743 const char *fw_name;
744 enum wl12xx_fw_type fw_type;
745 int ret;
747 if (plt) {
748 fw_type = WL12XX_FW_TYPE_PLT;
749 fw_name = wl->plt_fw_name;
750 } else {
752 * we can't call wl12xx_get_vif_count() here because
753 * wl->mutex is taken, so use the cached last_vif_count value
755 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
756 fw_type = WL12XX_FW_TYPE_MULTI;
757 fw_name = wl->mr_fw_name;
758 } else {
759 fw_type = WL12XX_FW_TYPE_NORMAL;
760 fw_name = wl->sr_fw_name;
764 if (wl->fw_type == fw_type)
765 return 0;
767 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
769 ret = request_firmware(&fw, fw_name, wl->dev);
771 if (ret < 0) {
772 wl1271_error("could not get firmware %s: %d", fw_name, ret);
773 return ret;
776 if (fw->size % 4) {
777 wl1271_error("firmware size is not multiple of 32 bits: %zu",
778 fw->size);
779 ret = -EILSEQ;
780 goto out;
783 vfree(wl->fw);
784 wl->fw_type = WL12XX_FW_TYPE_NONE;
785 wl->fw_len = fw->size;
786 wl->fw = vmalloc(wl->fw_len);
788 if (!wl->fw) {
789 wl1271_error("could not allocate memory for the firmware");
790 ret = -ENOMEM;
791 goto out;
794 memcpy(wl->fw, fw->data, wl->fw_len);
795 ret = 0;
796 wl->fw_type = fw_type;
797 out:
798 release_firmware(fw);
800 return ret;
803 void wl12xx_queue_recovery_work(struct wl1271 *wl)
805 /* Avoid a recursive recovery */
806 if (wl->state == WLCORE_STATE_ON) {
807 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
808 &wl->flags));
810 wl->state = WLCORE_STATE_RESTARTING;
811 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
812 ieee80211_queue_work(wl->hw, &wl->recovery_work);
816 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
818 size_t len;
820 /* Make sure we have enough room */
821 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
823 /* Fill the FW log file, consumed by the sysfs fwlog entry */
824 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
825 wl->fwlog_size += len;
827 return len;
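/*
 * Example for wl12xx_copy_fwlog() above, assuming PAGE_SIZE is 4096: if
 * wl->fwlog_size is already 4000 and a 256-byte memblock arrives, then
 * len = min(256, 4096 - 4000) = 96, so only 96 bytes are appended and the
 * sysfs fwlog buffer never grows beyond one page.
 */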
830 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
832 u32 end_of_log = 0;
833 int error;
835 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
836 return;
838 wl1271_info("Reading FW panic log");
841 * Make sure the chip is awake and the logger isn't active.
842 * Do not send a stop fwlog command if the fw is hung or if
843 * dbgpins are used (due to a fw bug).
845 error = pm_runtime_get_sync(wl->dev);
846 if (error < 0) {
847 pm_runtime_put_noidle(wl->dev);
848 return;
850 if (!wl->watchdog_recovery &&
851 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
852 wl12xx_cmd_stop_fwlog(wl);
854 /* Traverse the memory blocks linked list */
855 do {
856 end_of_log = wlcore_event_fw_logger(wl);
857 if (end_of_log == 0) {
858 msleep(100);
859 end_of_log = wlcore_event_fw_logger(wl);
861 } while (end_of_log != 0);
864 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
865 u8 hlid, struct ieee80211_sta *sta)
867 struct wl1271_station *wl_sta;
868 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
870 wl_sta = (void *)sta->drv_priv;
871 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
874 * increment the initial seq number on recovery to account for
875 * transmitted packets that we haven't yet got in the FW status
877 if (wlvif->encryption_type == KEY_GEM)
878 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
880 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
881 wl_sta->total_freed_pkts += sqn_recovery_padding;
884 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
885 struct wl12xx_vif *wlvif,
886 u8 hlid, const u8 *addr)
888 struct ieee80211_sta *sta;
889 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
891 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
892 is_zero_ether_addr(addr)))
893 return;
895 rcu_read_lock();
896 sta = ieee80211_find_sta(vif, addr);
897 if (sta)
898 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
899 rcu_read_unlock();
902 static void wlcore_print_recovery(struct wl1271 *wl)
904 u32 pc = 0;
905 u32 hint_sts = 0;
906 int ret;
908 wl1271_info("Hardware recovery in progress. FW ver: %s",
909 wl->chip.fw_ver_str);
911 /* change partitions momentarily so we can read the FW pc */
912 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
913 if (ret < 0)
914 return;
916 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
917 if (ret < 0)
918 return;
920 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
921 if (ret < 0)
922 return;
924 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
925 pc, hint_sts, ++wl->recovery_count);
927 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
931 static void wl1271_recovery_work(struct work_struct *work)
933 struct wl1271 *wl =
934 container_of(work, struct wl1271, recovery_work);
935 struct wl12xx_vif *wlvif;
936 struct ieee80211_vif *vif;
937 int error;
939 mutex_lock(&wl->mutex);
941 if (wl->state == WLCORE_STATE_OFF || wl->plt)
942 goto out_unlock;
944 error = pm_runtime_get_sync(wl->dev);
945 if (error < 0) {
946 wl1271_warning("Enable for recovery failed");
947 pm_runtime_put_noidle(wl->dev);
949 wlcore_disable_interrupts_nosync(wl);
951 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
952 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
953 wl12xx_read_fwlog_panic(wl);
954 wlcore_print_recovery(wl);
957 BUG_ON(wl->conf.recovery.bug_on_recovery &&
958 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
960 clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
962 if (wl->conf.recovery.no_recovery) {
963 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
964 goto out_unlock;
967 /* Prevent spurious TX during FW restart */
968 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
970 /* reboot the chipset */
971 while (!list_empty(&wl->wlvif_list)) {
972 wlvif = list_first_entry(&wl->wlvif_list,
973 struct wl12xx_vif, list);
974 vif = wl12xx_wlvif_to_vif(wlvif);
976 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
977 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
978 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
979 vif->bss_conf.bssid);
982 __wl1271_op_remove_interface(wl, vif, false);
985 wlcore_op_stop_locked(wl);
986 pm_runtime_mark_last_busy(wl->dev);
987 pm_runtime_put_autosuspend(wl->dev);
989 ieee80211_restart_hw(wl->hw);
992 * It's safe to enable TX now - the queues are stopped after a request
993 * to restart the HW.
995 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
997 out_unlock:
998 wl->watchdog_recovery = false;
999 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1000 mutex_unlock(&wl->mutex);
1003 static int wlcore_fw_wakeup(struct wl1271 *wl)
1005 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1008 static int wl1271_setup(struct wl1271 *wl)
1010 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1011 if (!wl->raw_fw_status)
1012 goto err;
1014 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1015 if (!wl->fw_status)
1016 goto err;
1018 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1019 if (!wl->tx_res_if)
1020 goto err;
1022 return 0;
1023 err:
1024 kfree(wl->fw_status);
1025 kfree(wl->raw_fw_status);
1026 return -ENOMEM;
1029 static int wl12xx_set_power_on(struct wl1271 *wl)
1031 int ret;
1033 msleep(WL1271_PRE_POWER_ON_SLEEP);
1034 ret = wl1271_power_on(wl);
1035 if (ret < 0)
1036 goto out;
1037 msleep(WL1271_POWER_ON_SLEEP);
1038 wl1271_io_reset(wl);
1039 wl1271_io_init(wl);
1041 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1042 if (ret < 0)
1043 goto fail;
1045 /* ELP module wake up */
1046 ret = wlcore_fw_wakeup(wl);
1047 if (ret < 0)
1048 goto fail;
1050 out:
1051 return ret;
1053 fail:
1054 wl1271_power_off(wl);
1055 return ret;
1058 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1060 int ret = 0;
1062 ret = wl12xx_set_power_on(wl);
1063 if (ret < 0)
1064 goto out;
1067 * For wl127x based devices we could use the default block
1068 * size (512 bytes), but due to a bug in the sdio driver, we
1069 * need to set it explicitly after the chip is powered on. To
1070 * simplify the code and since the performance impact is
1071 * negligible, we use the same block size for all different
1072 * chip types.
1074 * Check if the bus supports blocksize alignment and, if it
1075 * doesn't, make sure we don't have the quirk.
1077 if (!wl1271_set_block_size(wl))
1078 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1080 /* TODO: make sure the lower driver has set things up correctly */
1082 ret = wl1271_setup(wl);
1083 if (ret < 0)
1084 goto out;
1086 ret = wl12xx_fetch_firmware(wl, plt);
1087 if (ret < 0) {
1088 kfree(wl->fw_status);
1089 kfree(wl->raw_fw_status);
1090 kfree(wl->tx_res_if);
1093 out:
1094 return ret;
1097 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1099 int retries = WL1271_BOOT_RETRIES;
1100 struct wiphy *wiphy = wl->hw->wiphy;
1102 static const char* const PLT_MODE[] = {
1103 "PLT_OFF",
1104 "PLT_ON",
1105 "PLT_FEM_DETECT",
1106 "PLT_CHIP_AWAKE"
1109 int ret;
1111 mutex_lock(&wl->mutex);
1113 wl1271_notice("power up");
1115 if (wl->state != WLCORE_STATE_OFF) {
1116 wl1271_error("cannot go into PLT state because not "
1117 "in off state: %d", wl->state);
1118 ret = -EBUSY;
1119 goto out;
1122 /* Indicate to lower levels that we are now in PLT mode */
1123 wl->plt = true;
1124 wl->plt_mode = plt_mode;
1126 while (retries) {
1127 retries--;
1128 ret = wl12xx_chip_wakeup(wl, true);
1129 if (ret < 0)
1130 goto power_off;
1132 if (plt_mode != PLT_CHIP_AWAKE) {
1133 ret = wl->ops->plt_init(wl);
1134 if (ret < 0)
1135 goto power_off;
1138 wl->state = WLCORE_STATE_ON;
1139 wl1271_notice("firmware booted in PLT mode %s (%s)",
1140 PLT_MODE[plt_mode],
1141 wl->chip.fw_ver_str);
1143 /* update hw/fw version info in wiphy struct */
1144 wiphy->hw_version = wl->chip.id;
1145 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1146 sizeof(wiphy->fw_version));
1148 goto out;
1150 power_off:
1151 wl1271_power_off(wl);
1154 wl->plt = false;
1155 wl->plt_mode = PLT_OFF;
1157 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1158 WL1271_BOOT_RETRIES);
1159 out:
1160 mutex_unlock(&wl->mutex);
1162 return ret;
1165 int wl1271_plt_stop(struct wl1271 *wl)
1167 int ret = 0;
1169 wl1271_notice("power down");
1172 * Interrupts must be disabled before setting the state to OFF.
1173 * Otherwise, the interrupt handler might be called and exit without
1174 * reading the interrupt status.
1176 wlcore_disable_interrupts(wl);
1177 mutex_lock(&wl->mutex);
1178 if (!wl->plt) {
1179 mutex_unlock(&wl->mutex);
1182 * This will not necessarily enable interrupts as interrupts
1183 * may have been disabled when op_stop was called. It will,
1184 * however, balance the above call to disable_interrupts().
1186 wlcore_enable_interrupts(wl);
1188 wl1271_error("cannot power down because not in PLT "
1189 "state: %d", wl->state);
1190 ret = -EBUSY;
1191 goto out;
1194 mutex_unlock(&wl->mutex);
1196 wl1271_flush_deferred_work(wl);
1197 cancel_work_sync(&wl->netstack_work);
1198 cancel_work_sync(&wl->recovery_work);
1199 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1201 mutex_lock(&wl->mutex);
1202 wl1271_power_off(wl);
1203 wl->flags = 0;
1204 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1205 wl->state = WLCORE_STATE_OFF;
1206 wl->plt = false;
1207 wl->plt_mode = PLT_OFF;
1208 wl->rx_counter = 0;
1209 mutex_unlock(&wl->mutex);
1211 out:
1212 return ret;
1215 static void wl1271_op_tx(struct ieee80211_hw *hw,
1216 struct ieee80211_tx_control *control,
1217 struct sk_buff *skb)
1219 struct wl1271 *wl = hw->priv;
1220 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1221 struct ieee80211_vif *vif = info->control.vif;
1222 struct wl12xx_vif *wlvif = NULL;
1223 unsigned long flags;
1224 int q, mapping;
1225 u8 hlid;
1227 if (!vif) {
1228 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1229 ieee80211_free_txskb(hw, skb);
1230 return;
1233 wlvif = wl12xx_vif_to_data(vif);
1234 mapping = skb_get_queue_mapping(skb);
1235 q = wl1271_tx_get_queue(mapping);
1237 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1239 spin_lock_irqsave(&wl->wl_lock, flags);
1242 * drop the packet if the link is invalid or the queue is stopped
1243 * for any reason but watermark. Watermark is a "soft"-stop so we
1244 * allow these packets through.
1246 if (hlid == WL12XX_INVALID_LINK_ID ||
1247 (!test_bit(hlid, wlvif->links_map)) ||
1248 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1249 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1250 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1251 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1252 ieee80211_free_txskb(hw, skb);
1253 goto out;
1256 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1257 hlid, q, skb->len);
1258 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1260 wl->tx_queue_count[q]++;
1261 wlvif->tx_queue_count[q]++;
1264 * The workqueue is slow to process the tx_queue and we need to stop
1265 * the queue here, otherwise the queue will get too long.
1267 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1268 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1269 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1270 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1271 wlcore_stop_queue_locked(wl, wlvif, q,
1272 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1276 * The chip-specific setup must run before the first TX packet -
1277 * before that, the tx_work will not be initialized!
1280 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1281 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1282 ieee80211_queue_work(wl->hw, &wl->tx_work);
1284 out:
1285 spin_unlock_irqrestore(&wl->wl_lock, flags);
1288 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1290 unsigned long flags;
1291 int q;
1293 /* no need to queue a new dummy packet if one is already pending */
1294 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1295 return 0;
1297 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1299 spin_lock_irqsave(&wl->wl_lock, flags);
1300 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1301 wl->tx_queue_count[q]++;
1302 spin_unlock_irqrestore(&wl->wl_lock, flags);
1304 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1305 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1306 return wlcore_tx_work_locked(wl);
1309 * If the FW TX is busy, TX work will be scheduled by the threaded
1310 * interrupt handler function
1312 return 0;
1316 * The size of the dummy packet should be at least 1400 bytes. However, in
1317 * order to minimize the number of bus transactions, aligning it to 512-byte
1318 * boundaries could be beneficial, performance-wise.
1320 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
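/*
 * With the values above, ALIGN(1400, 512) rounds 1400 up to the next
 * multiple of 512, so TOTAL_TX_DUMMY_PACKET_SIZE evaluates to 1536 bytes.
 * wl12xx_alloc_dummy_packet() below carves the TX HW descriptor and the
 * 802.11 header out of that total.
 */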
1322 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1324 struct sk_buff *skb;
1325 struct ieee80211_hdr_3addr *hdr;
1326 unsigned int dummy_packet_size;
1328 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1329 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1331 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1332 if (!skb) {
1333 wl1271_warning("Failed to allocate a dummy packet skb");
1334 return NULL;
1337 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1339 hdr = skb_put_zero(skb, sizeof(*hdr));
1340 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1341 IEEE80211_STYPE_NULLFUNC |
1342 IEEE80211_FCTL_TODS);
1344 skb_put_zero(skb, dummy_packet_size);
1346 /* Dummy packets require the TID to be management */
1347 skb->priority = WL1271_TID_MGMT;
1349 /* Initialize all fields that might be used */
1350 skb_set_queue_mapping(skb, 0);
1351 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1353 return skb;
1357 static int
1358 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1360 int num_fields = 0, in_field = 0, fields_size = 0;
1361 int i, pattern_len = 0;
1363 if (!p->mask) {
1364 wl1271_warning("No mask in WoWLAN pattern");
1365 return -EINVAL;
1369 * The pattern is broken up into segments of bytes at different offsets
1370 * that need to be checked by the FW filter. Each segment is called
1371 * a field in the FW API. We verify that the total number of fields
1372 * required for this pattern won't exceed FW limits (8)
1373 * as well as the total fields buffer won't exceed the FW limit.
1374 * Note that if there's a pattern which crosses Ethernet/IP header
1375 * boundary a new field is required.
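 *
 * Worked example (assuming WL1271_RX_FILTER_ETH_HEADER_SIZE is 14): a mask
 * selecting bytes 12..15 of the pattern describes one contiguous run, but
 * because it crosses the Ethernet/IP boundary at byte 14 the loop below
 * counts it as two fields - bytes 12..13 matched against the Ethernet
 * header and bytes 14..15 against the IP header.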
1377 for (i = 0; i < p->pattern_len; i++) {
1378 if (test_bit(i, (unsigned long *)p->mask)) {
1379 if (!in_field) {
1380 in_field = 1;
1381 pattern_len = 1;
1382 } else {
1383 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1384 num_fields++;
1385 fields_size += pattern_len +
1386 RX_FILTER_FIELD_OVERHEAD;
1387 pattern_len = 1;
1388 } else
1389 pattern_len++;
1391 } else {
1392 if (in_field) {
1393 in_field = 0;
1394 fields_size += pattern_len +
1395 RX_FILTER_FIELD_OVERHEAD;
1396 num_fields++;
1401 if (in_field) {
1402 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1403 num_fields++;
1406 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1407 wl1271_warning("RX Filter too complex. Too many segments");
1408 return -EINVAL;
1411 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1412 wl1271_warning("RX filter pattern is too big");
1413 return -E2BIG;
1416 return 0;
1419 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1421 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1424 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1426 int i;
1428 if (filter == NULL)
1429 return;
1431 for (i = 0; i < filter->num_fields; i++)
1432 kfree(filter->fields[i].pattern);
1434 kfree(filter);
1437 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1438 u16 offset, u8 flags,
1439 const u8 *pattern, u8 len)
1441 struct wl12xx_rx_filter_field *field;
1443 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1444 wl1271_warning("Max fields per RX filter. can't alloc another");
1445 return -EINVAL;
1448 field = &filter->fields[filter->num_fields];
1450 field->pattern = kzalloc(len, GFP_KERNEL);
1451 if (!field->pattern) {
1452 wl1271_warning("Failed to allocate RX filter pattern");
1453 return -ENOMEM;
1456 filter->num_fields++;
1458 field->offset = cpu_to_le16(offset);
1459 field->flags = flags;
1460 field->len = len;
1461 memcpy(field->pattern, pattern, len);
1463 return 0;
1466 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1468 int i, fields_size = 0;
1470 for (i = 0; i < filter->num_fields; i++)
1471 fields_size += filter->fields[i].len +
1472 sizeof(struct wl12xx_rx_filter_field) -
1473 sizeof(u8 *);
1475 return fields_size;
1478 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1479 u8 *buf)
1481 int i;
1482 struct wl12xx_rx_filter_field *field;
1484 for (i = 0; i < filter->num_fields; i++) {
1485 field = (struct wl12xx_rx_filter_field *)buf;
1487 field->offset = filter->fields[i].offset;
1488 field->flags = filter->fields[i].flags;
1489 field->len = filter->fields[i].len;
1491 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1492 buf += sizeof(struct wl12xx_rx_filter_field) -
1493 sizeof(u8 *) + field->len;
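/*
 * Sketch of the buffer produced by wl1271_rx_filter_flatten_fields() above,
 * assuming the pattern pointer is the last member of
 * struct wl12xx_rx_filter_field: each field is serialized as its fixed
 * header (offset/flags/len, i.e. sizeof(struct) - sizeof(u8 *) bytes)
 * immediately followed by len pattern bytes, with the records packed back
 * to back. wl1271_rx_filter_get_fields_size() computes the size of exactly
 * this flattened form.
 */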
1498 * Allocates an RX filter, returned through f,
1499 * which needs to be freed using rx_filter_free()
1501 static int
1502 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1503 struct wl12xx_rx_filter **f)
1505 int i, j, ret = 0;
1506 struct wl12xx_rx_filter *filter;
1507 u16 offset;
1508 u8 flags, len;
1510 filter = wl1271_rx_filter_alloc();
1511 if (!filter) {
1512 wl1271_warning("Failed to alloc rx filter");
1513 ret = -ENOMEM;
1514 goto err;
1517 i = 0;
1518 while (i < p->pattern_len) {
1519 if (!test_bit(i, (unsigned long *)p->mask)) {
1520 i++;
1521 continue;
1524 for (j = i; j < p->pattern_len; j++) {
1525 if (!test_bit(j, (unsigned long *)p->mask))
1526 break;
1528 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1529 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1530 break;
1533 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1534 offset = i;
1535 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1536 } else {
1537 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1538 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1541 len = j - i;
1543 ret = wl1271_rx_filter_alloc_field(filter,
1544 offset,
1545 flags,
1546 &p->pattern[i], len);
1547 if (ret)
1548 goto err;
1550 i = j;
1553 filter->action = FILTER_SIGNAL;
1555 *f = filter;
1556 return 0;
1558 err:
1559 wl1271_rx_filter_free(filter);
1560 *f = NULL;
1562 return ret;
1565 static int wl1271_configure_wowlan(struct wl1271 *wl,
1566 struct cfg80211_wowlan *wow)
1568 int i, ret;
1570 if (!wow || wow->any || !wow->n_patterns) {
1571 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1572 FILTER_SIGNAL);
1573 if (ret)
1574 goto out;
1576 ret = wl1271_rx_filter_clear_all(wl);
1577 if (ret)
1578 goto out;
1580 return 0;
1583 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1584 return -EINVAL;
1586 /* Validate all incoming patterns before clearing current FW state */
1587 for (i = 0; i < wow->n_patterns; i++) {
1588 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1589 if (ret) {
1590 wl1271_warning("Bad wowlan pattern %d", i);
1591 return ret;
1595 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1596 if (ret)
1597 goto out;
1599 ret = wl1271_rx_filter_clear_all(wl);
1600 if (ret)
1601 goto out;
1603 /* Translate WoWLAN patterns into filters */
1604 for (i = 0; i < wow->n_patterns; i++) {
1605 struct cfg80211_pkt_pattern *p;
1606 struct wl12xx_rx_filter *filter = NULL;
1608 p = &wow->patterns[i];
1610 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1611 if (ret) {
1612 wl1271_warning("Failed to create an RX filter from "
1613 "wowlan pattern %d", i);
1614 goto out;
1617 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1619 wl1271_rx_filter_free(filter);
1620 if (ret)
1621 goto out;
1624 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1626 out:
1627 return ret;
1630 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1631 struct wl12xx_vif *wlvif,
1632 struct cfg80211_wowlan *wow)
1634 int ret = 0;
1636 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1637 goto out;
1639 ret = wl1271_configure_wowlan(wl, wow);
1640 if (ret < 0)
1641 goto out;
1643 if ((wl->conf.conn.suspend_wake_up_event ==
1644 wl->conf.conn.wake_up_event) &&
1645 (wl->conf.conn.suspend_listen_interval ==
1646 wl->conf.conn.listen_interval))
1647 goto out;
1649 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1650 wl->conf.conn.suspend_wake_up_event,
1651 wl->conf.conn.suspend_listen_interval);
1653 if (ret < 0)
1654 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1655 out:
1656 return ret;
1660 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1661 struct wl12xx_vif *wlvif,
1662 struct cfg80211_wowlan *wow)
1664 int ret = 0;
1666 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1667 goto out;
1669 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1670 if (ret < 0)
1671 goto out;
1673 ret = wl1271_configure_wowlan(wl, wow);
1674 if (ret < 0)
1675 goto out;
1677 out:
1678 return ret;
1682 static int wl1271_configure_suspend(struct wl1271 *wl,
1683 struct wl12xx_vif *wlvif,
1684 struct cfg80211_wowlan *wow)
1686 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1687 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1688 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1689 return wl1271_configure_suspend_ap(wl, wlvif, wow);
1690 return 0;
1693 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1695 int ret = 0;
1696 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1697 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1699 if ((!is_ap) && (!is_sta))
1700 return;
1702 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1703 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1704 return;
1706 wl1271_configure_wowlan(wl, NULL);
1708 if (is_sta) {
1709 if ((wl->conf.conn.suspend_wake_up_event ==
1710 wl->conf.conn.wake_up_event) &&
1711 (wl->conf.conn.suspend_listen_interval ==
1712 wl->conf.conn.listen_interval))
1713 return;
1715 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1716 wl->conf.conn.wake_up_event,
1717 wl->conf.conn.listen_interval);
1719 if (ret < 0)
1720 wl1271_error("resume: wake up conditions failed: %d",
1721 ret);
1723 } else if (is_ap) {
1724 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1728 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1729 struct cfg80211_wowlan *wow)
1731 struct wl1271 *wl = hw->priv;
1732 struct wl12xx_vif *wlvif;
1733 unsigned long flags;
1734 int ret;
1736 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1737 WARN_ON(!wow);
1739 /* we want to perform the recovery before suspending */
1740 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1741 wl1271_warning("postponing suspend to perform recovery");
1742 return -EBUSY;
1745 wl1271_tx_flush(wl);
1747 mutex_lock(&wl->mutex);
1749 ret = pm_runtime_get_sync(wl->dev);
1750 if (ret < 0) {
1751 pm_runtime_put_noidle(wl->dev);
1752 mutex_unlock(&wl->mutex);
1753 return ret;
1756 wl->wow_enabled = true;
1757 wl12xx_for_each_wlvif(wl, wlvif) {
1758 if (wlcore_is_p2p_mgmt(wlvif))
1759 continue;
1761 ret = wl1271_configure_suspend(wl, wlvif, wow);
1762 if (ret < 0) {
1763 mutex_unlock(&wl->mutex);
1764 wl1271_warning("couldn't prepare device to suspend");
1765 return ret;
1769 /* disable fast link flow control notifications from FW */
1770 ret = wlcore_hw_interrupt_notify(wl, false);
1771 if (ret < 0)
1772 goto out_sleep;
1774 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1775 ret = wlcore_hw_rx_ba_filter(wl,
1776 !!wl->conf.conn.suspend_rx_ba_activity);
1777 if (ret < 0)
1778 goto out_sleep;
1780 out_sleep:
1781 pm_runtime_put_noidle(wl->dev);
1782 mutex_unlock(&wl->mutex);
1784 if (ret < 0) {
1785 wl1271_warning("couldn't prepare device to suspend");
1786 return ret;
1789 /* flush any remaining work */
1790 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1792 flush_work(&wl->tx_work);
1795 * Cancel the watchdog even if the above tx_flush failed. We will detect
1796 * it on resume anyway.
1798 cancel_delayed_work(&wl->tx_watchdog_work);
1801 * set suspended flag to avoid triggering a new threaded_irq
1802 * work.
1804 spin_lock_irqsave(&wl->wl_lock, flags);
1805 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1806 spin_unlock_irqrestore(&wl->wl_lock, flags);
1808 return pm_runtime_force_suspend(wl->dev);
1811 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1813 struct wl1271 *wl = hw->priv;
1814 struct wl12xx_vif *wlvif;
1815 unsigned long flags;
1816 bool run_irq_work = false, pending_recovery;
1817 int ret;
1819 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1820 wl->wow_enabled);
1821 WARN_ON(!wl->wow_enabled);
1823 ret = pm_runtime_force_resume(wl->dev);
1824 if (ret < 0) {
1825 wl1271_error("ELP wakeup failure!");
1826 goto out_sleep;
1830 * re-enable irq_work enqueuing, and call irq_work directly if
1831 * there is a pending work.
1833 spin_lock_irqsave(&wl->wl_lock, flags);
1834 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1835 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1836 run_irq_work = true;
1837 spin_unlock_irqrestore(&wl->wl_lock, flags);
1839 mutex_lock(&wl->mutex);
1841 /* test the recovery flag before calling any SDIO functions */
1842 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1843 &wl->flags);
1845 if (run_irq_work) {
1846 wl1271_debug(DEBUG_MAC80211,
1847 "run postponed irq_work directly");
1849 /* don't talk to the HW if recovery is pending */
1850 if (!pending_recovery) {
1851 ret = wlcore_irq_locked(wl);
1852 if (ret)
1853 wl12xx_queue_recovery_work(wl);
1856 wlcore_enable_interrupts(wl);
1859 if (pending_recovery) {
1860 wl1271_warning("queuing forgotten recovery on resume");
1861 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1862 goto out_sleep;
1865 ret = pm_runtime_get_sync(wl->dev);
1866 if (ret < 0) {
1867 pm_runtime_put_noidle(wl->dev);
1868 goto out;
1871 wl12xx_for_each_wlvif(wl, wlvif) {
1872 if (wlcore_is_p2p_mgmt(wlvif))
1873 continue;
1875 wl1271_configure_resume(wl, wlvif);
1878 ret = wlcore_hw_interrupt_notify(wl, true);
1879 if (ret < 0)
1880 goto out_sleep;
1882 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1883 ret = wlcore_hw_rx_ba_filter(wl, false);
1884 if (ret < 0)
1885 goto out_sleep;
1887 out_sleep:
1888 pm_runtime_mark_last_busy(wl->dev);
1889 pm_runtime_put_autosuspend(wl->dev);
1891 out:
1892 wl->wow_enabled = false;
1895 * Set a flag to re-init the watchdog on the first Tx after resume.
1896 * That way we avoid possible conditions where Tx-complete interrupts
1897 * fail to arrive and we perform a spurious recovery.
1899 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1900 mutex_unlock(&wl->mutex);
1902 return 0;
1905 static int wl1271_op_start(struct ieee80211_hw *hw)
1907 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1910 * We have to delay the booting of the hardware because
1911 * we need to know the local MAC address before downloading and
1912 * initializing the firmware. The MAC address cannot be changed
1913 * after boot, and without the proper MAC address, the firmware
1914 * will not function properly.
1916 * The MAC address is first known when the corresponding interface
1917 * is added. That is where we will initialize the hardware.
1920 return 0;
1923 static void wlcore_op_stop_locked(struct wl1271 *wl)
1925 int i;
1927 if (wl->state == WLCORE_STATE_OFF) {
1928 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1929 &wl->flags))
1930 wlcore_enable_interrupts(wl);
1932 return;
1936 * this must be before the cancel_work calls below, so that the work
1937 * functions don't perform further work.
1939 wl->state = WLCORE_STATE_OFF;
1942 * Use the nosync variant to disable interrupts, so the mutex could be
1943 * held while doing so without deadlocking.
1945 wlcore_disable_interrupts_nosync(wl);
1947 mutex_unlock(&wl->mutex);
1949 wlcore_synchronize_interrupts(wl);
1950 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1951 cancel_work_sync(&wl->recovery_work);
1952 wl1271_flush_deferred_work(wl);
1953 cancel_delayed_work_sync(&wl->scan_complete_work);
1954 cancel_work_sync(&wl->netstack_work);
1955 cancel_work_sync(&wl->tx_work);
1956 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1958 /* let's notify MAC80211 about the remaining pending TX frames */
1959 mutex_lock(&wl->mutex);
1960 wl12xx_tx_reset(wl);
1962 wl1271_power_off(wl);
1964 * In case a recovery was scheduled, interrupts were disabled to avoid
1965 * an interrupt storm. Now that the power is down, it is safe to
1966 * re-enable interrupts to balance the disable depth
1968 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1969 wlcore_enable_interrupts(wl);
1971 wl->band = NL80211_BAND_2GHZ;
1973 wl->rx_counter = 0;
1974 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1975 wl->channel_type = NL80211_CHAN_NO_HT;
1976 wl->tx_blocks_available = 0;
1977 wl->tx_allocated_blocks = 0;
1978 wl->tx_results_count = 0;
1979 wl->tx_packets_count = 0;
1980 wl->time_offset = 0;
1981 wl->ap_fw_ps_map = 0;
1982 wl->ap_ps_map = 0;
1983 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1984 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1985 memset(wl->links_map, 0, sizeof(wl->links_map));
1986 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1987 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1988 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1989 wl->active_sta_count = 0;
1990 wl->active_link_count = 0;
1992 /* The system link is always allocated */
1993 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1994 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1995 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1998 * this is performed after the cancel_work calls and the associated
1999 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2000 * get executed before all these vars have been reset.
2002 wl->flags = 0;
2004 wl->tx_blocks_freed = 0;
2006 for (i = 0; i < NUM_TX_QUEUES; i++) {
2007 wl->tx_pkts_freed[i] = 0;
2008 wl->tx_allocated_pkts[i] = 0;
2011 wl1271_debugfs_reset(wl);
2013 kfree(wl->raw_fw_status);
2014 wl->raw_fw_status = NULL;
2015 kfree(wl->fw_status);
2016 wl->fw_status = NULL;
2017 kfree(wl->tx_res_if);
2018 wl->tx_res_if = NULL;
2019 kfree(wl->target_mem_map);
2020 wl->target_mem_map = NULL;
2023 * FW channels must be re-calibrated after recovery;
2024 * save the current Reg-Domain channel configuration and clear it.
2026 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2027 sizeof(wl->reg_ch_conf_pending));
2028 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2031 static void wlcore_op_stop(struct ieee80211_hw *hw)
2033 struct wl1271 *wl = hw->priv;
2035 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2037 mutex_lock(&wl->mutex);
2039 wlcore_op_stop_locked(wl);
2041 mutex_unlock(&wl->mutex);
2044 static void wlcore_channel_switch_work(struct work_struct *work)
2046 struct delayed_work *dwork;
2047 struct wl1271 *wl;
2048 struct ieee80211_vif *vif;
2049 struct wl12xx_vif *wlvif;
2050 int ret;
2052 dwork = to_delayed_work(work);
2053 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2054 wl = wlvif->wl;
2056 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2058 mutex_lock(&wl->mutex);
2060 if (unlikely(wl->state != WLCORE_STATE_ON))
2061 goto out;
2063 /* check the channel switch is still ongoing */
2064 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2065 goto out;
2067 vif = wl12xx_wlvif_to_vif(wlvif);
2068 ieee80211_chswitch_done(vif, false);
2070 ret = pm_runtime_get_sync(wl->dev);
2071 if (ret < 0) {
2072 pm_runtime_put_noidle(wl->dev);
2073 goto out;
2076 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2078 pm_runtime_mark_last_busy(wl->dev);
2079 pm_runtime_put_autosuspend(wl->dev);
2080 out:
2081 mutex_unlock(&wl->mutex);
2084 static void wlcore_connection_loss_work(struct work_struct *work)
2086 struct delayed_work *dwork;
2087 struct wl1271 *wl;
2088 struct ieee80211_vif *vif;
2089 struct wl12xx_vif *wlvif;
2091 dwork = to_delayed_work(work);
2092 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2093 wl = wlvif->wl;
2095 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2097 mutex_lock(&wl->mutex);
2099 if (unlikely(wl->state != WLCORE_STATE_ON))
2100 goto out;
2102 /* Call mac80211 connection loss */
2103 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2104 goto out;
2106 vif = wl12xx_wlvif_to_vif(wlvif);
2107 ieee80211_connection_loss(vif);
2108 out:
2109 mutex_unlock(&wl->mutex);
2112 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2114 struct delayed_work *dwork;
2115 struct wl1271 *wl;
2116 struct wl12xx_vif *wlvif;
2117 unsigned long time_spare;
2118 int ret;
2120 dwork = to_delayed_work(work);
2121 wlvif = container_of(dwork, struct wl12xx_vif,
2122 pending_auth_complete_work);
2123 wl = wlvif->wl;
2125 mutex_lock(&wl->mutex);
2127 if (unlikely(wl->state != WLCORE_STATE_ON))
2128 goto out;
2131 * Make sure a second really passed since the last auth reply. Maybe
2132 * a second auth reply arrived while we were stuck on the mutex.
2133 * Check for a little less than the timeout to protect from scheduler
2134 * irregularities.
2136 time_spare = jiffies +
2137 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2138 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2139 goto out;
2141 ret = pm_runtime_get_sync(wl->dev);
2142 if (ret < 0) {
2143 pm_runtime_put_noidle(wl->dev);
2144 goto out;
2147 /* cancel the ROC if active */
2148 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2150 pm_runtime_mark_last_busy(wl->dev);
2151 pm_runtime_put_autosuspend(wl->dev);
2152 out:
2153 mutex_unlock(&wl->mutex);
2156 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2158 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2159 WL12XX_MAX_RATE_POLICIES);
2160 if (policy >= WL12XX_MAX_RATE_POLICIES)
2161 return -EBUSY;
2163 __set_bit(policy, wl->rate_policies_map);
2164 *idx = policy;
2165 return 0;
2168 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2170 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2171 return;
2173 __clear_bit(*idx, wl->rate_policies_map);
2174 *idx = WL12XX_MAX_RATE_POLICIES;
2177 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2179 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2180 WLCORE_MAX_KLV_TEMPLATES);
2181 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2182 return -EBUSY;
2184 __set_bit(policy, wl->klv_templates_map);
2185 *idx = policy;
2186 return 0;
2189 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2191 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2192 return;
2194 __clear_bit(*idx, wl->klv_templates_map);
2195 *idx = WLCORE_MAX_KLV_TEMPLATES;
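/*
 * Map the mac80211 interface type (together with the p2p flag) to the
 * firmware role used in role_enable: GO/mesh/AP for AP-type BSSes,
 * P2P client or plain STA for station BSSes, and IBSS otherwise.
 */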
2198 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2200 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2202 switch (wlvif->bss_type) {
2203 case BSS_TYPE_AP_BSS:
2204 if (wlvif->p2p)
2205 return WL1271_ROLE_P2P_GO;
2206 else if (ieee80211_vif_is_mesh(vif))
2207 return WL1271_ROLE_MESH_POINT;
2208 else
2209 return WL1271_ROLE_AP;
2211 case BSS_TYPE_STA_BSS:
2212 if (wlvif->p2p)
2213 return WL1271_ROLE_P2P_CL;
2214 else
2215 return WL1271_ROLE_STA;
2217 case BSS_TYPE_IBSS:
2218 return WL1271_ROLE_IBSS;
2220 default:
2221 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2223 return WL12XX_INVALID_ROLE_TYPE;
2226 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2228 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2229 int i;
2231 /* clear everything but the persistent data */
2232 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2234 switch (ieee80211_vif_type_p2p(vif)) {
2235 case NL80211_IFTYPE_P2P_CLIENT:
2236 wlvif->p2p = 1;
2237 /* fall-through */
2238 case NL80211_IFTYPE_STATION:
2239 case NL80211_IFTYPE_P2P_DEVICE:
2240 wlvif->bss_type = BSS_TYPE_STA_BSS;
2241 break;
2242 case NL80211_IFTYPE_ADHOC:
2243 wlvif->bss_type = BSS_TYPE_IBSS;
2244 break;
2245 case NL80211_IFTYPE_P2P_GO:
2246 wlvif->p2p = 1;
2247 /* fall-through */
2248 case NL80211_IFTYPE_AP:
2249 case NL80211_IFTYPE_MESH_POINT:
2250 wlvif->bss_type = BSS_TYPE_AP_BSS;
2251 break;
2252 default:
2253 wlvif->bss_type = MAX_BSS_TYPE;
2254 return -EOPNOTSUPP;
2257 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2258 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2259 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2261 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2262 wlvif->bss_type == BSS_TYPE_IBSS) {
2263 /* init sta/ibss data */
2264 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2265 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2266 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2267 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2268 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2269 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2270 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2271 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2272 } else {
2273 /* init ap data */
2274 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2275 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2276 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2277 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2278 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2279 wl12xx_allocate_rate_policy(wl,
2280 &wlvif->ap.ucast_rate_idx[i]);
2281 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2283 * TODO: check if basic_rate shouldn't be
2284 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2285 * instead (the same thing for STA above).
2287 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2288 /* TODO: this seems to be used only for STA, check it */
2289 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2292 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2293 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2294 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2297 * mac80211 configures some values globally, while we treat them
2298 * per-interface. Thus, on init, we have to copy them from wl.
2300 wlvif->band = wl->band;
2301 wlvif->channel = wl->channel;
2302 wlvif->power_level = wl->power_level;
2303 wlvif->channel_type = wl->channel_type;
2305 INIT_WORK(&wlvif->rx_streaming_enable_work,
2306 wl1271_rx_streaming_enable_work);
2307 INIT_WORK(&wlvif->rx_streaming_disable_work,
2308 wl1271_rx_streaming_disable_work);
2309 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2310 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2311 wlcore_channel_switch_work);
2312 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2313 wlcore_connection_loss_work);
2314 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2315 wlcore_pending_auth_complete_work);
2316 INIT_LIST_HEAD(&wlvif->list);
2318 timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2319 return 0;
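/*
 * Power the chip up, boot the firmware and run the HW init sequence,
 * retrying up to WL1271_BOOT_RETRIES times. On success the wiphy
 * hw/fw version info is updated, 5GHz channels are disabled if the NVS
 * says 11a is unsupported, and the state moves to WLCORE_STATE_ON.
 */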
2322 static int wl12xx_init_fw(struct wl1271 *wl)
2324 int retries = WL1271_BOOT_RETRIES;
2325 bool booted = false;
2326 struct wiphy *wiphy = wl->hw->wiphy;
2327 int ret;
2329 while (retries) {
2330 retries--;
2331 ret = wl12xx_chip_wakeup(wl, false);
2332 if (ret < 0)
2333 goto power_off;
2335 ret = wl->ops->boot(wl);
2336 if (ret < 0)
2337 goto power_off;
2339 ret = wl1271_hw_init(wl);
2340 if (ret < 0)
2341 goto irq_disable;
2343 booted = true;
2344 break;
2346 irq_disable:
2347 mutex_unlock(&wl->mutex);
2348 /* Unlocking the mutex in the middle of handling is
2349 inherently unsafe. In this case we deem it safe to do,
2350 because we need to let any possibly pending IRQ out of
2351 the system (and while we are WLCORE_STATE_OFF the IRQ
2352 work function will not do anything.) Also, any other
2353 possible concurrent operations will fail due to the
2354 current state, hence the wl1271 struct should be safe. */
2355 wlcore_disable_interrupts(wl);
2356 wl1271_flush_deferred_work(wl);
2357 cancel_work_sync(&wl->netstack_work);
2358 mutex_lock(&wl->mutex);
2359 power_off:
2360 wl1271_power_off(wl);
2363 if (!booted) {
2364 wl1271_error("firmware boot failed despite %d retries",
2365 WL1271_BOOT_RETRIES);
2366 goto out;
2369 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2371 /* update hw/fw version info in wiphy struct */
2372 wiphy->hw_version = wl->chip.id;
2373 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2374 sizeof(wiphy->fw_version));
2377 * Now we know if 11a is supported (info from the NVS), so disable
2378 * 11a channels if not supported
2380 if (!wl->enable_11a)
2381 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2383 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2384 wl->enable_11a ? "" : "not ");
2386 wl->state = WLCORE_STATE_ON;
2387 out:
2388 return ret;
2391 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2393 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2397 * Check whether a fw switch (i.e. moving from one loaded
2398 * fw to another) is needed. This function is also responsible
2399 * for updating wl->last_vif_count, so it must be called before
2400 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2401 * will be used).
2403 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2404 struct vif_counter_data vif_counter_data,
2405 bool add)
2407 enum wl12xx_fw_type current_fw = wl->fw_type;
2408 u8 vif_count = vif_counter_data.counter;
2410 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2411 return false;
2413 /* increase the vif count if this is a new vif */
2414 if (add && !vif_counter_data.cur_vif_running)
2415 vif_count++;
2417 wl->last_vif_count = vif_count;
2419 /* no need for fw change if the device is OFF */
2420 if (wl->state == WLCORE_STATE_OFF)
2421 return false;
2423 /* no need for fw change if a single fw is used */
2424 if (!wl->mr_fw_name)
2425 return false;
2427 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2428 return true;
2429 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2430 return true;
2432 return false;
2436 * Enter "forced psm". Make sure the sta is in psm against the ap,
2437 * to make the fw switch a bit more disconnection-persistent.
2439 static void wl12xx_force_active_psm(struct wl1271 *wl)
2441 struct wl12xx_vif *wlvif;
2443 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2444 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
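/*
 * mac80211 hw-queue allocation: every interface owns a contiguous
 * block of NUM_TX_QUEUES queues. The iterator below marks the blocks
 * already claimed by running interfaces, so that
 * wlcore_allocate_hw_queue_base() can pick a free block (or reuse the
 * previously assigned one across resume/recovery).
 */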
2448 struct wlcore_hw_queue_iter_data {
2449 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2450 /* current vif */
2451 struct ieee80211_vif *vif;
2452 /* is the current vif among those iterated */
2453 bool cur_running;
2456 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2457 struct ieee80211_vif *vif)
2459 struct wlcore_hw_queue_iter_data *iter_data = data;
2461 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2462 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2463 return;
2465 if (iter_data->cur_running || vif == iter_data->vif) {
2466 iter_data->cur_running = true;
2467 return;
2470 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2473 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2474 struct wl12xx_vif *wlvif)
2476 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2477 struct wlcore_hw_queue_iter_data iter_data = {};
2478 int i, q_base;
2480 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2481 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2482 return 0;
2485 iter_data.vif = vif;
2487 /* mark all bits taken by active interfaces */
2488 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2489 IEEE80211_IFACE_ITER_RESUME_ALL,
2490 wlcore_hw_queue_iter, &iter_data);
2492 /* the current vif is already running in mac80211 (resume/recovery) */
2493 if (iter_data.cur_running) {
2494 wlvif->hw_queue_base = vif->hw_queue[0];
2495 wl1271_debug(DEBUG_MAC80211,
2496 "using pre-allocated hw queue base %d",
2497 wlvif->hw_queue_base);
2499 /* the interface might have changed type */
2500 goto adjust_cab_queue;
2503 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2504 WLCORE_NUM_MAC_ADDRESSES);
2505 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2506 return -EBUSY;
2508 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2509 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2510 wlvif->hw_queue_base);
2512 for (i = 0; i < NUM_TX_QUEUES; i++) {
2513 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2514 /* register hw queues in mac80211 */
2515 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2518 adjust_cab_queue:
2519 /* the last places are reserved for cab queues per interface */
2520 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2521 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2522 wlvif->hw_queue_base / NUM_TX_QUEUES;
2523 else
2524 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2526 return 0;
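/*
 * Interface addition: refused while in PLT mode or while a recovery is
 * in progress. If the device is still OFF, the firmware is booted here
 * first. Adding a vif may also require switching between the
 * single-role and multi-role firmwares, which is done by triggering an
 * intended recovery instead of enabling the role directly.
 */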
2529 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2530 struct ieee80211_vif *vif)
2532 struct wl1271 *wl = hw->priv;
2533 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2534 struct vif_counter_data vif_count;
2535 int ret = 0;
2536 u8 role_type;
2538 if (wl->plt) {
2539 wl1271_error("Adding Interface not allowed while in PLT mode");
2540 return -EBUSY;
2543 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2544 IEEE80211_VIF_SUPPORTS_UAPSD |
2545 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2547 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2548 ieee80211_vif_type_p2p(vif), vif->addr);
2550 wl12xx_get_vif_count(hw, vif, &vif_count);
2552 mutex_lock(&wl->mutex);
2555 * In some corner-case HW recovery scenarios it's possible to
2556 * get here before __wl1271_op_remove_interface is complete, so
2557 * opt out if that is the case.
2559 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2560 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2561 ret = -EBUSY;
2562 goto out;
2566 ret = wl12xx_init_vif_data(wl, vif);
2567 if (ret < 0)
2568 goto out;
2570 wlvif->wl = wl;
2571 role_type = wl12xx_get_role_type(wl, wlvif);
2572 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2573 ret = -EINVAL;
2574 goto out;
2577 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2578 if (ret < 0)
2579 goto out;
2582 * TODO: once the nvs issue is solved, move this block
2583 * to start(), and make sure here that the driver is ON.
2585 if (wl->state == WLCORE_STATE_OFF) {
2587 * we still need this in order to configure the fw
2588 * while uploading the nvs
2590 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2592 ret = wl12xx_init_fw(wl);
2593 if (ret < 0)
2594 goto out;
2598 * Call runtime PM only after possible wl12xx_init_fw() above
2599 * is done. Otherwise we do not have interrupts enabled.
2601 ret = pm_runtime_get_sync(wl->dev);
2602 if (ret < 0) {
2603 pm_runtime_put_noidle(wl->dev);
2604 goto out_unlock;
2607 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2608 wl12xx_force_active_psm(wl);
2609 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2610 mutex_unlock(&wl->mutex);
2611 wl1271_recovery_work(&wl->recovery_work);
2612 return 0;
2615 if (!wlcore_is_p2p_mgmt(wlvif)) {
2616 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2617 role_type, &wlvif->role_id);
2618 if (ret < 0)
2619 goto out;
2621 ret = wl1271_init_vif_specific(wl, vif);
2622 if (ret < 0)
2623 goto out;
2625 } else {
2626 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2627 &wlvif->dev_role_id);
2628 if (ret < 0)
2629 goto out;
2631 /* needed mainly for configuring rate policies */
2632 ret = wl1271_sta_hw_init(wl, wlvif);
2633 if (ret < 0)
2634 goto out;
2637 list_add(&wlvif->list, &wl->wlvif_list);
2638 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2640 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2641 wl->ap_count++;
2642 else
2643 wl->sta_count++;
2644 out:
2645 pm_runtime_mark_last_busy(wl->dev);
2646 pm_runtime_put_autosuspend(wl->dev);
2647 out_unlock:
2648 mutex_unlock(&wl->mutex);
2650 return ret;
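/*
 * Tear down a vif: abort any scan it owns, disable its firmware
 * role(s) unless a recovery is in progress, reset its tx state, free
 * its rate policies and keep-alive template, and cancel the per-vif
 * works (with wl->mutex temporarily dropped for the cancel_*_sync
 * calls).
 */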
2653 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2654 struct ieee80211_vif *vif,
2655 bool reset_tx_queues)
2657 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2658 int i, ret;
2659 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2661 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2663 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2664 return;
2666 /* because of hardware recovery, we may get here twice */
2667 if (wl->state == WLCORE_STATE_OFF)
2668 return;
2670 wl1271_info("down");
2672 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2673 wl->scan_wlvif == wlvif) {
2674 struct cfg80211_scan_info info = {
2675 .aborted = true,
2679 * Rearm the tx watchdog just before idling scan. This
2680 * prevents just-finished scans from triggering the watchdog
2682 wl12xx_rearm_tx_watchdog_locked(wl);
2684 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2685 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2686 wl->scan_wlvif = NULL;
2687 wl->scan.req = NULL;
2688 ieee80211_scan_completed(wl->hw, &info);
2691 if (wl->sched_vif == wlvif)
2692 wl->sched_vif = NULL;
2694 if (wl->roc_vif == vif) {
2695 wl->roc_vif = NULL;
2696 ieee80211_remain_on_channel_expired(wl->hw);
2699 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2700 /* disable active roles */
2701 ret = pm_runtime_get_sync(wl->dev);
2702 if (ret < 0) {
2703 pm_runtime_put_noidle(wl->dev);
2704 goto deinit;
2707 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2708 wlvif->bss_type == BSS_TYPE_IBSS) {
2709 if (wl12xx_dev_role_started(wlvif))
2710 wl12xx_stop_dev(wl, wlvif);
2713 if (!wlcore_is_p2p_mgmt(wlvif)) {
2714 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2715 if (ret < 0)
2716 goto deinit;
2717 } else {
2718 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2719 if (ret < 0)
2720 goto deinit;
2723 pm_runtime_mark_last_busy(wl->dev);
2724 pm_runtime_put_autosuspend(wl->dev);
2726 deinit:
2727 wl12xx_tx_reset_wlvif(wl, wlvif);
2729 /* clear all hlids (except system_hlid) */
2730 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2732 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2733 wlvif->bss_type == BSS_TYPE_IBSS) {
2734 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2735 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2736 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2737 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2738 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2739 } else {
2740 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2741 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2742 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2743 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2744 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2745 wl12xx_free_rate_policy(wl,
2746 &wlvif->ap.ucast_rate_idx[i]);
2747 wl1271_free_ap_keys(wl, wlvif);
2750 dev_kfree_skb(wlvif->probereq);
2751 wlvif->probereq = NULL;
2752 if (wl->last_wlvif == wlvif)
2753 wl->last_wlvif = NULL;
2754 list_del(&wlvif->list);
2755 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2756 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2757 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2759 if (is_ap)
2760 wl->ap_count--;
2761 else
2762 wl->sta_count--;
2765 * Last AP went down but stations remain: configure sleep auth according to STA.
2766 * Don't do this on unintended recovery.
2768 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2769 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2770 goto unlock;
2772 if (wl->ap_count == 0 && is_ap) {
2773 /* mask ap events */
2774 wl->event_mask &= ~wl->ap_event_mask;
2775 wl1271_event_unmask(wl);
2778 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2779 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2780 /* Configure for power according to debugfs */
2781 if (sta_auth != WL1271_PSM_ILLEGAL)
2782 wl1271_acx_sleep_auth(wl, sta_auth);
2783 /* Configure for ELP power saving */
2784 else
2785 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2788 unlock:
2789 mutex_unlock(&wl->mutex);
2791 del_timer_sync(&wlvif->rx_streaming_timer);
2792 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2793 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2794 cancel_work_sync(&wlvif->rc_update_work);
2795 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2796 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2797 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2799 mutex_lock(&wl->mutex);
2802 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2803 struct ieee80211_vif *vif)
2805 struct wl1271 *wl = hw->priv;
2806 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2807 struct wl12xx_vif *iter;
2808 struct vif_counter_data vif_count;
2810 wl12xx_get_vif_count(hw, vif, &vif_count);
2811 mutex_lock(&wl->mutex);
2813 if (wl->state == WLCORE_STATE_OFF ||
2814 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2815 goto out;
2818 * wl->vif can be null here if someone shuts down the interface
2819 * just when hardware recovery has been started.
2821 wl12xx_for_each_wlvif(wl, iter) {
2822 if (iter != wlvif)
2823 continue;
2825 __wl1271_op_remove_interface(wl, vif, true);
2826 break;
2828 WARN_ON(iter != wlvif);
2829 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2830 wl12xx_force_active_psm(wl);
2831 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2832 wl12xx_queue_recovery_work(wl);
2834 out:
2835 mutex_unlock(&wl->mutex);
2838 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2839 struct ieee80211_vif *vif,
2840 enum nl80211_iftype new_type, bool p2p)
2842 struct wl1271 *wl = hw->priv;
2843 int ret;
2845 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2846 wl1271_op_remove_interface(hw, vif);
2848 vif->type = new_type;
2849 vif->p2p = p2p;
2850 ret = wl1271_op_add_interface(hw, vif);
2852 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2853 return ret;
2856 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2858 int ret;
2859 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2862 * One of the side effects of the JOIN command is that it clears
2863 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2864 * to a WPA/WPA2 access point will therefore kill the data-path.
2865 * Currently the only valid scenario for JOIN during association
2866 * is on roaming, in which case we will also be given new keys.
2867 * Keep the below message for now, unless it starts bothering
2868 * users who really like to roam a lot :)
2870 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2871 wl1271_info("JOIN while associated.");
2873 /* clear encryption type */
2874 wlvif->encryption_type = KEY_NONE;
2876 if (is_ibss)
2877 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2878 else {
2879 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2881 * TODO: this is an ugly workaround for a wl12xx fw
2882 * bug - we are not able to tx/rx after the first
2883 * start_sta, so make dummy start+stop calls,
2884 * and then call start_sta again.
2885 * This should be fixed in the fw.
2887 wl12xx_cmd_role_start_sta(wl, wlvif);
2888 wl12xx_cmd_role_stop_sta(wl, wlvif);
2891 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2894 return ret;
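/*
 * Find the SSID IE in the frame carried by the skb (IEs start at
 * 'offset') and cache it in wlvif->ssid / wlvif->ssid_len.
 */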
2897 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2898 int offset)
2900 u8 ssid_len;
2901 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2902 skb->len - offset);
2904 if (!ptr) {
2905 wl1271_error("No SSID in IEs!");
2906 return -ENOENT;
2909 ssid_len = ptr[1];
2910 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2911 wl1271_error("SSID is too long!");
2912 return -EINVAL;
2915 wlvif->ssid_len = ssid_len;
2916 memcpy(wlvif->ssid, ptr+2, ssid_len);
2917 return 0;
2920 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2922 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2923 struct sk_buff *skb;
2924 int ieoffset;
2926 /* we currently only support setting the ssid from the ap probe req */
2927 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2928 return -EINVAL;
2930 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2931 if (!skb)
2932 return -EINVAL;
2934 ieoffset = offsetof(struct ieee80211_mgmt,
2935 u.probe_req.variable);
2936 wl1271_ssid_set(wlvif, skb, ieoffset);
2937 dev_kfree_skb(skb);
2939 return 0;
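/*
 * Association bring-up: cache the AID, beacon interval and WMM state,
 * build the ps-poll and AP probe-request templates, enable connection
 * monitoring and keep-alive, configure the AID in the fw, force the fw
 * power-save mode to ACTIVE (mac80211's default), and finally apply
 * the peer's rate set if one was provided.
 */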
2942 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2943 struct ieee80211_bss_conf *bss_conf,
2944 u32 sta_rate_set)
2946 int ieoffset;
2947 int ret;
2949 wlvif->aid = bss_conf->aid;
2950 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2951 wlvif->beacon_int = bss_conf->beacon_int;
2952 wlvif->wmm_enabled = bss_conf->qos;
2954 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2957 * With wl1271, we don't need to update the
2958 * beacon_int and dtim_period, because the firmware
2959 * updates them by itself when the first beacon is
2960 * received after a join.
2962 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2963 if (ret < 0)
2964 return ret;
2967 * Get a template for hardware connection maintenance
2969 dev_kfree_skb(wlvif->probereq);
2970 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2971 wlvif,
2972 NULL);
2973 ieoffset = offsetof(struct ieee80211_mgmt,
2974 u.probe_req.variable);
2975 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2977 /* enable the connection monitoring feature */
2978 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2979 if (ret < 0)
2980 return ret;
2983 * The join command disables the keep-alive mode, shuts down its process,
2984 * and also clears the template config, so we need to reset it all after
2985 * the join. The acx_aid starts the keep-alive process, and the order
2986 * of the commands below is relevant.
2988 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2989 if (ret < 0)
2990 return ret;
2992 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2993 if (ret < 0)
2994 return ret;
2996 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2997 if (ret < 0)
2998 return ret;
3000 ret = wl1271_acx_keep_alive_config(wl, wlvif,
3001 wlvif->sta.klv_template_id,
3002 ACX_KEEP_ALIVE_TPL_VALID);
3003 if (ret < 0)
3004 return ret;
3007 * The default fw psm configuration is AUTO, while mac80211's default
3008 * setting is off (ACTIVE), so sync the fw with the correct value.
3010 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3011 if (ret < 0)
3012 return ret;
3014 if (sta_rate_set) {
3015 wlvif->rate_set =
3016 wl1271_tx_enabled_rates_get(wl,
3017 sta_rate_set,
3018 wlvif->band);
3019 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3020 if (ret < 0)
3021 return ret;
3024 return ret;
3027 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3029 int ret;
3030 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3032 /* make sure we were associated (sta) */
3033 if (sta &&
3034 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3035 return false;
3037 /* make sure we are joined (ibss) */
3038 if (!sta &&
3039 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3040 return false;
3042 if (sta) {
3043 /* use defaults when not associated */
3044 wlvif->aid = 0;
3046 /* free probe-request template */
3047 dev_kfree_skb(wlvif->probereq);
3048 wlvif->probereq = NULL;
3050 /* disable connection monitor features */
3051 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3052 if (ret < 0)
3053 return ret;
3055 /* Disable the keep-alive feature */
3056 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3057 if (ret < 0)
3058 return ret;
3060 /* disable beacon filtering */
3061 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3062 if (ret < 0)
3063 return ret;
3066 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3067 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3069 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3070 ieee80211_chswitch_done(vif, false);
3071 cancel_delayed_work(&wlvif->channel_switch_work);
3074 /* invalidate keep-alive template */
3075 wl1271_acx_keep_alive_config(wl, wlvif,
3076 wlvif->sta.klv_template_id,
3077 ACX_KEEP_ALIVE_TPL_INVALID);
3079 return 0;
3082 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3084 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3085 wlvif->rate_set = wlvif->basic_rate_set;
3088 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3089 bool idle)
3091 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3093 if (idle == cur_idle)
3094 return;
3096 if (idle) {
3097 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3098 } else {
3099 /* The current firmware only supports sched_scan in idle */
3100 if (wl->sched_vif == wlvif)
3101 wl->ops->sched_scan_stop(wl, wlvif);
3103 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3107 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3108 struct ieee80211_conf *conf, u32 changed)
3110 int ret;
3112 if (wlcore_is_p2p_mgmt(wlvif))
3113 return 0;
3115 if (conf->power_level != wlvif->power_level) {
3116 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3117 if (ret < 0)
3118 return ret;
3120 wlvif->power_level = conf->power_level;
3123 return 0;
3126 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3128 struct wl1271 *wl = hw->priv;
3129 struct wl12xx_vif *wlvif;
3130 struct ieee80211_conf *conf = &hw->conf;
3131 int ret = 0;
3133 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3134 " changed 0x%x",
3135 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3136 conf->power_level,
3137 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3138 changed);
3140 mutex_lock(&wl->mutex);
3142 if (changed & IEEE80211_CONF_CHANGE_POWER)
3143 wl->power_level = conf->power_level;
3145 if (unlikely(wl->state != WLCORE_STATE_ON))
3146 goto out;
3148 ret = pm_runtime_get_sync(wl->dev);
3149 if (ret < 0) {
3150 pm_runtime_put_noidle(wl->dev);
3151 goto out;
3154 /* configure each interface */
3155 wl12xx_for_each_wlvif(wl, wlvif) {
3156 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3157 if (ret < 0)
3158 goto out_sleep;
3161 out_sleep:
3162 pm_runtime_mark_last_busy(wl->dev);
3163 pm_runtime_put_autosuspend(wl->dev);
3165 out:
3166 mutex_unlock(&wl->mutex);
3168 return ret;
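/*
 * Multicast filtering: prepare_multicast() snapshots the list into a
 * heap-allocated wl1271_filter_params and returns it through the u64
 * "multicast" cookie; configure_filter() applies it per vif (unless
 * FIF_ALLMULTI is set) and frees it.
 */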
3171 struct wl1271_filter_params {
3172 bool enabled;
3173 int mc_list_length;
3174 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3177 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3178 struct netdev_hw_addr_list *mc_list)
3180 struct wl1271_filter_params *fp;
3181 struct netdev_hw_addr *ha;
3183 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3184 if (!fp) {
3185 wl1271_error("Out of memory setting filters.");
3186 return 0;
3189 /* update multicast filtering parameters */
3190 fp->mc_list_length = 0;
3191 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3192 fp->enabled = false;
3193 } else {
3194 fp->enabled = true;
3195 netdev_hw_addr_list_for_each(ha, mc_list) {
3196 memcpy(fp->mc_list[fp->mc_list_length],
3197 ha->addr, ETH_ALEN);
3198 fp->mc_list_length++;
3202 return (u64)(unsigned long)fp;
3205 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3206 FIF_FCSFAIL | \
3207 FIF_BCN_PRBRESP_PROMISC | \
3208 FIF_CONTROL | \
3209 FIF_OTHER_BSS)
3211 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3212 unsigned int changed,
3213 unsigned int *total, u64 multicast)
3215 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3216 struct wl1271 *wl = hw->priv;
3217 struct wl12xx_vif *wlvif;
3219 int ret;
3221 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3222 " total %x", changed, *total);
3224 mutex_lock(&wl->mutex);
3226 *total &= WL1271_SUPPORTED_FILTERS;
3227 changed &= WL1271_SUPPORTED_FILTERS;
3229 if (unlikely(wl->state != WLCORE_STATE_ON))
3230 goto out;
3232 ret = pm_runtime_get_sync(wl->dev);
3233 if (ret < 0) {
3234 pm_runtime_put_noidle(wl->dev);
3235 goto out;
3238 wl12xx_for_each_wlvif(wl, wlvif) {
3239 if (wlcore_is_p2p_mgmt(wlvif))
3240 continue;
3242 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3243 if (*total & FIF_ALLMULTI)
3244 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3245 false,
3246 NULL, 0);
3247 else if (fp)
3248 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3249 fp->enabled,
3250 fp->mc_list,
3251 fp->mc_list_length);
3252 if (ret < 0)
3253 goto out_sleep;
3257 * If the interface is in AP mode and was created with allmulticast,
3258 * disable the firmware filters so that all multicast packets are passed.
3259 * This is mandatory for mDNS-based discovery protocols.
3261 if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3262 if (*total & FIF_ALLMULTI) {
3263 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3264 false,
3265 NULL, 0);
3266 if (ret < 0)
3267 goto out_sleep;
3273 * The fw doesn't provide an api to configure the filters. Instead,
3274 * the filter configuration is based on the active roles / ROC
3275 * state.
3278 out_sleep:
3279 pm_runtime_mark_last_busy(wl->dev);
3280 pm_runtime_put_autosuspend(wl->dev);
3282 out:
3283 mutex_unlock(&wl->mutex);
3284 kfree(fp);
3287 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3288 u8 id, u8 key_type, u8 key_size,
3289 const u8 *key, u8 hlid, u32 tx_seq_32,
3290 u16 tx_seq_16)
3292 struct wl1271_ap_key *ap_key;
3293 int i;
3295 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3297 if (key_size > MAX_KEY_SIZE)
3298 return -EINVAL;
3301 * Find next free entry in ap_keys. Also check we are not replacing
3302 * an existing key.
3304 for (i = 0; i < MAX_NUM_KEYS; i++) {
3305 if (wlvif->ap.recorded_keys[i] == NULL)
3306 break;
3308 if (wlvif->ap.recorded_keys[i]->id == id) {
3309 wl1271_warning("trying to record key replacement");
3310 return -EINVAL;
3314 if (i == MAX_NUM_KEYS)
3315 return -EBUSY;
3317 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3318 if (!ap_key)
3319 return -ENOMEM;
3321 ap_key->id = id;
3322 ap_key->key_type = key_type;
3323 ap_key->key_size = key_size;
3324 memcpy(ap_key->key, key, key_size);
3325 ap_key->hlid = hlid;
3326 ap_key->tx_seq_32 = tx_seq_32;
3327 ap_key->tx_seq_16 = tx_seq_16;
3329 wlvif->ap.recorded_keys[i] = ap_key;
3330 return 0;
3333 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3335 int i;
3337 for (i = 0; i < MAX_NUM_KEYS; i++) {
3338 kfree(wlvif->ap.recorded_keys[i]);
3339 wlvif->ap.recorded_keys[i] = NULL;
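/*
 * Keys given to us before the AP role is started are only recorded
 * (see wl1271_record_ap_key) and get programmed into the firmware
 * here, once the AP is up; if any of them was a WEP key, the default
 * WEP key is also set on the broadcast hlid.
 */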
3343 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3345 int i, ret = 0;
3346 struct wl1271_ap_key *key;
3347 bool wep_key_added = false;
3349 for (i = 0; i < MAX_NUM_KEYS; i++) {
3350 u8 hlid;
3351 if (wlvif->ap.recorded_keys[i] == NULL)
3352 break;
3354 key = wlvif->ap.recorded_keys[i];
3355 hlid = key->hlid;
3356 if (hlid == WL12XX_INVALID_LINK_ID)
3357 hlid = wlvif->ap.bcast_hlid;
3359 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3360 key->id, key->key_type,
3361 key->key_size, key->key,
3362 hlid, key->tx_seq_32,
3363 key->tx_seq_16);
3364 if (ret < 0)
3365 goto out;
3367 if (key->key_type == KEY_WEP)
3368 wep_key_added = true;
3371 if (wep_key_added) {
3372 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3373 wlvif->ap.bcast_hlid);
3374 if (ret < 0)
3375 goto out;
3378 out:
3379 wl1271_free_ap_keys(wl, wlvif);
3380 return ret;
3383 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3384 u16 action, u8 id, u8 key_type,
3385 u8 key_size, const u8 *key, u32 tx_seq_32,
3386 u16 tx_seq_16, struct ieee80211_sta *sta)
3388 int ret;
3389 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3391 if (is_ap) {
3392 struct wl1271_station *wl_sta;
3393 u8 hlid;
3395 if (sta) {
3396 wl_sta = (struct wl1271_station *)sta->drv_priv;
3397 hlid = wl_sta->hlid;
3398 } else {
3399 hlid = wlvif->ap.bcast_hlid;
3402 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3404 * We do not support removing keys after AP shutdown.
3405 * Pretend we do to make mac80211 happy.
3407 if (action != KEY_ADD_OR_REPLACE)
3408 return 0;
3410 ret = wl1271_record_ap_key(wl, wlvif, id,
3411 key_type, key_size,
3412 key, hlid, tx_seq_32,
3413 tx_seq_16);
3414 } else {
3415 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3416 id, key_type, key_size,
3417 key, hlid, tx_seq_32,
3418 tx_seq_16);
3421 if (ret < 0)
3422 return ret;
3423 } else {
3424 const u8 *addr;
3425 static const u8 bcast_addr[ETH_ALEN] = {
3426 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3429 addr = sta ? sta->addr : bcast_addr;
3431 if (is_zero_ether_addr(addr)) {
3432 /* We don't support TX-only encryption */
3433 return -EOPNOTSUPP;
3436 /* The wl1271 does not allow removing unicast keys - they
3437 will be cleared automatically on the next CMD_JOIN. Ignore the
3438 request silently, as we don't want mac80211 to emit
3439 an error message. */
3440 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3441 return 0;
3443 /* don't remove key if hlid was already deleted */
3444 if (action == KEY_REMOVE &&
3445 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3446 return 0;
3448 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3449 id, key_type, key_size,
3450 key, addr, tx_seq_32,
3451 tx_seq_16);
3452 if (ret < 0)
3453 return ret;
3457 return 0;
3460 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3461 struct ieee80211_vif *vif,
3462 struct ieee80211_sta *sta,
3463 struct ieee80211_key_conf *key_conf)
3465 struct wl1271 *wl = hw->priv;
3466 int ret;
3467 bool might_change_spare =
3468 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3469 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3471 if (might_change_spare) {
3473 * stop the queues and flush to ensure the next packets are
3474 * in sync with FW spare block accounting
3476 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3477 wl1271_tx_flush(wl);
3480 mutex_lock(&wl->mutex);
3482 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3483 ret = -EAGAIN;
3484 goto out_wake_queues;
3487 ret = pm_runtime_get_sync(wl->dev);
3488 if (ret < 0) {
3489 pm_runtime_put_noidle(wl->dev);
3490 goto out_wake_queues;
3493 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3495 pm_runtime_mark_last_busy(wl->dev);
3496 pm_runtime_put_autosuspend(wl->dev);
3498 out_wake_queues:
3499 if (might_change_spare)
3500 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3502 mutex_unlock(&wl->mutex);
3504 return ret;
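/*
 * Exported set_key helper: derive the firmware key type from the
 * cipher suite, seed the tx sequence counters from the link's
 * total_freed_pkts, and dispatch the add/remove through
 * wl1271_set_key().
 */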
3507 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3508 struct ieee80211_vif *vif,
3509 struct ieee80211_sta *sta,
3510 struct ieee80211_key_conf *key_conf)
3512 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3513 int ret;
3514 u32 tx_seq_32 = 0;
3515 u16 tx_seq_16 = 0;
3516 u8 key_type;
3517 u8 hlid;
3519 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3521 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3522 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3523 key_conf->cipher, key_conf->keyidx,
3524 key_conf->keylen, key_conf->flags);
3525 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3527 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3528 if (sta) {
3529 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3530 hlid = wl_sta->hlid;
3531 } else {
3532 hlid = wlvif->ap.bcast_hlid;
3534 else
3535 hlid = wlvif->sta.hlid;
3537 if (hlid != WL12XX_INVALID_LINK_ID) {
3538 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3539 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3540 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3543 switch (key_conf->cipher) {
3544 case WLAN_CIPHER_SUITE_WEP40:
3545 case WLAN_CIPHER_SUITE_WEP104:
3546 key_type = KEY_WEP;
3548 key_conf->hw_key_idx = key_conf->keyidx;
3549 break;
3550 case WLAN_CIPHER_SUITE_TKIP:
3551 key_type = KEY_TKIP;
3552 key_conf->hw_key_idx = key_conf->keyidx;
3553 break;
3554 case WLAN_CIPHER_SUITE_CCMP:
3555 key_type = KEY_AES;
3556 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3557 break;
3558 case WL1271_CIPHER_SUITE_GEM:
3559 key_type = KEY_GEM;
3560 break;
3561 default:
3562 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3564 return -EOPNOTSUPP;
3567 switch (cmd) {
3568 case SET_KEY:
3569 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3570 key_conf->keyidx, key_type,
3571 key_conf->keylen, key_conf->key,
3572 tx_seq_32, tx_seq_16, sta);
3573 if (ret < 0) {
3574 wl1271_error("Could not add or replace key");
3575 return ret;
3579 * reconfigure the arp response if the unicast (or common)
3580 * encryption key type has changed
3582 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3583 (sta || key_type == KEY_WEP) &&
3584 wlvif->encryption_type != key_type) {
3585 wlvif->encryption_type = key_type;
3586 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3587 if (ret < 0) {
3588 wl1271_warning("build arp rsp failed: %d", ret);
3589 return ret;
3592 break;
3594 case DISABLE_KEY:
3595 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3596 key_conf->keyidx, key_type,
3597 key_conf->keylen, key_conf->key,
3598 0, 0, sta);
3599 if (ret < 0) {
3600 wl1271_error("Could not remove key");
3601 return ret;
3603 break;
3605 default:
3606 wl1271_error("Unsupported key cmd 0x%x", cmd);
3607 return -EOPNOTSUPP;
3610 return ret;
3612 EXPORT_SYMBOL_GPL(wlcore_set_key);
3614 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3615 struct ieee80211_vif *vif,
3616 int key_idx)
3618 struct wl1271 *wl = hw->priv;
3619 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3620 int ret;
3622 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3623 key_idx);
3625 /* we don't handle unsetting of default key */
3626 if (key_idx == -1)
3627 return;
3629 mutex_lock(&wl->mutex);
3631 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3632 ret = -EAGAIN;
3633 goto out_unlock;
3636 ret = pm_runtime_get_sync(wl->dev);
3637 if (ret < 0) {
3638 pm_runtime_put_noidle(wl->dev);
3639 goto out_unlock;
3642 wlvif->default_key = key_idx;
3644 /* the default WEP key needs to be configured at least once */
3645 if (wlvif->encryption_type == KEY_WEP) {
3646 ret = wl12xx_cmd_set_default_wep_key(wl,
3647 key_idx,
3648 wlvif->sta.hlid);
3649 if (ret < 0)
3650 goto out_sleep;
3653 out_sleep:
3654 pm_runtime_mark_last_busy(wl->dev);
3655 pm_runtime_put_autosuspend(wl->dev);
3657 out_unlock:
3658 mutex_unlock(&wl->mutex);
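/*
 * Push the regulatory channel configuration to the firmware on chips
 * that have WLCORE_QUIRK_REGDOMAIN_CONF set; a failure here queues the
 * recovery work.
 */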
3661 void wlcore_regdomain_config(struct wl1271 *wl)
3663 int ret;
3665 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3666 return;
3668 mutex_lock(&wl->mutex);
3670 if (unlikely(wl->state != WLCORE_STATE_ON))
3671 goto out;
3673 ret = pm_runtime_get_sync(wl->dev);
3674 if (ret < 0)
3675 goto out;
3677 ret = wlcore_cmd_regdomain_config_locked(wl);
3678 if (ret < 0) {
3679 wl12xx_queue_recovery_work(wl);
3680 goto out;
3683 pm_runtime_mark_last_busy(wl->dev);
3684 pm_runtime_put_autosuspend(wl->dev);
3685 out:
3686 mutex_unlock(&wl->mutex);
3689 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3690 struct ieee80211_vif *vif,
3691 struct ieee80211_scan_request *hw_req)
3693 struct cfg80211_scan_request *req = &hw_req->req;
3694 struct wl1271 *wl = hw->priv;
3695 int ret;
3696 u8 *ssid = NULL;
3697 size_t len = 0;
3699 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3701 if (req->n_ssids) {
3702 ssid = req->ssids[0].ssid;
3703 len = req->ssids[0].ssid_len;
3706 mutex_lock(&wl->mutex);
3708 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3710 * We cannot return -EBUSY here because cfg80211 would then expect
3711 * a call to ieee80211_scan_completed - and in this case no such
3712 * call will be made.
3714 ret = -EAGAIN;
3715 goto out;
3718 ret = pm_runtime_get_sync(wl->dev);
3719 if (ret < 0) {
3720 pm_runtime_put_noidle(wl->dev);
3721 goto out;
3724 /* fail if there is any role in ROC */
3725 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3726 /* don't allow scanning right now */
3727 ret = -EBUSY;
3728 goto out_sleep;
3731 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3732 out_sleep:
3733 pm_runtime_mark_last_busy(wl->dev);
3734 pm_runtime_put_autosuspend(wl->dev);
3735 out:
3736 mutex_unlock(&wl->mutex);
3738 return ret;
3741 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3742 struct ieee80211_vif *vif)
3744 struct wl1271 *wl = hw->priv;
3745 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3746 struct cfg80211_scan_info info = {
3747 .aborted = true,
3749 int ret;
3751 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3753 mutex_lock(&wl->mutex);
3755 if (unlikely(wl->state != WLCORE_STATE_ON))
3756 goto out;
3758 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3759 goto out;
3761 ret = pm_runtime_get_sync(wl->dev);
3762 if (ret < 0) {
3763 pm_runtime_put_noidle(wl->dev);
3764 goto out;
3767 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3768 ret = wl->ops->scan_stop(wl, wlvif);
3769 if (ret < 0)
3770 goto out_sleep;
3774 * Rearm the tx watchdog just before idling scan. This
3775 * prevents just-finished scans from triggering the watchdog
3777 wl12xx_rearm_tx_watchdog_locked(wl);
3779 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3780 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3781 wl->scan_wlvif = NULL;
3782 wl->scan.req = NULL;
3783 ieee80211_scan_completed(wl->hw, &info);
3785 out_sleep:
3786 pm_runtime_mark_last_busy(wl->dev);
3787 pm_runtime_put_autosuspend(wl->dev);
3788 out:
3789 mutex_unlock(&wl->mutex);
3791 cancel_delayed_work_sync(&wl->scan_complete_work);
3794 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3795 struct ieee80211_vif *vif,
3796 struct cfg80211_sched_scan_request *req,
3797 struct ieee80211_scan_ies *ies)
3799 struct wl1271 *wl = hw->priv;
3800 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3801 int ret;
3803 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3805 mutex_lock(&wl->mutex);
3807 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3808 ret = -EAGAIN;
3809 goto out;
3812 ret = pm_runtime_get_sync(wl->dev);
3813 if (ret < 0) {
3814 pm_runtime_put_noidle(wl->dev);
3815 goto out;
3818 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3819 if (ret < 0)
3820 goto out_sleep;
3822 wl->sched_vif = wlvif;
3824 out_sleep:
3825 pm_runtime_mark_last_busy(wl->dev);
3826 pm_runtime_put_autosuspend(wl->dev);
3827 out:
3828 mutex_unlock(&wl->mutex);
3829 return ret;
3832 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3833 struct ieee80211_vif *vif)
3835 struct wl1271 *wl = hw->priv;
3836 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3837 int ret;
3839 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3841 mutex_lock(&wl->mutex);
3843 if (unlikely(wl->state != WLCORE_STATE_ON))
3844 goto out;
3846 ret = pm_runtime_get_sync(wl->dev);
3847 if (ret < 0) {
3848 pm_runtime_put_noidle(wl->dev);
3849 goto out;
3852 wl->ops->sched_scan_stop(wl, wlvif);
3854 pm_runtime_mark_last_busy(wl->dev);
3855 pm_runtime_put_autosuspend(wl->dev);
3856 out:
3857 mutex_unlock(&wl->mutex);
3859 return 0;
3862 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3864 struct wl1271 *wl = hw->priv;
3865 int ret = 0;
3867 mutex_lock(&wl->mutex);
3869 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3870 ret = -EAGAIN;
3871 goto out;
3874 ret = pm_runtime_get_sync(wl->dev);
3875 if (ret < 0) {
3876 pm_runtime_put_noidle(wl->dev);
3877 goto out;
3880 ret = wl1271_acx_frag_threshold(wl, value);
3881 if (ret < 0)
3882 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3884 pm_runtime_mark_last_busy(wl->dev);
3885 pm_runtime_put_autosuspend(wl->dev);
3887 out:
3888 mutex_unlock(&wl->mutex);
3890 return ret;
3893 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3895 struct wl1271 *wl = hw->priv;
3896 struct wl12xx_vif *wlvif;
3897 int ret = 0;
3899 mutex_lock(&wl->mutex);
3901 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3902 ret = -EAGAIN;
3903 goto out;
3906 ret = pm_runtime_get_sync(wl->dev);
3907 if (ret < 0) {
3908 pm_runtime_put_noidle(wl->dev);
3909 goto out;
3912 wl12xx_for_each_wlvif(wl, wlvif) {
3913 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3914 if (ret < 0)
3915 wl1271_warning("set rts threshold failed: %d", ret);
3917 pm_runtime_mark_last_busy(wl->dev);
3918 pm_runtime_put_autosuspend(wl->dev);
3920 out:
3921 mutex_unlock(&wl->mutex);
3923 return ret;
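/*
 * Helpers that strip an IE (by EID, or by vendor OUI/type) out of an
 * skb in place; used below when deriving the probe-response template
 * from the beacon.
 */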
3926 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3928 int len;
3929 const u8 *next, *end = skb->data + skb->len;
3930 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3931 skb->len - ieoffset);
3932 if (!ie)
3933 return;
3934 len = ie[1] + 2;
3935 next = ie + len;
3936 memmove(ie, next, end - next);
3937 skb_trim(skb, skb->len - len);
3940 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3941 unsigned int oui, u8 oui_type,
3942 int ieoffset)
3944 int len;
3945 const u8 *next, *end = skb->data + skb->len;
3946 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3947 skb->data + ieoffset,
3948 skb->len - ieoffset);
3949 if (!ie)
3950 return;
3951 len = ie[1] + 2;
3952 next = ie + len;
3953 memmove(ie, next, end - next);
3954 skb_trim(skb, skb->len - len);
3957 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3958 struct ieee80211_vif *vif)
3960 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3961 struct sk_buff *skb;
3962 int ret;
3964 skb = ieee80211_proberesp_get(wl->hw, vif);
3965 if (!skb)
3966 return -EOPNOTSUPP;
3968 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3969 CMD_TEMPL_AP_PROBE_RESPONSE,
3970 skb->data,
3971 skb->len, 0,
3972 rates);
3973 dev_kfree_skb(skb);
3975 if (ret < 0)
3976 goto out;
3978 wl1271_debug(DEBUG_AP, "probe response updated");
3979 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3981 out:
3982 return ret;
3985 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3986 struct ieee80211_vif *vif,
3987 u8 *probe_rsp_data,
3988 size_t probe_rsp_len,
3989 u32 rates)
3991 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3992 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3993 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3994 int ssid_ie_offset, ie_offset, templ_len;
3995 const u8 *ptr;
3997 /* no need to change probe response if the SSID is set correctly */
3998 if (wlvif->ssid_len > 0)
3999 return wl1271_cmd_template_set(wl, wlvif->role_id,
4000 CMD_TEMPL_AP_PROBE_RESPONSE,
4001 probe_rsp_data,
4002 probe_rsp_len, 0,
4003 rates);
4005 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
4006 wl1271_error("probe_rsp template too big");
4007 return -EINVAL;
4010 /* start searching from IE offset */
4011 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4013 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4014 probe_rsp_len - ie_offset);
4015 if (!ptr) {
4016 wl1271_error("No SSID in beacon!");
4017 return -EINVAL;
4020 ssid_ie_offset = ptr - probe_rsp_data;
4021 ptr += (ptr[1] + 2);
4023 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4025 /* insert SSID from bss_conf */
4026 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4027 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4028 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4029 bss_conf->ssid, bss_conf->ssid_len);
4030 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
4032 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4033 ptr, probe_rsp_len - (ptr - probe_rsp_data));
4034 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4036 return wl1271_cmd_template_set(wl, wlvif->role_id,
4037 CMD_TEMPL_AP_PROBE_RESPONSE,
4038 probe_rsp_templ,
4039 templ_len, 0,
4040 rates);
4043 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4044 struct ieee80211_vif *vif,
4045 struct ieee80211_bss_conf *bss_conf,
4046 u32 changed)
4048 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4049 int ret = 0;
4051 if (changed & BSS_CHANGED_ERP_SLOT) {
4052 if (bss_conf->use_short_slot)
4053 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4054 else
4055 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4056 if (ret < 0) {
4057 wl1271_warning("Set slot time failed %d", ret);
4058 goto out;
4062 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4063 if (bss_conf->use_short_preamble)
4064 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4065 else
4066 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4069 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4070 if (bss_conf->use_cts_prot)
4071 ret = wl1271_acx_cts_protect(wl, wlvif,
4072 CTSPROTECT_ENABLE);
4073 else
4074 ret = wl1271_acx_cts_protect(wl, wlvif,
4075 CTSPROTECT_DISABLE);
4076 if (ret < 0) {
4077 wl1271_warning("Set ctsprotect failed %d", ret);
4078 goto out;
4082 out:
4083 return ret;
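/*
 * Upload the current beacon as the firmware beacon template and,
 * unless userspace already set an explicit probe-response template,
 * also derive a probe-response template from it (TIM and P2P IEs
 * removed, frame control rewritten to PROBE_RESP).
 */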
4086 static int wlcore_set_beacon_template(struct wl1271 *wl,
4087 struct ieee80211_vif *vif,
4088 bool is_ap)
4090 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4091 struct ieee80211_hdr *hdr;
4092 u32 min_rate;
4093 int ret;
4094 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4095 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4096 u16 tmpl_id;
4098 if (!beacon) {
4099 ret = -EINVAL;
4100 goto out;
4103 wl1271_debug(DEBUG_MASTER, "beacon updated");
4105 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4106 if (ret < 0) {
4107 dev_kfree_skb(beacon);
4108 goto out;
4110 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4111 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4112 CMD_TEMPL_BEACON;
4113 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4114 beacon->data,
4115 beacon->len, 0,
4116 min_rate);
4117 if (ret < 0) {
4118 dev_kfree_skb(beacon);
4119 goto out;
4122 wlvif->wmm_enabled =
4123 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4124 WLAN_OUI_TYPE_MICROSOFT_WMM,
4125 beacon->data + ieoffset,
4126 beacon->len - ieoffset);
4129 * In case a probe-response template was already set explicitly
4130 * by userspace, don't derive one from the beacon data.
4132 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4133 goto end_bcn;
4135 /* remove TIM ie from probe response */
4136 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4139 * Remove the p2p ie from the probe response.
4140 * The fw responds to probe requests that don't include
4141 * the p2p ie. Probe requests with the p2p ie will be passed up,
4142 * and will be answered by the supplicant (the spec
4143 * forbids including the p2p ie when responding to probe
4144 * requests that didn't include it).
4146 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4147 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4149 hdr = (struct ieee80211_hdr *) beacon->data;
4150 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4151 IEEE80211_STYPE_PROBE_RESP);
4152 if (is_ap)
4153 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4154 beacon->data,
4155 beacon->len,
4156 min_rate);
4157 else
4158 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4159 CMD_TEMPL_PROBE_RESPONSE,
4160 beacon->data,
4161 beacon->len, 0,
4162 min_rate);
4163 end_bcn:
4164 dev_kfree_skb(beacon);
4165 if (ret < 0)
4166 goto out;
4168 out:
4169 return ret;
4172 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4173 struct ieee80211_vif *vif,
4174 struct ieee80211_bss_conf *bss_conf,
4175 u32 changed)
4177 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4178 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4179 int ret = 0;
4181 if (changed & BSS_CHANGED_BEACON_INT) {
4182 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4183 bss_conf->beacon_int);
4185 wlvif->beacon_int = bss_conf->beacon_int;
4188 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4189 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4191 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4194 if (changed & BSS_CHANGED_BEACON) {
4195 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4196 if (ret < 0)
4197 goto out;
4199 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4200 &wlvif->flags)) {
4201 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4202 if (ret < 0)
4203 goto out;
4206 out:
4207 if (ret != 0)
4208 wl1271_error("beacon info change failed: %d", ret);
4209 return ret;
4212 /* AP mode changes */
4213 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4214 struct ieee80211_vif *vif,
4215 struct ieee80211_bss_conf *bss_conf,
4216 u32 changed)
4218 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4219 int ret = 0;
4221 if (changed & BSS_CHANGED_BASIC_RATES) {
4222 u32 rates = bss_conf->basic_rates;
4224 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4225 wlvif->band);
4226 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4227 wlvif->basic_rate_set);
4229 ret = wl1271_init_ap_rates(wl, wlvif);
4230 if (ret < 0) {
4231 wl1271_error("AP rate policy change failed %d", ret);
4232 goto out;
4235 ret = wl1271_ap_init_templates(wl, vif);
4236 if (ret < 0)
4237 goto out;
4239 /* No need to set probe resp template for mesh */
4240 if (!ieee80211_vif_is_mesh(vif)) {
4241 ret = wl1271_ap_set_probe_resp_tmpl(wl,
4242 wlvif->basic_rate,
4243 vif);
4244 if (ret < 0)
4245 goto out;
4248 ret = wlcore_set_beacon_template(wl, vif, true);
4249 if (ret < 0)
4250 goto out;
4253 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4254 if (ret < 0)
4255 goto out;
4257 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4258 if (bss_conf->enable_beacon) {
4259 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4260 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4261 if (ret < 0)
4262 goto out;
4264 ret = wl1271_ap_init_hwenc(wl, wlvif);
4265 if (ret < 0)
4266 goto out;
4268 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4269 wl1271_debug(DEBUG_AP, "started AP");
4271 } else {
4272 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4274 * The AP might be in ROC in case we have just
4275 * sent an auth reply. Handle it.
4277 if (test_bit(wlvif->role_id, wl->roc_map))
4278 wl12xx_croc(wl, wlvif->role_id);
4280 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4281 if (ret < 0)
4282 goto out;
4284 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4285 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4286 &wlvif->flags);
4287 wl1271_debug(DEBUG_AP, "stopped AP");
4292 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4293 if (ret < 0)
4294 goto out;
4296 /* Handle HT information change */
4297 if ((changed & BSS_CHANGED_HT) &&
4298 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4299 ret = wl1271_acx_set_ht_information(wl, wlvif,
4300 bss_conf->ht_operation_mode);
4301 if (ret < 0) {
4302 wl1271_warning("Set ht information failed %d", ret);
4303 goto out;
4307 out:
4308 return;
4311 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4312 struct ieee80211_bss_conf *bss_conf,
4313 u32 sta_rate_set)
4315 u32 rates;
4316 int ret;
4318 wl1271_debug(DEBUG_MAC80211,
4319 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4320 bss_conf->bssid, bss_conf->aid,
4321 bss_conf->beacon_int,
4322 bss_conf->basic_rates, sta_rate_set);
4324 wlvif->beacon_int = bss_conf->beacon_int;
4325 rates = bss_conf->basic_rates;
4326 wlvif->basic_rate_set =
4327 wl1271_tx_enabled_rates_get(wl, rates,
4328 wlvif->band);
4329 wlvif->basic_rate =
4330 wl1271_tx_min_rate_get(wl,
4331 wlvif->basic_rate_set);
4333 if (sta_rate_set)
4334 wlvif->rate_set =
4335 wl1271_tx_enabled_rates_get(wl,
4336 sta_rate_set,
4337 wlvif->band);
4339 /* we only support sched_scan while not connected */
4340 if (wl->sched_vif == wlvif)
4341 wl->ops->sched_scan_stop(wl, wlvif);
4343 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4344 if (ret < 0)
4345 return ret;
4347 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4348 if (ret < 0)
4349 return ret;
4351 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4352 if (ret < 0)
4353 return ret;
4355 wlcore_set_ssid(wl, wlvif);
4357 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4359 return 0;
4362 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4364 int ret;
4366 /* revert back to minimum rates for the current band */
4367 wl1271_set_band_rate(wl, wlvif);
4368 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4370 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4371 if (ret < 0)
4372 return ret;
4374 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4375 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4376 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4377 if (ret < 0)
4378 return ret;
4381 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4382 return 0;
4384 /* STA/IBSS mode changes */
4385 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4386 struct ieee80211_vif *vif,
4387 struct ieee80211_bss_conf *bss_conf,
4388 u32 changed)
4390 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4391 bool do_join = false;
4392 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4393 bool ibss_joined = false;
4394 u32 sta_rate_set = 0;
4395 int ret;
4396 struct ieee80211_sta *sta;
4397 bool sta_exists = false;
4398 struct ieee80211_sta_ht_cap sta_ht_cap;
4400 if (is_ibss) {
4401 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4402 changed);
4403 if (ret < 0)
4404 goto out;
4407 if (changed & BSS_CHANGED_IBSS) {
4408 if (bss_conf->ibss_joined) {
4409 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4410 ibss_joined = true;
4411 } else {
4412 wlcore_unset_assoc(wl, wlvif);
4413 wl12xx_cmd_role_stop_sta(wl, wlvif);
4417 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4418 do_join = true;
4420 /* Need to update the SSID (for filtering etc) */
4421 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4422 do_join = true;
4424 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4425 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4426 bss_conf->enable_beacon ? "enabled" : "disabled");
4428 do_join = true;
4431 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4432 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4434 if (changed & BSS_CHANGED_CQM) {
4435 bool enable = false;
4436 if (bss_conf->cqm_rssi_thold)
4437 enable = true;
4438 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4439 bss_conf->cqm_rssi_thold,
4440 bss_conf->cqm_rssi_hyst);
4441 if (ret < 0)
4442 goto out;
4443 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4446 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4447 BSS_CHANGED_ASSOC)) {
4448 rcu_read_lock();
4449 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4450 if (sta) {
4451 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4453 /* save the supp_rates of the ap */
4454 sta_rate_set = sta->supp_rates[wlvif->band];
4455 if (sta->ht_cap.ht_supported)
4456 sta_rate_set |=
4457 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4458 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4459 sta_ht_cap = sta->ht_cap;
4460 sta_exists = true;
4463 rcu_read_unlock();
4466 if (changed & BSS_CHANGED_BSSID) {
4467 if (!is_zero_ether_addr(bss_conf->bssid)) {
4468 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4469 sta_rate_set);
4470 if (ret < 0)
4471 goto out;
4473 /* Need to update the BSSID (for filtering etc) */
4474 do_join = true;
4475 } else {
4476 ret = wlcore_clear_bssid(wl, wlvif);
4477 if (ret < 0)
4478 goto out;
4482 if (changed & BSS_CHANGED_IBSS) {
4483 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4484 bss_conf->ibss_joined);
4486 if (bss_conf->ibss_joined) {
4487 u32 rates = bss_conf->basic_rates;
4488 wlvif->basic_rate_set =
4489 wl1271_tx_enabled_rates_get(wl, rates,
4490 wlvif->band);
4491 wlvif->basic_rate =
4492 wl1271_tx_min_rate_get(wl,
4493 wlvif->basic_rate_set);
4495 /* by default, use 11b + OFDM rates */
4496 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4497 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4498 if (ret < 0)
4499 goto out;
4503 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4504 /* enable beacon filtering */
4505 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4506 if (ret < 0)
4507 goto out;
4510 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4511 if (ret < 0)
4512 goto out;
4514 if (do_join) {
4515 ret = wlcore_join(wl, wlvif);
4516 if (ret < 0) {
4517 wl1271_warning("cmd join failed %d", ret);
4518 goto out;
4522 if (changed & BSS_CHANGED_ASSOC) {
4523 if (bss_conf->assoc) {
4524 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4525 sta_rate_set);
4526 if (ret < 0)
4527 goto out;
4529 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4530 wl12xx_set_authorized(wl, wlvif);
4531 } else {
4532 wlcore_unset_assoc(wl, wlvif);
4536 if (changed & BSS_CHANGED_PS) {
4537 if ((bss_conf->ps) &&
4538 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4539 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4540 int ps_mode;
4541 char *ps_mode_str;
4543 if (wl->conf.conn.forced_ps) {
4544 ps_mode = STATION_POWER_SAVE_MODE;
4545 ps_mode_str = "forced";
4546 } else {
4547 ps_mode = STATION_AUTO_PS_MODE;
4548 ps_mode_str = "auto";
4551 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4553 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4554 if (ret < 0)
4555 wl1271_warning("enter %s ps failed %d",
4556 ps_mode_str, ret);
4557 } else if (!bss_conf->ps &&
4558 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4559 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4561 ret = wl1271_ps_set_mode(wl, wlvif,
4562 STATION_ACTIVE_MODE);
4563 if (ret < 0)
4564 wl1271_warning("exit auto ps failed %d", ret);
4568 /* Handle new association with HT. Do this after join. */
4569 if (sta_exists) {
4570 bool enabled =
4571 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4573 ret = wlcore_hw_set_peer_cap(wl,
4574 &sta_ht_cap,
4575 enabled,
4576 wlvif->rate_set,
4577 wlvif->sta.hlid);
4578 if (ret < 0) {
4579 wl1271_warning("Set ht cap failed %d", ret);
4580 goto out;
4584 if (enabled) {
4585 ret = wl1271_acx_set_ht_information(wl, wlvif,
4586 bss_conf->ht_operation_mode);
4587 if (ret < 0) {
4588 wl1271_warning("Set ht information failed %d",
4589 ret);
4590 goto out;
4595 /* Handle arp filtering. Done after join. */
4596 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4597 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4598 __be32 addr = bss_conf->arp_addr_list[0];
4599 wlvif->sta.qos = bss_conf->qos;
4600 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4602 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4603 wlvif->ip_addr = addr;
4605 /* The template should have been configured only upon
4606  * association. However, the correct IP doesn't seem to be
4607  * set when sending, so we have to reconfigure the template
4608  * upon every IP change. */
4610 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4611 if (ret < 0) {
4612 wl1271_warning("build arp rsp failed: %d", ret);
4613 goto out;
4616 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4617 (ACX_ARP_FILTER_ARP_FILTERING |
4618 ACX_ARP_FILTER_AUTO_ARP),
4619 addr);
4620 } else {
4621 wlvif->ip_addr = 0;
4622 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4625 if (ret < 0)
4626 goto out;
4629 out:
4630 return;
4633 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4634 struct ieee80211_vif *vif,
4635 struct ieee80211_bss_conf *bss_conf,
4636 u32 changed)
4638 struct wl1271 *wl = hw->priv;
4639 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4640 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4641 int ret;
4643 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4644 wlvif->role_id, (int)changed);
4647 /* make sure to cancel pending disconnections if our
4648  * association state changed */
4650 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4651 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4653 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4654 !bss_conf->enable_beacon)
4655 wl1271_tx_flush(wl);
4657 mutex_lock(&wl->mutex);
4659 if (unlikely(wl->state != WLCORE_STATE_ON))
4660 goto out;
4662 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4663 goto out;
4665 ret = pm_runtime_get_sync(wl->dev);
4666 if (ret < 0) {
4667 pm_runtime_put_noidle(wl->dev);
4668 goto out;
4671 if ((changed & BSS_CHANGED_TXPOWER) &&
4672 bss_conf->txpower != wlvif->power_level) {
4674 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4675 if (ret < 0)
4676 goto out;
4678 wlvif->power_level = bss_conf->txpower;
4681 if (is_ap)
4682 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4683 else
4684 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4686 pm_runtime_mark_last_busy(wl->dev);
4687 pm_runtime_put_autosuspend(wl->dev);
4689 out:
4690 mutex_unlock(&wl->mutex);
4693 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4694 struct ieee80211_chanctx_conf *ctx)
4696 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4697 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4698 cfg80211_get_chandef_type(&ctx->def));
4699 return 0;
4702 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4703 struct ieee80211_chanctx_conf *ctx)
4705 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4706 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4707 cfg80211_get_chandef_type(&ctx->def));
4710 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4711 struct ieee80211_chanctx_conf *ctx,
4712 u32 changed)
4714 struct wl1271 *wl = hw->priv;
4715 struct wl12xx_vif *wlvif;
4716 int ret;
4717 int channel = ieee80211_frequency_to_channel(
4718 ctx->def.chan->center_freq);
4720 wl1271_debug(DEBUG_MAC80211,
4721 "mac80211 change chanctx %d (type %d) changed 0x%x",
4722 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4724 mutex_lock(&wl->mutex);
4726 ret = pm_runtime_get_sync(wl->dev);
4727 if (ret < 0) {
4728 pm_runtime_put_noidle(wl->dev);
4729 goto out;
4732 wl12xx_for_each_wlvif(wl, wlvif) {
4733 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4735 rcu_read_lock();
4736 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4737 rcu_read_unlock();
4738 continue;
4740 rcu_read_unlock();
4742 /* start radar if needed */
4743 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4744 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4745 ctx->radar_enabled && !wlvif->radar_enabled &&
4746 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4747 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4748 wlcore_hw_set_cac(wl, wlvif, true);
4749 wlvif->radar_enabled = true;
4753 pm_runtime_mark_last_busy(wl->dev);
4754 pm_runtime_put_autosuspend(wl->dev);
4755 out:
4756 mutex_unlock(&wl->mutex);
4759 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4760 struct ieee80211_vif *vif,
4761 struct ieee80211_chanctx_conf *ctx)
4763 struct wl1271 *wl = hw->priv;
4764 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4765 int channel = ieee80211_frequency_to_channel(
4766 ctx->def.chan->center_freq);
4767 int ret = -EINVAL;
4769 wl1271_debug(DEBUG_MAC80211,
4770 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4771 wlvif->role_id, channel,
4772 cfg80211_get_chandef_type(&ctx->def),
4773 ctx->radar_enabled, ctx->def.chan->dfs_state);
4775 mutex_lock(&wl->mutex);
4777 if (unlikely(wl->state != WLCORE_STATE_ON))
4778 goto out;
4780 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4781 goto out;
4783 ret = pm_runtime_get_sync(wl->dev);
4784 if (ret < 0) {
4785 pm_runtime_put_noidle(wl->dev);
4786 goto out;
4789 wlvif->band = ctx->def.chan->band;
4790 wlvif->channel = channel;
4791 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4793 /* update default rates according to the band */
4794 wl1271_set_band_rate(wl, wlvif);
4796 if (ctx->radar_enabled &&
4797 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4798 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4799 wlcore_hw_set_cac(wl, wlvif, true);
4800 wlvif->radar_enabled = true;
4803 pm_runtime_mark_last_busy(wl->dev);
4804 pm_runtime_put_autosuspend(wl->dev);
4805 out:
4806 mutex_unlock(&wl->mutex);
4808 return 0;
4811 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4812 struct ieee80211_vif *vif,
4813 struct ieee80211_chanctx_conf *ctx)
4815 struct wl1271 *wl = hw->priv;
4816 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4817 int ret;
4819 wl1271_debug(DEBUG_MAC80211,
4820 "mac80211 unassign chanctx (role %d) %d (type %d)",
4821 wlvif->role_id,
4822 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4823 cfg80211_get_chandef_type(&ctx->def));
4825 wl1271_tx_flush(wl);
4827 mutex_lock(&wl->mutex);
4829 if (unlikely(wl->state != WLCORE_STATE_ON))
4830 goto out;
4832 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4833 goto out;
4835 ret = pm_runtime_get_sync(wl->dev);
4836 if (ret < 0) {
4837 pm_runtime_put_noidle(wl->dev);
4838 goto out;
4841 if (wlvif->radar_enabled) {
4842 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4843 wlcore_hw_set_cac(wl, wlvif, false);
4844 wlvif->radar_enabled = false;
4847 pm_runtime_mark_last_busy(wl->dev);
4848 pm_runtime_put_autosuspend(wl->dev);
4849 out:
4850 mutex_unlock(&wl->mutex);
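/*
 * Channel-context switching is only meaningful for AP roles here:
 * __wlcore_switch_vif_chan() stops any running CAC, records the new
 * band/channel/width and restarts radar detection if the new context
 * requires it. Beaconing is expected to be disabled during the switch.
 */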
4853 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4854 struct wl12xx_vif *wlvif,
4855 struct ieee80211_chanctx_conf *new_ctx)
4857 int channel = ieee80211_frequency_to_channel(
4858 new_ctx->def.chan->center_freq);
4860 wl1271_debug(DEBUG_MAC80211,
4861 "switch vif (role %d) %d -> %d chan_type: %d",
4862 wlvif->role_id, wlvif->channel, channel,
4863 cfg80211_get_chandef_type(&new_ctx->def));
4865 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4866 return 0;
4868 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4870 if (wlvif->radar_enabled) {
4871 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4872 wlcore_hw_set_cac(wl, wlvif, false);
4873 wlvif->radar_enabled = false;
4876 wlvif->band = new_ctx->def.chan->band;
4877 wlvif->channel = channel;
4878 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4880 /* start radar if needed */
4881 if (new_ctx->radar_enabled) {
4882 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4883 wlcore_hw_set_cac(wl, wlvif, true);
4884 wlvif->radar_enabled = true;
4887 return 0;
4890 static int
4891 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4892 struct ieee80211_vif_chanctx_switch *vifs,
4893 int n_vifs,
4894 enum ieee80211_chanctx_switch_mode mode)
4896 struct wl1271 *wl = hw->priv;
4897 int i, ret;
4899 wl1271_debug(DEBUG_MAC80211,
4900 "mac80211 switch chanctx n_vifs %d mode %d",
4901 n_vifs, mode);
4903 mutex_lock(&wl->mutex);
4905 ret = pm_runtime_get_sync(wl->dev);
4906 if (ret < 0) {
4907 pm_runtime_put_noidle(wl->dev);
4908 goto out;
4911 for (i = 0; i < n_vifs; i++) {
4912 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4914 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4915 if (ret)
4916 goto out_sleep;
4918 out_sleep:
4919 pm_runtime_mark_last_busy(wl->dev);
4920 pm_runtime_put_autosuspend(wl->dev);
4921 out:
4922 mutex_unlock(&wl->mutex);
4924 return 0;
4927 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4928 struct ieee80211_vif *vif, u16 queue,
4929 const struct ieee80211_tx_queue_params *params)
4931 struct wl1271 *wl = hw->priv;
4932 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4933 u8 ps_scheme;
4934 int ret = 0;
4936 if (wlcore_is_p2p_mgmt(wlvif))
4937 return 0;
4939 mutex_lock(&wl->mutex);
4941 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4943 if (params->uapsd)
4944 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4945 else
4946 ps_scheme = CONF_PS_SCHEME_LEGACY;
4948 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4949 goto out;
4951 ret = pm_runtime_get_sync(wl->dev);
4952 if (ret < 0) {
4953 pm_runtime_put_noidle(wl->dev);
4954 goto out;
4958 /* mac80211 configures the txop in units of 32us,
4959  * but the firmware expects microseconds */
4961 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4962 params->cw_min, params->cw_max,
4963 params->aifs, params->txop << 5);
4964 if (ret < 0)
4965 goto out_sleep;
4967 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4968 CONF_CHANNEL_TYPE_EDCF,
4969 wl1271_tx_get_queue(queue),
4970 ps_scheme, CONF_ACK_POLICY_LEGACY,
4971 0, 0);
4973 out_sleep:
4974 pm_runtime_mark_last_busy(wl->dev);
4975 pm_runtime_put_autosuspend(wl->dev);
4977 out:
4978 mutex_unlock(&wl->mutex);
4980 return ret;
4983 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4984 struct ieee80211_vif *vif)
4987 struct wl1271 *wl = hw->priv;
4988 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4989 u64 mactime = ULLONG_MAX;
4990 int ret;
4992 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4994 mutex_lock(&wl->mutex);
4996 if (unlikely(wl->state != WLCORE_STATE_ON))
4997 goto out;
4999 ret = pm_runtime_get_sync(wl->dev);
5000 if (ret < 0) {
5001 pm_runtime_put_noidle(wl->dev);
5002 goto out;
5005 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
5006 if (ret < 0)
5007 goto out_sleep;
5009 out_sleep:
5010 pm_runtime_mark_last_busy(wl->dev);
5011 pm_runtime_put_autosuspend(wl->dev);
5013 out:
5014 mutex_unlock(&wl->mutex);
5015 return mactime;
5018 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5019 struct survey_info *survey)
5021 struct ieee80211_conf *conf = &hw->conf;
5023 if (idx != 0)
5024 return -ENOENT;
5026 survey->channel = conf->chandef.chan;
5027 survey->filled = 0;
5028 return 0;
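/*
 * AP-mode station bookkeeping: wl1271_allocate_sta() below reserves a
 * host link id (HLID) for a new peer, bounded by wl->max_ap_stations,
 * and restores the peer's saved security sequence counter so that
 * encryption state survives a recovery or resume.
 */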
5031 static int wl1271_allocate_sta(struct wl1271 *wl,
5032 struct wl12xx_vif *wlvif,
5033 struct ieee80211_sta *sta)
5035 struct wl1271_station *wl_sta;
5036 int ret;
5039 if (wl->active_sta_count >= wl->max_ap_stations) {
5040 wl1271_warning("could not allocate HLID - too much stations");
5041 return -EBUSY;
5044 wl_sta = (struct wl1271_station *)sta->drv_priv;
5045 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5046 if (ret < 0) {
5047 wl1271_warning("could not allocate HLID - too many links");
5048 return -EBUSY;
5051 /* use the previous security seq, if this is a recovery/resume */
5052 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5054 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5055 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5056 wl->active_sta_count++;
5057 return 0;
5060 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5062 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5063 return;
5065 clear_bit(hlid, wlvif->ap.sta_hlid_map);
5066 __clear_bit(hlid, &wl->ap_ps_map);
5067 __clear_bit(hlid, &wl->ap_fw_ps_map);
5070 /* save the last used PN in the private part of ieee80211_sta,
5071  * in case of recovery/suspend */
5073 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5075 wl12xx_free_link(wl, wlvif, &hlid);
5076 wl->active_sta_count--;
5079 /* rearm the tx watchdog when the last STA is freed - give the FW a
5080  * chance to return STA-buffered packets before complaining. */
5082 if (wl->active_sta_count == 0)
5083 wl12xx_rearm_tx_watchdog_locked(wl);
5086 static int wl12xx_sta_add(struct wl1271 *wl,
5087 struct wl12xx_vif *wlvif,
5088 struct ieee80211_sta *sta)
5090 struct wl1271_station *wl_sta;
5091 int ret = 0;
5092 u8 hlid;
5094 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5096 ret = wl1271_allocate_sta(wl, wlvif, sta);
5097 if (ret < 0)
5098 return ret;
5100 wl_sta = (struct wl1271_station *)sta->drv_priv;
5101 hlid = wl_sta->hlid;
5103 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5104 if (ret < 0)
5105 wl1271_free_sta(wl, wlvif, hlid);
5107 return ret;
5110 static int wl12xx_sta_remove(struct wl1271 *wl,
5111 struct wl12xx_vif *wlvif,
5112 struct ieee80211_sta *sta)
5114 struct wl1271_station *wl_sta;
5115 int ret = 0, id;
5117 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5119 wl_sta = (struct wl1271_station *)sta->drv_priv;
5120 id = wl_sta->hlid;
5121 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5122 return -EINVAL;
5124 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5125 if (ret < 0)
5126 return ret;
5128 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5129 return ret;
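/*
 * wlcore_roc_if_possible() starts a remain-on-channel on the vif's own
 * role and channel, but only when no other role currently holds a ROC.
 * It is used while a peer is connecting (or an auth reply is pending)
 * so that the exchange is not missed while off-channel.
 */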
5132 static void wlcore_roc_if_possible(struct wl1271 *wl,
5133 struct wl12xx_vif *wlvif)
5135 if (find_first_bit(wl->roc_map,
5136 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5137 return;
5139 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5140 return;
5142 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5146 /* when wl_sta is NULL, we treat this call as if coming from a
5147  * pending auth reply.
5148  * wl->mutex must be taken and the FW must be awake when the call
5149  * takes place. */
5151 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5152 struct wl1271_station *wl_sta, bool in_conn)
5154 if (in_conn) {
5155 if (WARN_ON(wl_sta && wl_sta->in_connection))
5156 return;
5158 if (!wlvif->ap_pending_auth_reply &&
5159 !wlvif->inconn_count)
5160 wlcore_roc_if_possible(wl, wlvif);
5162 if (wl_sta) {
5163 wl_sta->in_connection = true;
5164 wlvif->inconn_count++;
5165 } else {
5166 wlvif->ap_pending_auth_reply = true;
5168 } else {
5169 if (wl_sta && !wl_sta->in_connection)
5170 return;
5172 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5173 return;
5175 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5176 return;
5178 if (wl_sta) {
5179 wl_sta->in_connection = false;
5180 wlvif->inconn_count--;
5181 } else {
5182 wlvif->ap_pending_auth_reply = false;
5185 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5186 test_bit(wlvif->role_id, wl->roc_map))
5187 wl12xx_croc(wl, wlvif->role_id);
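/*
 * Per-peer state machine: wl12xx_update_sta_state() maps mac80211
 * station state transitions onto firmware commands - adding/removing
 * AP-mode peers, authorizing stations, saving and restoring the
 * security sequence number around disassociation, and keeping the
 * remain-on-channel bookkeeping consistent.
 */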
5191 static int wl12xx_update_sta_state(struct wl1271 *wl,
5192 struct wl12xx_vif *wlvif,
5193 struct ieee80211_sta *sta,
5194 enum ieee80211_sta_state old_state,
5195 enum ieee80211_sta_state new_state)
5197 struct wl1271_station *wl_sta;
5198 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5199 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5200 int ret;
5202 wl_sta = (struct wl1271_station *)sta->drv_priv;
5204 /* Add station (AP mode) */
5205 if (is_ap &&
5206 old_state == IEEE80211_STA_NOTEXIST &&
5207 new_state == IEEE80211_STA_NONE) {
5208 ret = wl12xx_sta_add(wl, wlvif, sta);
5209 if (ret)
5210 return ret;
5212 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5215 /* Remove station (AP mode) */
5216 if (is_ap &&
5217 old_state == IEEE80211_STA_NONE &&
5218 new_state == IEEE80211_STA_NOTEXIST) {
5219 /* must not fail */
5220 wl12xx_sta_remove(wl, wlvif, sta);
5222 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5225 /* Authorize station (AP mode) */
5226 if (is_ap &&
5227 new_state == IEEE80211_STA_AUTHORIZED) {
5228 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5229 if (ret < 0)
5230 return ret;
5232 /* reconfigure rates */
5233 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5234 if (ret < 0)
5235 return ret;
5237 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5238 wl_sta->hlid);
5239 if (ret)
5240 return ret;
5242 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5245 /* Authorize station */
5246 if (is_sta &&
5247 new_state == IEEE80211_STA_AUTHORIZED) {
5248 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5249 ret = wl12xx_set_authorized(wl, wlvif);
5250 if (ret)
5251 return ret;
5254 if (is_sta &&
5255 old_state == IEEE80211_STA_AUTHORIZED &&
5256 new_state == IEEE80211_STA_ASSOC) {
5257 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5258 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5261 /* save seq number on disassoc (suspend) */
5262 if (is_sta &&
5263 old_state == IEEE80211_STA_ASSOC &&
5264 new_state == IEEE80211_STA_AUTH) {
5265 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5266 wlvif->total_freed_pkts = 0;
5269 /* restore seq number on assoc (resume) */
5270 if (is_sta &&
5271 old_state == IEEE80211_STA_AUTH &&
5272 new_state == IEEE80211_STA_ASSOC) {
5273 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5276 /* clear ROCs on failure or authorization */
5277 if (is_sta &&
5278 (new_state == IEEE80211_STA_AUTHORIZED ||
5279 new_state == IEEE80211_STA_NOTEXIST)) {
5280 if (test_bit(wlvif->role_id, wl->roc_map))
5281 wl12xx_croc(wl, wlvif->role_id);
5284 if (is_sta &&
5285 old_state == IEEE80211_STA_NOTEXIST &&
5286 new_state == IEEE80211_STA_NONE) {
5287 if (find_first_bit(wl->roc_map,
5288 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5289 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5290 wl12xx_roc(wl, wlvif, wlvif->role_id,
5291 wlvif->band, wlvif->channel);
5294 return 0;
5297 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5298 struct ieee80211_vif *vif,
5299 struct ieee80211_sta *sta,
5300 enum ieee80211_sta_state old_state,
5301 enum ieee80211_sta_state new_state)
5303 struct wl1271 *wl = hw->priv;
5304 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5305 int ret;
5307 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5308 sta->aid, old_state, new_state);
5310 mutex_lock(&wl->mutex);
5312 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5313 ret = -EBUSY;
5314 goto out;
5317 ret = pm_runtime_get_sync(wl->dev);
5318 if (ret < 0) {
5319 pm_runtime_put_noidle(wl->dev);
5320 goto out;
5323 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5325 pm_runtime_mark_last_busy(wl->dev);
5326 pm_runtime_put_autosuspend(wl->dev);
5327 out:
5328 mutex_unlock(&wl->mutex);
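	/* transitions towards a lower sta state are not expected to fail,
	 * so mask any error here */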
5329 if (new_state < old_state)
5330 return 0;
5331 return ret;
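/*
 * RX BlockAck session handling: the firmware tracks at most
 * wl->ba_rx_session_count_max receiver sessions, one bit per TID in the
 * per-link ba_bitmap. TX aggregation is set up by the firmware itself,
 * so the TX AMPDU actions below are rejected.
 */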
5334 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5335 struct ieee80211_vif *vif,
5336 struct ieee80211_ampdu_params *params)
5338 struct wl1271 *wl = hw->priv;
5339 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5340 int ret;
5341 u8 hlid, *ba_bitmap;
5342 struct ieee80211_sta *sta = params->sta;
5343 enum ieee80211_ampdu_mlme_action action = params->action;
5344 u16 tid = params->tid;
5345 u16 *ssn = &params->ssn;
5347 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5348 tid);
5350 /* sanity check - the fields in FW are only 8 bits wide */
5351 if (WARN_ON(tid > 0xFF))
5352 return -ENOTSUPP;
5354 mutex_lock(&wl->mutex);
5356 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5357 ret = -EAGAIN;
5358 goto out;
5361 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5362 hlid = wlvif->sta.hlid;
5363 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5364 struct wl1271_station *wl_sta;
5366 wl_sta = (struct wl1271_station *)sta->drv_priv;
5367 hlid = wl_sta->hlid;
5368 } else {
5369 ret = -EINVAL;
5370 goto out;
5373 ba_bitmap = &wl->links[hlid].ba_bitmap;
5375 ret = pm_runtime_get_sync(wl->dev);
5376 if (ret < 0) {
5377 pm_runtime_put_noidle(wl->dev);
5378 goto out;
5381 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5382 tid, action);
5384 switch (action) {
5385 case IEEE80211_AMPDU_RX_START:
5386 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5387 ret = -ENOTSUPP;
5388 break;
5391 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5392 ret = -EBUSY;
5393 wl1271_error("exceeded max RX BA sessions");
5394 break;
5397 if (*ba_bitmap & BIT(tid)) {
5398 ret = -EINVAL;
5399 wl1271_error("cannot enable RX BA session on active "
5400 "tid: %d", tid);
5401 break;
5404 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5405 hlid,
5406 params->buf_size);
5408 if (!ret) {
5409 *ba_bitmap |= BIT(tid);
5410 wl->ba_rx_session_count++;
5412 break;
5414 case IEEE80211_AMPDU_RX_STOP:
5415 if (!(*ba_bitmap & BIT(tid))) {
5417 /* this happens on reconfig - so only output a debug
5418  * message for now, and don't fail the function. */
5420 wl1271_debug(DEBUG_MAC80211,
5421 "no active RX BA session on tid: %d",
5422 tid);
5423 ret = 0;
5424 break;
5427 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5428 hlid, 0);
5429 if (!ret) {
5430 *ba_bitmap &= ~BIT(tid);
5431 wl->ba_rx_session_count--;
5433 break;
5436 /* BA initiator (TX) sessions are managed by the FW independently,
5437  * so all TX AMPDU actions are rejected here on purpose. */
5439 case IEEE80211_AMPDU_TX_START:
5440 case IEEE80211_AMPDU_TX_STOP_CONT:
5441 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5442 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5443 case IEEE80211_AMPDU_TX_OPERATIONAL:
5444 ret = -EINVAL;
5445 break;
5447 default:
5448 wl1271_error("Incorrect ampdu action id=%x\n", action);
5449 ret = -EINVAL;
5452 pm_runtime_mark_last_busy(wl->dev);
5453 pm_runtime_put_autosuspend(wl->dev);
5455 out:
5456 mutex_unlock(&wl->mutex);
5458 return ret;
5461 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5462 struct ieee80211_vif *vif,
5463 const struct cfg80211_bitrate_mask *mask)
5465 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5466 struct wl1271 *wl = hw->priv;
5467 int i, ret = 0;
5469 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5470 mask->control[NL80211_BAND_2GHZ].legacy,
5471 mask->control[NL80211_BAND_5GHZ].legacy);
5473 mutex_lock(&wl->mutex);
5475 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5476 wlvif->bitrate_masks[i] =
5477 wl1271_tx_enabled_rates_get(wl,
5478 mask->control[i].legacy,
5481 if (unlikely(wl->state != WLCORE_STATE_ON))
5482 goto out;
5484 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5485 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5487 ret = pm_runtime_get_sync(wl->dev);
5488 if (ret < 0) {
5489 pm_runtime_put_noidle(wl->dev);
5490 goto out;
5493 wl1271_set_band_rate(wl, wlvif);
5494 wlvif->basic_rate =
5495 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5496 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5498 pm_runtime_mark_last_busy(wl->dev);
5499 pm_runtime_put_autosuspend(wl->dev);
5501 out:
5502 mutex_unlock(&wl->mutex);
5504 return ret;
5507 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5508 struct ieee80211_vif *vif,
5509 struct ieee80211_channel_switch *ch_switch)
5511 struct wl1271 *wl = hw->priv;
5512 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5513 int ret;
5515 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5517 wl1271_tx_flush(wl);
5519 mutex_lock(&wl->mutex);
5521 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5522 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5523 ieee80211_chswitch_done(vif, false);
5524 goto out;
5525 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5526 goto out;
5529 ret = pm_runtime_get_sync(wl->dev);
5530 if (ret < 0) {
5531 pm_runtime_put_noidle(wl->dev);
5532 goto out;
5535 /* TODO: change mac80211 to pass vif as param */
5537 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5538 unsigned long delay_usec;
5540 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5541 if (ret)
5542 goto out_sleep;
5544 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5546 /* indicate failure 5 seconds after channel switch time */
5547 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5548 ch_switch->count;
5549 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5550 usecs_to_jiffies(delay_usec) +
5551 msecs_to_jiffies(5000));
5554 out_sleep:
5555 pm_runtime_mark_last_busy(wl->dev);
5556 pm_runtime_put_autosuspend(wl->dev);
5558 out:
5559 mutex_unlock(&wl->mutex);
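/*
 * CSA helpers: the current beacon is fetched from mac80211 and the
 * Channel Switch Announcement IE is parsed out of it, so that the
 * firmware can be given the remaining CSA count when a channel switch
 * is triggered from a beacon update.
 */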
5562 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5563 struct wl12xx_vif *wlvif,
5564 u8 eid)
5566 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5567 struct sk_buff *beacon =
5568 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5570 if (!beacon)
5571 return NULL;
5573 return cfg80211_find_ie(eid,
5574 beacon->data + ieoffset,
5575 beacon->len - ieoffset);
5578 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5579 u8 *csa_count)
5581 const u8 *ie;
5582 const struct ieee80211_channel_sw_ie *ie_csa;
5584 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5585 if (!ie)
5586 return -EINVAL;
5588 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5589 *csa_count = ie_csa->count;
5591 return 0;
5594 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5595 struct ieee80211_vif *vif,
5596 struct cfg80211_chan_def *chandef)
5598 struct wl1271 *wl = hw->priv;
5599 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5600 struct ieee80211_channel_switch ch_switch = {
5601 .block_tx = true,
5602 .chandef = *chandef,
5604 int ret;
5606 wl1271_debug(DEBUG_MAC80211,
5607 "mac80211 channel switch beacon (role %d)",
5608 wlvif->role_id);
5610 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5611 if (ret < 0) {
5612 wl1271_error("error getting beacon (for CSA counter)");
5613 return;
5616 mutex_lock(&wl->mutex);
5618 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5619 ret = -EBUSY;
5620 goto out;
5623 ret = pm_runtime_get_sync(wl->dev);
5624 if (ret < 0) {
5625 pm_runtime_put_noidle(wl->dev);
5626 goto out;
5629 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5630 if (ret)
5631 goto out_sleep;
5633 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5635 out_sleep:
5636 pm_runtime_mark_last_busy(wl->dev);
5637 pm_runtime_put_autosuspend(wl->dev);
5638 out:
5639 mutex_unlock(&wl->mutex);
5642 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5643 u32 queues, bool drop)
5645 struct wl1271 *wl = hw->priv;
5647 wl1271_tx_flush(wl);
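/*
 * Remain-on-channel: only a single ROC is supported at a time. The
 * request starts the device role on the target channel and arms
 * roc_complete_work to end the ROC after the requested duration.
 */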
5650 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5651 struct ieee80211_vif *vif,
5652 struct ieee80211_channel *chan,
5653 int duration,
5654 enum ieee80211_roc_type type)
5656 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5657 struct wl1271 *wl = hw->priv;
5658 int channel, active_roc, ret = 0;
5660 channel = ieee80211_frequency_to_channel(chan->center_freq);
5662 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5663 channel, wlvif->role_id);
5665 mutex_lock(&wl->mutex);
5667 if (unlikely(wl->state != WLCORE_STATE_ON))
5668 goto out;
5670 /* return EBUSY if we can't ROC right now */
5671 active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5672 if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5673 wl1271_warning("active roc on role %d", active_roc);
5674 ret = -EBUSY;
5675 goto out;
5678 ret = pm_runtime_get_sync(wl->dev);
5679 if (ret < 0) {
5680 pm_runtime_put_noidle(wl->dev);
5681 goto out;
5684 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5685 if (ret < 0)
5686 goto out_sleep;
5688 wl->roc_vif = vif;
5689 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5690 msecs_to_jiffies(duration));
5691 out_sleep:
5692 pm_runtime_mark_last_busy(wl->dev);
5693 pm_runtime_put_autosuspend(wl->dev);
5694 out:
5695 mutex_unlock(&wl->mutex);
5696 return ret;
5699 static int __wlcore_roc_completed(struct wl1271 *wl)
5701 struct wl12xx_vif *wlvif;
5702 int ret;
5704 /* already completed */
5705 if (unlikely(!wl->roc_vif))
5706 return 0;
5708 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5710 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5711 return -EBUSY;
5713 ret = wl12xx_stop_dev(wl, wlvif);
5714 if (ret < 0)
5715 return ret;
5717 wl->roc_vif = NULL;
5719 return 0;
5722 static int wlcore_roc_completed(struct wl1271 *wl)
5724 int ret;
5726 wl1271_debug(DEBUG_MAC80211, "roc complete");
5728 mutex_lock(&wl->mutex);
5730 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5731 ret = -EBUSY;
5732 goto out;
5735 ret = pm_runtime_get_sync(wl->dev);
5736 if (ret < 0) {
5737 pm_runtime_put_noidle(wl->dev);
5738 goto out;
5741 ret = __wlcore_roc_completed(wl);
5743 pm_runtime_mark_last_busy(wl->dev);
5744 pm_runtime_put_autosuspend(wl->dev);
5745 out:
5746 mutex_unlock(&wl->mutex);
5748 return ret;
5751 static void wlcore_roc_complete_work(struct work_struct *work)
5753 struct delayed_work *dwork;
5754 struct wl1271 *wl;
5755 int ret;
5757 dwork = to_delayed_work(work);
5758 wl = container_of(dwork, struct wl1271, roc_complete_work);
5760 ret = wlcore_roc_completed(wl);
5761 if (!ret)
5762 ieee80211_remain_on_channel_expired(wl->hw);
5765 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5767 struct wl1271 *wl = hw->priv;
5769 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5771 /* TODO: per-vif */
5772 wl1271_tx_flush(wl);
5775 /* we can't just flush_work here, because it might deadlock
5776  * (as we might get called from the same workqueue) */
5778 cancel_delayed_work_sync(&wl->roc_complete_work);
5779 wlcore_roc_completed(wl);
5781 return 0;
5784 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5785 struct ieee80211_vif *vif,
5786 struct ieee80211_sta *sta,
5787 u32 changed)
5789 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5791 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5793 if (!(changed & IEEE80211_RC_BW_CHANGED))
5794 return;
5796 /* this callback is atomic, so schedule a new work */
5797 wlvif->rc_update_bw = sta->bandwidth;
5798 memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5799 ieee80211_queue_work(hw, &wlvif->rc_update_work);
5802 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5803 struct ieee80211_vif *vif,
5804 struct ieee80211_sta *sta,
5805 struct station_info *sinfo)
5807 struct wl1271 *wl = hw->priv;
5808 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5809 s8 rssi_dbm;
5810 int ret;
5812 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5814 mutex_lock(&wl->mutex);
5816 if (unlikely(wl->state != WLCORE_STATE_ON))
5817 goto out;
5819 ret = pm_runtime_get_sync(wl->dev);
5820 if (ret < 0) {
5821 pm_runtime_put_noidle(wl->dev);
5822 goto out_sleep;
5825 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5826 if (ret < 0)
5827 goto out_sleep;
5829 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5830 sinfo->signal = rssi_dbm;
5832 out_sleep:
5833 pm_runtime_mark_last_busy(wl->dev);
5834 pm_runtime_put_autosuspend(wl->dev);
5836 out:
5837 mutex_unlock(&wl->mutex);
5840 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5841 struct ieee80211_sta *sta)
5843 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5844 struct wl1271 *wl = hw->priv;
5845 u8 hlid = wl_sta->hlid;
5847 /* return in units of Kbps */
5848 return (wl->links[hlid].fw_rate_mbps * 1000);
5851 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5853 struct wl1271 *wl = hw->priv;
5854 bool ret = false;
5856 mutex_lock(&wl->mutex);
5858 if (unlikely(wl->state != WLCORE_STATE_ON))
5859 goto out;
5861 /* packets are considered pending if in the TX queue or the FW */
5862 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5863 out:
5864 mutex_unlock(&wl->mutex);
5866 return ret;
5869 /* can't be const, mac80211 writes to this */
5870 static struct ieee80211_rate wl1271_rates[] = {
5871 { .bitrate = 10,
5872 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5873 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5874 { .bitrate = 20,
5875 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5876 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5877 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5878 { .bitrate = 55,
5879 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5880 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5881 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5882 { .bitrate = 110,
5883 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5884 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5885 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5886 { .bitrate = 60,
5887 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5888 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5889 { .bitrate = 90,
5890 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5891 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5892 { .bitrate = 120,
5893 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5894 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5895 { .bitrate = 180,
5896 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5897 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5898 { .bitrate = 240,
5899 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5900 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5901 { .bitrate = 360,
5902 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5903 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5904 { .bitrate = 480,
5905 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5906 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5907 { .bitrate = 540,
5908 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5909 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5912 /* can't be const, mac80211 writes to this */
5913 static struct ieee80211_channel wl1271_channels[] = {
5914 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5915 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5916 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5917 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5918 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5919 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5920 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5921 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5922 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5923 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5924 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5925 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5926 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5927 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5930 /* can't be const, mac80211 writes to this */
5931 static struct ieee80211_supported_band wl1271_band_2ghz = {
5932 .channels = wl1271_channels,
5933 .n_channels = ARRAY_SIZE(wl1271_channels),
5934 .bitrates = wl1271_rates,
5935 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5938 /* 5 GHz data rates for WL1273 */
5939 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5940 { .bitrate = 60,
5941 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5942 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5943 { .bitrate = 90,
5944 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5945 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5946 { .bitrate = 120,
5947 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5948 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5949 { .bitrate = 180,
5950 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5951 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5952 { .bitrate = 240,
5953 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5954 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5955 { .bitrate = 360,
5956 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5957 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5958 { .bitrate = 480,
5959 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5960 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5961 { .bitrate = 540,
5962 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5963 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5966 /* 5 GHz band channels for WL1273 */
5967 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5968 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5969 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5970 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5971 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5972 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5973 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5974 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5975 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5976 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5977 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5978 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5979 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5980 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5981 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5982 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5983 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5984 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5985 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5986 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5987 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5988 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5989 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5990 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5991 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5992 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5993 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5994 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5995 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5996 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5997 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5998 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
6001 static struct ieee80211_supported_band wl1271_band_5ghz = {
6002 .channels = wl1271_channels_5ghz,
6003 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
6004 .bitrates = wl1271_rates_5ghz,
6005 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
6008 static const struct ieee80211_ops wl1271_ops = {
6009 .start = wl1271_op_start,
6010 .stop = wlcore_op_stop,
6011 .add_interface = wl1271_op_add_interface,
6012 .remove_interface = wl1271_op_remove_interface,
6013 .change_interface = wl12xx_op_change_interface,
6014 #ifdef CONFIG_PM
6015 .suspend = wl1271_op_suspend,
6016 .resume = wl1271_op_resume,
6017 #endif
6018 .config = wl1271_op_config,
6019 .prepare_multicast = wl1271_op_prepare_multicast,
6020 .configure_filter = wl1271_op_configure_filter,
6021 .tx = wl1271_op_tx,
6022 .set_key = wlcore_op_set_key,
6023 .hw_scan = wl1271_op_hw_scan,
6024 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
6025 .sched_scan_start = wl1271_op_sched_scan_start,
6026 .sched_scan_stop = wl1271_op_sched_scan_stop,
6027 .bss_info_changed = wl1271_op_bss_info_changed,
6028 .set_frag_threshold = wl1271_op_set_frag_threshold,
6029 .set_rts_threshold = wl1271_op_set_rts_threshold,
6030 .conf_tx = wl1271_op_conf_tx,
6031 .get_tsf = wl1271_op_get_tsf,
6032 .get_survey = wl1271_op_get_survey,
6033 .sta_state = wl12xx_op_sta_state,
6034 .ampdu_action = wl1271_op_ampdu_action,
6035 .tx_frames_pending = wl1271_tx_frames_pending,
6036 .set_bitrate_mask = wl12xx_set_bitrate_mask,
6037 .set_default_unicast_key = wl1271_op_set_default_key_idx,
6038 .channel_switch = wl12xx_op_channel_switch,
6039 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
6040 .flush = wlcore_op_flush,
6041 .remain_on_channel = wlcore_op_remain_on_channel,
6042 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6043 .add_chanctx = wlcore_op_add_chanctx,
6044 .remove_chanctx = wlcore_op_remove_chanctx,
6045 .change_chanctx = wlcore_op_change_chanctx,
6046 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6047 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6048 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6049 .sta_rc_update = wlcore_op_sta_rc_update,
6050 .sta_statistics = wlcore_op_sta_statistics,
6051 .get_expected_throughput = wlcore_op_get_expected_throughput,
6052 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
6056 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6058 u8 idx;
6060 BUG_ON(band >= 2);
6062 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6063 wl1271_error("Illegal RX rate from HW: %d", rate);
6064 return 0;
6067 idx = wl->band_rate_to_idx[band][rate];
6068 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6069 wl1271_error("Unsupported RX rate from HW: %d", rate);
6070 return 0;
6073 return idx;
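/*
 * MAC address derivation: the chip exposes a single OUI+NIC base
 * address, and consecutive NIC values are handed out for each supported
 * interface. If the chip provides fewer addresses than
 * WLCORE_NUM_MAC_ADDRESSES, the first address is reused for the last
 * slot with the locally-administered bit set.
 */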
6076 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6078 int i;
6080 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6081 oui, nic);
6083 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6084 wl1271_warning("NIC part of the MAC address wraps around!");
6086 for (i = 0; i < wl->num_mac_addr; i++) {
6087 wl->addresses[i].addr[0] = (u8)(oui >> 16);
6088 wl->addresses[i].addr[1] = (u8)(oui >> 8);
6089 wl->addresses[i].addr[2] = (u8) oui;
6090 wl->addresses[i].addr[3] = (u8)(nic >> 16);
6091 wl->addresses[i].addr[4] = (u8)(nic >> 8);
6092 wl->addresses[i].addr[5] = (u8) nic;
6093 nic++;
6096 /* we may be one address short at the most */
6097 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6100 /* turn on the LAA bit in the first address and use it as
6101  * the last address. */
6103 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6104 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6105 memcpy(&wl->addresses[idx], &wl->addresses[0],
6106 sizeof(wl->addresses[0]));
6107 /* LAA bit */
6108 wl->addresses[idx].addr[0] |= BIT(1);
6111 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6112 wl->hw->wiphy->addresses = wl->addresses;
6115 static int wl12xx_get_hw_info(struct wl1271 *wl)
6117 int ret;
6119 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6120 if (ret < 0)
6121 goto out;
6123 wl->fuse_oui_addr = 0;
6124 wl->fuse_nic_addr = 0;
6126 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6127 if (ret < 0)
6128 goto out;
6130 if (wl->ops->get_mac)
6131 ret = wl->ops->get_mac(wl);
6133 out:
6134 return ret;
6137 static int wl1271_register_hw(struct wl1271 *wl)
6139 int ret;
6140 u32 oui_addr = 0, nic_addr = 0;
6141 struct platform_device *pdev = wl->pdev;
6142 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6144 if (wl->mac80211_registered)
6145 return 0;
6147 if (wl->nvs_len >= 12) {
6148 /* NOTE: The wl->nvs->nvs element must be first, in
6149 * order to simplify the casting, we assume it is at
6150 * the beginning of the wl->nvs structure. */
6152 u8 *nvs_ptr = (u8 *)wl->nvs;
6154 oui_addr =
6155 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6156 nic_addr =
6157 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6160 /* if the MAC address is zeroed in the NVS, derive it from fuse */
6161 if (oui_addr == 0 && nic_addr == 0) {
6162 oui_addr = wl->fuse_oui_addr;
6163 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6164 nic_addr = wl->fuse_nic_addr + 1;
6167 if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6168 wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6169 if (!strcmp(pdev_data->family->name, "wl18xx")) {
6170 wl1271_warning("This default nvs file can be removed from the file system");
6171 } else {
6172 wl1271_warning("Your device performance is not optimized.");
6173 wl1271_warning("Please use the calibrator tool to configure your device.");
6176 if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6177 wl1271_warning("Fuse mac address is zero. using random mac");
6178 /* Use TI oui and a random nic */
6179 oui_addr = WLCORE_TI_OUI_ADDRESS;
6180 nic_addr = get_random_int();
6181 } else {
6182 oui_addr = wl->fuse_oui_addr;
6183 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6184 nic_addr = wl->fuse_nic_addr + 1;
6188 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6190 ret = ieee80211_register_hw(wl->hw);
6191 if (ret < 0) {
6192 wl1271_error("unable to register mac80211 hw: %d", ret);
6193 goto out;
6196 wl->mac80211_registered = true;
6198 wl1271_debugfs_init(wl);
6200 wl1271_notice("loaded");
6202 out:
6203 return ret;
6206 static void wl1271_unregister_hw(struct wl1271 *wl)
6208 if (wl->plt)
6209 wl1271_plt_stop(wl);
6211 ieee80211_unregister_hw(wl->hw);
6212 wl->mac80211_registered = false;
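/*
 * wl1271_init_ieee80211() advertises the driver's mac80211/cfg80211
 * capabilities: cipher suites, interface modes, scan limits, hardware
 * flags and the per-device copies of the 2.4 and 5 GHz band descriptors.
 */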
6216 static int wl1271_init_ieee80211(struct wl1271 *wl)
6218 int i;
6219 static const u32 cipher_suites[] = {
6220 WLAN_CIPHER_SUITE_WEP40,
6221 WLAN_CIPHER_SUITE_WEP104,
6222 WLAN_CIPHER_SUITE_TKIP,
6223 WLAN_CIPHER_SUITE_CCMP,
6224 WL1271_CIPHER_SUITE_GEM,
6227 /* The tx descriptor buffer */
6228 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6230 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6231 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6233 /* unit us */
6234 /* FIXME: find a proper value */
6235 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6237 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6238 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6239 ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6240 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6241 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6242 ieee80211_hw_set(wl->hw, AP_LINK_PS);
6243 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6244 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6245 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6246 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6247 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6248 ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6249 ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6250 ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6252 wl->hw->wiphy->cipher_suites = cipher_suites;
6253 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6255 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6256 BIT(NL80211_IFTYPE_AP) |
6257 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6258 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6259 #ifdef CONFIG_MAC80211_MESH
6260 BIT(NL80211_IFTYPE_MESH_POINT) |
6261 #endif
6262 BIT(NL80211_IFTYPE_P2P_GO);
6264 wl->hw->wiphy->max_scan_ssids = 1;
6265 wl->hw->wiphy->max_sched_scan_ssids = 16;
6266 wl->hw->wiphy->max_match_sets = 16;
6268 /* Maximum length of elements in scanning probe request templates
6269  * should be the maximum length possible for a template, without
6270  * the IEEE80211 header of the template */
6272 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6273 sizeof(struct ieee80211_header);
6275 wl->hw->wiphy->max_sched_scan_reqs = 1;
6276 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6277 sizeof(struct ieee80211_header);
6279 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6281 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6282 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6283 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6285 wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6287 /* make sure all our channels fit in the scanned_ch bitmask */
6288 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6289 ARRAY_SIZE(wl1271_channels_5ghz) >
6290 WL1271_MAX_CHANNELS);
6292 /* clear channel flags from the previous usage
6293  * and restore max_power & max_antenna_gain values. */
6295 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6296 wl1271_band_2ghz.channels[i].flags = 0;
6297 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6298 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6301 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6302 wl1271_band_5ghz.channels[i].flags = 0;
6303 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6304 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6308 /* We keep local copies of the band structs because we need to
6309  * modify them on a per-device basis. */
6311 memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6312 sizeof(wl1271_band_2ghz));
6313 memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6314 &wl->ht_cap[NL80211_BAND_2GHZ],
6315 sizeof(*wl->ht_cap));
6316 memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6317 sizeof(wl1271_band_5ghz));
6318 memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6319 &wl->ht_cap[NL80211_BAND_5GHZ],
6320 sizeof(*wl->ht_cap));
6322 wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6323 &wl->bands[NL80211_BAND_2GHZ];
6324 wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6325 &wl->bands[NL80211_BAND_5GHZ];
6328 /* allow 4 queues per mac address we support +
6329  * 1 cab queue per mac + one global offchannel Tx queue */
6331 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
	/* the last queue is the offchannel queue */
	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
	wl->hw->max_rates = 1;

	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;

	/* the FW answers probe-requests in AP-mode */
	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
	wl->hw->wiphy->probe_resp_offload =
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;

	/* allowed interface combinations */
	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;

	/* register vendor commands */
	wlcore_set_vendor_commands(wl->hw->wiphy);

	SET_IEEE80211_DEV(wl->hw, wl->dev);

	wl->hw->sta_data_size = sizeof(struct wl1271_station);
	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);

	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;

	return 0;
}
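/*
 * Everything above only declares capabilities towards mac80211/cfg80211;
 * wl1271_init_ieee80211() is called from wlcore_nvs_cb() below, after the
 * chip has been powered on and identified and just before
 * wl1271_register_hw().
 */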
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
				     u32 mbox_size)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	int i, j, ret;
	unsigned int order;

	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
	if (!hw) {
		wl1271_error("could not alloc ieee80211_hw");
		ret = -ENOMEM;
		goto err_hw_alloc;
	}

	wl = hw->priv;
	memset(wl, 0, sizeof(*wl));

	wl->priv = kzalloc(priv_size, GFP_KERNEL);
	if (!wl->priv) {
		wl1271_error("could not alloc wl priv");
		ret = -ENOMEM;
		goto err_priv_alloc;
	}

	INIT_LIST_HEAD(&wl->wlvif_list);

	wl->hw = hw;

	/*
	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
	 * we don't allocate any additional resource here, so that's fine.
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++)
		for (j = 0; j < WLCORE_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);

	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);

	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);

	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
	if (!wl->freezable_wq) {
		ret = -ENOMEM;
		goto err_hw;
	}

	wl->channel = 0;
	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->band = NL80211_BAND_2GHZ;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->flags = 0;
	wl->sg_enabled = true;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->recovery_count = 0;
	wl->hw_pg_ver = -1;
	wl->ap_ps_map = 0;
	wl->ap_fw_ps_map = 0;
	wl->quirks = 0;
	wl->system_hlid = WL12XX_SYSTEM_HLID;
	wl->active_sta_count = 0;
	wl->active_link_count = 0;
	wl->fwlog_size = 0;

	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
	for (i = 0; i < wl->num_tx_desc; i++)
		wl->tx_frames[i] = NULL;

	spin_lock_init(&wl->wl_lock);

	wl->state = WLCORE_STATE_OFF;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	mutex_init(&wl->mutex);
	mutex_init(&wl->flush_mutex);
	init_completion(&wl->nvs_loading_complete);

	order = get_order(aggr_buf_size);
	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
	if (!wl->aggr_buf) {
		ret = -ENOMEM;
		goto err_wq;
	}
	wl->aggr_buf_size = aggr_buf_size;

	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
	if (!wl->dummy_packet) {
		ret = -ENOMEM;
		goto err_aggr;
	}

	/* Allocate one page for the FW log */
	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
	if (!wl->fwlog) {
		ret = -ENOMEM;
		goto err_dummy_packet;
	}

	wl->mbox_size = mbox_size;
	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
	if (!wl->mbox) {
		ret = -ENOMEM;
		goto err_fwlog;
	}

	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
	if (!wl->buffer_32) {
		ret = -ENOMEM;
		goto err_mbox;
	}

	return hw;

err_mbox:
	kfree(wl->mbox);

err_fwlog:
	free_page((unsigned long)wl->fwlog);

err_dummy_packet:
	dev_kfree_skb(wl->dummy_packet);

err_aggr:
	free_pages((unsigned long)wl->aggr_buf, order);

err_wq:
	destroy_workqueue(wl->freezable_wq);

err_hw:
	wl1271_debugfs_exit(wl);
	kfree(wl->priv);

err_priv_alloc:
	ieee80211_free_hw(hw);

err_hw_alloc:

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
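/*
 * Rough usage sketch, illustrative only: a chip-specific glue driver
 * (wl12xx/wl18xx style) is expected to pair wlcore_alloc_hw() with
 * wlcore_free_hw() roughly as follows. The chip_* names are placeholders,
 * not definitions from this file:
 *
 *	struct ieee80211_hw *hw;
 *	struct wl1271 *wl;
 *
 *	hw = wlcore_alloc_hw(sizeof(struct chip_priv), CHIP_AGGR_BUF_SIZE,
 *			     sizeof(struct chip_event_mailbox));
 *	if (IS_ERR(hw))
 *		return PTR_ERR(hw);
 *	wl = hw->priv;
 *	wl->ops = &chip_ops;
 *	wl->ptable = chip_ptable;
 *	...
 *	wlcore_free_hw(wl);	// on the error path or at remove time
 */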
int wlcore_free_hw(struct wl1271 *wl)
{
	/* Unblock any fwlog readers */
	mutex_lock(&wl->mutex);
	wl->fwlog_size = -1;
	mutex_unlock(&wl->mutex);

	wlcore_sysfs_free(wl);

	kfree(wl->buffer_32);
	kfree(wl->mbox);
	free_page((unsigned long)wl->fwlog);
	dev_kfree_skb(wl->dummy_packet);
	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));

	wl1271_debugfs_exit(wl);

	vfree(wl->fw);
	wl->fw = NULL;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	kfree(wl->nvs);
	wl->nvs = NULL;

	kfree(wl->raw_fw_status);
	kfree(wl->fw_status);
	kfree(wl->tx_res_if);
	destroy_workqueue(wl->freezable_wq);

	kfree(wl->priv);
	ieee80211_free_hw(wl->hw);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_free_hw);

#ifdef CONFIG_PM
static const struct wiphy_wowlan_support wlcore_wowlan_support = {
	.flags = WIPHY_WOWLAN_ANY,
	.n_patterns = WL1271_MAX_RX_FILTERS,
	.pattern_min_len = 1,
	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
};
#endif
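/*
 * With this descriptor installed (see wlcore_nvs_cb() below, where it is
 * hooked up when pwr_in_suspend is set), user space can arm wake-on-WLAN
 * via nl80211 before suspending; with the "any" trigger a command along the
 * lines of "iw phy <phy> wowlan enable any" should be sufficient (shown
 * only as a hint, the exact tooling is outside this file).
 */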
static irqreturn_t wlcore_hardirq(int irq, void *cookie)
{
	return IRQ_WAKE_THREAD;
}
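/*
 * The primary handler above does nothing but wake the threaded handler
 * (wlcore_irq). It is only installed from wlcore_nvs_cb() below when the
 * interrupt is edge-triggered; for level-triggered interrupts the default
 * primary handler is used and IRQF_ONESHOT is set instead.
 */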
static void wlcore_nvs_cb(const struct firmware *fw, void *context)
{
	struct wl1271 *wl = context;
	struct platform_device *pdev = wl->pdev;
	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
	struct resource *res;

	int ret;
	irq_handler_t hardirq_fn = NULL;

	if (fw) {
		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
		if (!wl->nvs) {
			wl1271_error("Could not allocate nvs data");
			goto out;
		}
		wl->nvs_len = fw->size;
	} else if (pdev_data->family->nvs_name) {
		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
			     pdev_data->family->nvs_name);
		wl->nvs = NULL;
		wl->nvs_len = 0;
	} else {
		wl->nvs = NULL;
		wl->nvs_len = 0;
	}

	ret = wl->ops->setup(wl);
	if (ret < 0)
		goto out_free_nvs;

	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);

	/* adjust some runtime configuration parameters */
	wlcore_adjust_conf(wl);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		wl1271_error("Could not get IRQ resource");
		goto out_free_nvs;
	}

	wl->irq = res->start;
	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
	wl->if_ops = pdev_data->if_ops;

	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
		hardirq_fn = wlcore_hardirq;
	else
		wl->irq_flags |= IRQF_ONESHOT;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out_free_nvs;

	ret = wl12xx_get_hw_info(wl);
	if (ret < 0) {
		wl1271_error("couldn't get hw info");
		wl1271_power_off(wl);
		goto out_free_nvs;
	}

	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
				   wl->irq_flags, pdev->name, wl);
	if (ret < 0) {
		wl1271_error("interrupt configuration failed");
		wl1271_power_off(wl);
		goto out_free_nvs;
	}

#ifdef CONFIG_PM
	ret = enable_irq_wake(wl->irq);
	if (!ret) {
		wl->irq_wake_enabled = true;
		device_init_wakeup(wl->dev, 1);
		if (pdev_data->pwr_in_suspend)
			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
	}
#endif
	disable_irq(wl->irq);
	wl1271_power_off(wl);

	ret = wl->ops->identify_chip(wl);
	if (ret < 0)
		goto out_irq;

	ret = wl1271_init_ieee80211(wl);
	if (ret)
		goto out_irq;

	ret = wl1271_register_hw(wl);
	if (ret)
		goto out_irq;

	ret = wlcore_sysfs_init(wl);
	if (ret)
		goto out_unreg;

	wl->initialized = true;
	goto out;

out_unreg:
	wl1271_unregister_hw(wl);

out_irq:
	free_irq(wl->irq, wl);

out_free_nvs:
	kfree(wl->nvs);

out:
	release_firmware(fw);
	complete_all(&wl->nvs_loading_complete);
}
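/*
 * Bring-up sequence driven by this callback: copy the NVS image (if one was
 * found), run the chip-specific setup op, power the chip up just long
 * enough to read the hardware info, request the IRQ, identify the chip,
 * then register with mac80211 and sysfs. Every failure unwinds through the
 * out_* labels and still completes nvs_loading_complete, so that
 * wlcore_remove() never blocks on it forever.
 */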
static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
{
	struct wl1271 *wl = dev_get_drvdata(dev);
	struct wl12xx_vif *wlvif;
	int error;

	/* We do not enter elp sleep in PLT mode */
	if (wl->plt)
		return 0;

	/* Nothing to do if no ELP mode requested */
	if (wl->sleep_auth != WL1271_PSM_ELP)
		return 0;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
			return -EBUSY;
	}

	wl1271_debug(DEBUG_PSM, "chip to elp");
	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
	if (error < 0) {
		wl12xx_queue_recovery_work(wl);

		return error;
	}

	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);

	return 0;
}
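/*
 * Note that runtime suspend is refused with -EBUSY as long as any interface
 * is in use but not yet in power-save, so the chip is only put into ELP
 * once every active role has entered PSM.
 */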
static int __maybe_unused wlcore_runtime_resume(struct device *dev)
{
	struct wl1271 *wl = dev_get_drvdata(dev);
	DECLARE_COMPLETION_ONSTACK(compl);
	unsigned long flags;
	int ret;
	unsigned long start_time = jiffies;
	bool pending = false;
	bool recovery = false;

	/* Nothing to do if no ELP mode requested */
	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
		return 0;

	wl1271_debug(DEBUG_PSM, "waking up chip from elp");

	spin_lock_irqsave(&wl->wl_lock, flags);
	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
		pending = true;
	else
		wl->elp_compl = &compl;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
	if (ret < 0) {
		recovery = true;
		goto err;
	}

	if (!pending) {
		ret = wait_for_completion_timeout(&compl,
			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
		if (ret == 0) {
			wl1271_warning("ELP wakeup timeout!");

			/* Return no error for runtime PM for recovery */
			ret = 0;
			recovery = true;
			goto err;
		}
	}

	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);

	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
		     jiffies_to_msecs(jiffies - start_time));

	return 0;

err:
	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->elp_compl = NULL;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	if (recovery) {
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);
	}

	return ret;
}
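/*
 * The ELP wakeup handshake leans on the interrupt path: the wakeup write
 * above makes the firmware raise an interrupt, and the IRQ handler is
 * expected to complete wl->elp_compl; a wakeup interrupt that is already
 * being serviced (WL1271_FLAG_IRQ_RUNNING) skips the wait altogether.
 */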
static const struct dev_pm_ops wlcore_pm_ops = {
	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
			   wlcore_runtime_resume,
			   NULL)
};

int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
	const char *nvs_name;
	int ret = 0;

	if (!wl->ops || !wl->ptable || !pdev_data)
		return -EINVAL;

	wl->dev = &pdev->dev;
	wl->pdev = pdev;
	platform_set_drvdata(pdev, wl);

	if (pdev_data->family && pdev_data->family->nvs_name) {
		nvs_name = pdev_data->family->nvs_name;
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					      nvs_name, &pdev->dev, GFP_KERNEL,
					      wl, wlcore_nvs_cb);
		if (ret < 0) {
			wl1271_error("request_firmware_nowait failed for %s: %d",
				     nvs_name, ret);
			complete_all(&wl->nvs_loading_complete);
		}
	} else {
		wlcore_nvs_cb(NULL, wl);
	}

	wl->dev->driver->pm = &wlcore_pm_ops;
	pm_runtime_set_autosuspend_delay(wl->dev, 50);
	pm_runtime_use_autosuspend(wl->dev);
	pm_runtime_enable(wl->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(wlcore_probe);
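/*
 * Illustrative sketch of the runtime-PM pattern the rest of the driver can
 * follow once wlcore_probe() has enabled autosuspend above (generic code,
 * not quoted from this file):
 *
 *	ret = pm_runtime_get_sync(wl->dev);
 *	if (ret < 0) {
 *		pm_runtime_put_noidle(wl->dev);
 *		goto out;
 *	}
 *	// ... talk to the chip ...
 *	pm_runtime_mark_last_busy(wl->dev);
 *	pm_runtime_put_autosuspend(wl->dev);
 */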
int wlcore_remove(struct platform_device *pdev)
{
	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
	struct wl1271 *wl = platform_get_drvdata(pdev);
	int error;

	error = pm_runtime_get_sync(wl->dev);
	if (error < 0)
		dev_warn(wl->dev, "PM runtime failed: %i\n", error);

	wl->dev->driver->pm = NULL;

	if (pdev_data->family && pdev_data->family->nvs_name)
		wait_for_completion(&wl->nvs_loading_complete);
	if (!wl->initialized)
		return 0;

	if (wl->irq_wake_enabled) {
		device_init_wakeup(wl->dev, 0);
		disable_irq_wake(wl->irq);
	}
	wl1271_unregister_hw(wl);

	pm_runtime_put_sync(wl->dev);
	pm_runtime_dont_use_autosuspend(wl->dev);
	pm_runtime_disable(wl->dev);

	free_irq(wl->irq, wl);
	wlcore_free_hw(wl);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_remove);

u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, dbgpins or disable");

module_param(fwlog_mem_blocks, int, 0600);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");

module_param(bug_on_recovery, int, 0600);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, 0600);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
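/*
 * The module parameters above can be given at load time, e.g.
 * "modprobe wlcore debug_level=0x3 fwlog=continuous" (example values only;
 * the accepted fwlog strings are listed in the parameter description), and
 * the 0600 parameters such as debug_level can also be changed at runtime
 * through /sys/module/wlcore/parameters/.
 */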