[linux/fpc-iii.git] drivers/net/wireless/ti/wlcore/main.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 */
9 #include <linux/module.h>
10 #include <linux/firmware.h>
11 #include <linux/etherdevice.h>
12 #include <linux/vmalloc.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_wakeirq.h>
18 #include "wlcore.h"
19 #include "debug.h"
20 #include "wl12xx_80211.h"
21 #include "io.h"
22 #include "tx.h"
23 #include "ps.h"
24 #include "init.h"
25 #include "debugfs.h"
26 #include "testmode.h"
27 #include "vendor_cmd.h"
28 #include "scan.h"
29 #include "hw_ops.h"
30 #include "sysfs.h"
32 #define WL1271_BOOT_RETRIES 3
33 #define WL1271_SUSPEND_SLEEP 100
34 #define WL1271_WAKEUP_TIMEOUT 500
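/*
 * Module parameters. fwlog_param selects the firmware logger mode
 * ("continuous", "dbgpins" or "disable"); a value of -1 marks an integer
 * parameter as "not set on the command line", in which case the defaults
 * from the per-chip configuration are kept (see wlcore_adjust_conf()).
 */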
36 static char *fwlog_param;
37 static int fwlog_mem_blocks = -1;
38 static int bug_on_recovery = -1;
39 static int no_recovery = -1;
41 static void __wl1271_op_remove_interface(struct wl1271 *wl,
42 struct ieee80211_vif *vif,
43 bool reset_tx_queues);
44 static void wlcore_op_stop_locked(struct wl1271 *wl);
45 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
47 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
49 int ret;
51 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
52 return -EINVAL;
54 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
55 return 0;
57 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
58 return 0;
60 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
61 if (ret < 0)
62 return ret;
64 wl1271_info("Association completed.");
65 return 0;
68 static void wl1271_reg_notify(struct wiphy *wiphy,
69 struct regulatory_request *request)
71 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
72 struct wl1271 *wl = hw->priv;
74 /* copy the current dfs region */
75 if (request)
76 wl->dfs_region = request->dfs_region;
78 wlcore_regdomain_config(wl);
81 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
82 bool enable)
84 int ret = 0;
86 /* we should hold wl->mutex */
87 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
88 if (ret < 0)
89 goto out;
91 if (enable)
92 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
93 else
94 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
95 out:
96 return ret;
100 * this function is being called when the rx_streaming interval
101  * has been changed or rx_streaming should be disabled
103 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
105 int ret = 0;
106 int period = wl->conf.rx_streaming.interval;
108 /* don't reconfigure if rx_streaming is disabled */
109 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
110 goto out;
112 /* reconfigure/disable according to new streaming_period */
113 if (period &&
114 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
115 (wl->conf.rx_streaming.always ||
116 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
117 ret = wl1271_set_rx_streaming(wl, wlvif, true);
118 else {
119 ret = wl1271_set_rx_streaming(wl, wlvif, false);
120 /* don't cancel_work_sync since we might deadlock */
121 del_timer_sync(&wlvif->rx_streaming_timer);
123 out:
124 return ret;
127 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
129 int ret;
130 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
131 rx_streaming_enable_work);
132 struct wl1271 *wl = wlvif->wl;
134 mutex_lock(&wl->mutex);
136 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
137 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
138 (!wl->conf.rx_streaming.always &&
139 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
140 goto out;
142 if (!wl->conf.rx_streaming.interval)
143 goto out;
145 ret = pm_runtime_get_sync(wl->dev);
146 if (ret < 0) {
147 pm_runtime_put_noidle(wl->dev);
148 goto out;
151 ret = wl1271_set_rx_streaming(wl, wlvif, true);
152 if (ret < 0)
153 goto out_sleep;
155 /* stop it after some time of inactivity */
156 mod_timer(&wlvif->rx_streaming_timer,
157 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
159 out_sleep:
160 pm_runtime_mark_last_busy(wl->dev);
161 pm_runtime_put_autosuspend(wl->dev);
162 out:
163 mutex_unlock(&wl->mutex);
166 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
168 int ret;
169 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
170 rx_streaming_disable_work);
171 struct wl1271 *wl = wlvif->wl;
173 mutex_lock(&wl->mutex);
175 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
176 goto out;
178 ret = pm_runtime_get_sync(wl->dev);
179 if (ret < 0) {
180 pm_runtime_put_noidle(wl->dev);
181 goto out;
184 ret = wl1271_set_rx_streaming(wl, wlvif, false);
185 if (ret)
186 goto out_sleep;
188 out_sleep:
189 pm_runtime_mark_last_busy(wl->dev);
190 pm_runtime_put_autosuspend(wl->dev);
191 out:
192 mutex_unlock(&wl->mutex);
195 static void wl1271_rx_streaming_timer(struct timer_list *t)
197 struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
198 struct wl1271 *wl = wlvif->wl;
199 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
202 /* wl->mutex must be taken */
203 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
205 /* if the watchdog is not armed, don't do anything */
206 if (wl->tx_allocated_blocks == 0)
207 return;
209 cancel_delayed_work(&wl->tx_watchdog_work);
210 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
211 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
214 static void wlcore_rc_update_work(struct work_struct *work)
216 int ret;
217 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
218 rc_update_work);
219 struct wl1271 *wl = wlvif->wl;
220 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
222 mutex_lock(&wl->mutex);
224 if (unlikely(wl->state != WLCORE_STATE_ON))
225 goto out;
227 ret = pm_runtime_get_sync(wl->dev);
228 if (ret < 0) {
229 pm_runtime_put_noidle(wl->dev);
230 goto out;
233 if (ieee80211_vif_is_mesh(vif)) {
234 ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
235 true, wlvif->sta.hlid);
236 if (ret < 0)
237 goto out_sleep;
238 } else {
239 wlcore_hw_sta_rc_update(wl, wlvif);
242 out_sleep:
243 pm_runtime_mark_last_busy(wl->dev);
244 pm_runtime_put_autosuspend(wl->dev);
245 out:
246 mutex_unlock(&wl->mutex);
249 static void wl12xx_tx_watchdog_work(struct work_struct *work)
251 struct delayed_work *dwork;
252 struct wl1271 *wl;
254 dwork = to_delayed_work(work);
255 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
257 mutex_lock(&wl->mutex);
259 if (unlikely(wl->state != WLCORE_STATE_ON))
260 goto out;
262 /* Tx went out in the meantime - everything is ok */
263 if (unlikely(wl->tx_allocated_blocks == 0))
264 goto out;
267 * if a ROC is in progress, we might not have any Tx for a long
268 * time (e.g. pending Tx on the non-ROC channels)
270 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
271 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
272 wl->conf.tx.tx_watchdog_timeout);
273 wl12xx_rearm_tx_watchdog_locked(wl);
274 goto out;
278 * if a scan is in progress, we might not have any Tx for a long
279 * time
281 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
282 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
283 wl->conf.tx.tx_watchdog_timeout);
284 wl12xx_rearm_tx_watchdog_locked(wl);
285 goto out;
289 * AP might cache a frame for a long time for a sleeping station,
290 * so rearm the timer if there's an AP interface with stations. If
291  * Tx is genuinely stuck, we will hopefully discover it when all
292 * stations are removed due to inactivity.
294 if (wl->active_sta_count) {
295 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
296 " %d stations",
297 wl->conf.tx.tx_watchdog_timeout,
298 wl->active_sta_count);
299 wl12xx_rearm_tx_watchdog_locked(wl);
300 goto out;
303 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
304 wl->conf.tx.tx_watchdog_timeout);
305 wl12xx_queue_recovery_work(wl);
307 out:
308 mutex_unlock(&wl->mutex);
311 static void wlcore_adjust_conf(struct wl1271 *wl)
314 if (fwlog_param) {
315 if (!strcmp(fwlog_param, "continuous")) {
316 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
317 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
318 } else if (!strcmp(fwlog_param, "dbgpins")) {
319 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
320 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
321 } else if (!strcmp(fwlog_param, "disable")) {
322 wl->conf.fwlog.mem_blocks = 0;
323 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
324 } else {
325 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
329 if (bug_on_recovery != -1)
330 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
332 if (no_recovery != -1)
333 wl->conf.recovery.no_recovery = (u8) no_recovery;
336 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
337 struct wl12xx_vif *wlvif,
338 u8 hlid, u8 tx_pkts)
340 bool fw_ps;
342 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
345  * Wake up from high-level PS if the STA is asleep with too few
346 * packets in FW or if the STA is awake.
348 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
349 wl12xx_ps_link_end(wl, wlvif, hlid);
352 * Start high-level PS if the STA is asleep with enough blocks in FW.
353 * Make an exception if this is the only connected link. In this
354 * case FW-memory congestion is less of a problem.
355 * Note that a single connected STA means 2*ap_count + 1 active links,
356 * since we must account for the global and broadcast AP links
357 * for each AP. The "fw_ps" check assures us the other link is a STA
358 * connected to the AP. Otherwise the FW would not set the PSM bit.
360 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
361 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
362 wl12xx_ps_link_start(wl, wlvif, hlid, true);
365 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
366 struct wl12xx_vif *wlvif,
367 struct wl_fw_status *status)
369 unsigned long cur_fw_ps_map;
370 u8 hlid;
372 cur_fw_ps_map = status->link_ps_bitmap;
373 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
374 wl1271_debug(DEBUG_PSM,
375 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
376 wl->ap_fw_ps_map, cur_fw_ps_map,
377 wl->ap_fw_ps_map ^ cur_fw_ps_map);
379 wl->ap_fw_ps_map = cur_fw_ps_map;
382 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
383 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
384 wl->links[hlid].allocated_pkts);
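/*
 * Pull the raw firmware status block over the bus and fold it into the
 * driver state: per-queue and per-link freed-packet counters are
 * reconciled with 8-bit wrap-around arithmetic, TX block accounting and
 * the TX watchdog are updated, and for AP interfaces the per-link PS
 * state is re-evaluated.
 */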
387 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
389 struct wl12xx_vif *wlvif;
390 u32 old_tx_blk_count = wl->tx_blocks_available;
391 int avail, freed_blocks;
392 int i;
393 int ret;
394 struct wl1271_link *lnk;
396 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
397 wl->raw_fw_status,
398 wl->fw_status_len, false);
399 if (ret < 0)
400 return ret;
402 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
404 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
405 "drv_rx_counter = %d, tx_results_counter = %d)",
406 status->intr,
407 status->fw_rx_counter,
408 status->drv_rx_counter,
409 status->tx_results_counter);
411 for (i = 0; i < NUM_TX_QUEUES; i++) {
412 /* prevent wrap-around in freed-packets counter */
413 wl->tx_allocated_pkts[i] -=
414 (status->counters.tx_released_pkts[i] -
415 wl->tx_pkts_freed[i]) & 0xff;
417 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
421 for_each_set_bit(i, wl->links_map, wl->num_links) {
422 u8 diff;
423 lnk = &wl->links[i];
425 /* prevent wrap-around in freed-packets counter */
426 diff = (status->counters.tx_lnk_free_pkts[i] -
427 lnk->prev_freed_pkts) & 0xff;
429 if (diff == 0)
430 continue;
432 lnk->allocated_pkts -= diff;
433 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
435 /* accumulate the prev_freed_pkts counter */
436 lnk->total_freed_pkts += diff;
439 /* prevent wrap-around in total blocks counter */
440 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
441 freed_blocks = status->total_released_blks -
442 wl->tx_blocks_freed;
443 else
444 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
445 status->total_released_blks;
447 wl->tx_blocks_freed = status->total_released_blks;
449 wl->tx_allocated_blocks -= freed_blocks;
452 * If the FW freed some blocks:
453 * If we still have allocated blocks - re-arm the timer, Tx is
454 * not stuck. Otherwise, cancel the timer (no Tx currently).
456 if (freed_blocks) {
457 if (wl->tx_allocated_blocks)
458 wl12xx_rearm_tx_watchdog_locked(wl);
459 else
460 cancel_delayed_work(&wl->tx_watchdog_work);
463 avail = status->tx_total - wl->tx_allocated_blocks;
466 * The FW might change the total number of TX memblocks before
467 * we get a notification about blocks being released. Thus, the
468 * available blocks calculation might yield a temporary result
469 * which is lower than the actual available blocks. Keeping in
470 * mind that only blocks that were allocated can be moved from
471 * TX to RX, tx_blocks_available should never decrease here.
473 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
474 avail);
476 /* if more blocks are available now, tx work can be scheduled */
477 if (wl->tx_blocks_available > old_tx_blk_count)
478 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
480 /* for AP update num of allocated TX blocks per link and ps status */
481 wl12xx_for_each_wlvif_ap(wl, wlvif) {
482 wl12xx_irq_update_links_status(wl, wlvif, status);
485 /* update the host-chipset time offset */
486 wl->time_offset = (ktime_get_boottime_ns() >> 10) -
487 (s64)(status->fw_localtime);
489 wl->fw_fast_lnk_map = status->link_fast_bitmap;
491 return 0;
494 static void wl1271_flush_deferred_work(struct wl1271 *wl)
496 struct sk_buff *skb;
498 /* Pass all received frames to the network stack */
499 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
500 ieee80211_rx_ni(wl->hw, skb);
502 /* Return sent skbs to the network stack */
503 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
504 ieee80211_tx_status_ni(wl->hw, skb);
507 static void wl1271_netstack_work(struct work_struct *work)
509 struct wl1271 *wl =
510 container_of(work, struct wl1271, netstack_work);
512 do {
513 wl1271_flush_deferred_work(wl);
514 } while (skb_queue_len(&wl->deferred_rx_queue));
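/*
 * Upper bound on the number of status iterations the threaded IRQ
 * handler performs in one invocation; for edge-triggered interrupts
 * wlcore_irq_locked() forces a single pass to avoid racing the hardirq.
 */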
517 #define WL1271_IRQ_MAX_LOOPS 256
519 static int wlcore_irq_locked(struct wl1271 *wl)
521 int ret = 0;
522 u32 intr;
523 int loopcount = WL1271_IRQ_MAX_LOOPS;
524 bool done = false;
525 unsigned int defer_count;
526 unsigned long flags;
529 * In case edge triggered interrupt must be used, we cannot iterate
530 * more than once without introducing race conditions with the hardirq.
532 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
533 loopcount = 1;
535 wl1271_debug(DEBUG_IRQ, "IRQ work");
537 if (unlikely(wl->state != WLCORE_STATE_ON))
538 goto out;
540 ret = pm_runtime_get_sync(wl->dev);
541 if (ret < 0) {
542 pm_runtime_put_noidle(wl->dev);
543 goto out;
546 while (!done && loopcount--) {
547 smp_mb__after_atomic();
549 ret = wlcore_fw_status(wl, wl->fw_status);
550 if (ret < 0)
551 goto out;
553 wlcore_hw_tx_immediate_compl(wl);
555 intr = wl->fw_status->intr;
556 intr &= WLCORE_ALL_INTR_MASK;
557 if (!intr) {
558 done = true;
559 continue;
562 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
563 wl1271_error("HW watchdog interrupt received! starting recovery.");
564 wl->watchdog_recovery = true;
565 ret = -EIO;
567 /* restarting the chip. ignore any other interrupt. */
568 goto out;
571 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
572 wl1271_error("SW watchdog interrupt received! "
573 "starting recovery.");
574 wl->watchdog_recovery = true;
575 ret = -EIO;
577 /* restarting the chip. ignore any other interrupt. */
578 goto out;
581 if (likely(intr & WL1271_ACX_INTR_DATA)) {
582 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
584 ret = wlcore_rx(wl, wl->fw_status);
585 if (ret < 0)
586 goto out;
588 /* Check if any tx blocks were freed */
589 spin_lock_irqsave(&wl->wl_lock, flags);
590 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
591 wl1271_tx_total_queue_count(wl) > 0) {
592 spin_unlock_irqrestore(&wl->wl_lock, flags);
594 * In order to avoid starvation of the TX path,
595 * call the work function directly.
597 ret = wlcore_tx_work_locked(wl);
598 if (ret < 0)
599 goto out;
600 } else {
601 spin_unlock_irqrestore(&wl->wl_lock, flags);
604 /* check for tx results */
605 ret = wlcore_hw_tx_delayed_compl(wl);
606 if (ret < 0)
607 goto out;
609 /* Make sure the deferred queues don't get too long */
610 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
611 skb_queue_len(&wl->deferred_rx_queue);
612 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
613 wl1271_flush_deferred_work(wl);
616 if (intr & WL1271_ACX_INTR_EVENT_A) {
617 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
618 ret = wl1271_event_handle(wl, 0);
619 if (ret < 0)
620 goto out;
623 if (intr & WL1271_ACX_INTR_EVENT_B) {
624 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
625 ret = wl1271_event_handle(wl, 1);
626 if (ret < 0)
627 goto out;
630 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
631 wl1271_debug(DEBUG_IRQ,
632 "WL1271_ACX_INTR_INIT_COMPLETE");
634 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
635 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
638 pm_runtime_mark_last_busy(wl->dev);
639 pm_runtime_put_autosuspend(wl->dev);
641 out:
642 return ret;
645 static irqreturn_t wlcore_irq(int irq, void *cookie)
647 int ret;
648 unsigned long flags;
649 struct wl1271 *wl = cookie;
651 /* complete the ELP completion */
652 spin_lock_irqsave(&wl->wl_lock, flags);
653 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
654 if (wl->elp_compl) {
655 complete(wl->elp_compl);
656 wl->elp_compl = NULL;
659 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
660 /* don't enqueue a work right now. mark it as pending */
661 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
662 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
663 disable_irq_nosync(wl->irq);
664 pm_wakeup_event(wl->dev, 0);
665 spin_unlock_irqrestore(&wl->wl_lock, flags);
666 goto out_handled;
668 spin_unlock_irqrestore(&wl->wl_lock, flags);
670 /* TX might be handled here, avoid redundant work */
671 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
672 cancel_work_sync(&wl->tx_work);
674 mutex_lock(&wl->mutex);
676 ret = wlcore_irq_locked(wl);
677 if (ret)
678 wl12xx_queue_recovery_work(wl);
680 spin_lock_irqsave(&wl->wl_lock, flags);
681 /* In case TX was not handled here, queue TX work */
682 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
683 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
684 wl1271_tx_total_queue_count(wl) > 0)
685 ieee80211_queue_work(wl->hw, &wl->tx_work);
686 spin_unlock_irqrestore(&wl->wl_lock, flags);
688 mutex_unlock(&wl->mutex);
690 out_handled:
691 spin_lock_irqsave(&wl->wl_lock, flags);
692 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
693 spin_unlock_irqrestore(&wl->wl_lock, flags);
695 return IRQ_HANDLED;
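/*
 * Iterator data for ieee80211_iterate_active_interfaces(): counts the
 * active vifs and records whether the vif of interest is among them.
 */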
698 struct vif_counter_data {
699 u8 counter;
701 struct ieee80211_vif *cur_vif;
702 bool cur_vif_running;
705 static void wl12xx_vif_count_iter(void *data, u8 *mac,
706 struct ieee80211_vif *vif)
708 struct vif_counter_data *counter = data;
710 counter->counter++;
711 if (counter->cur_vif == vif)
712 counter->cur_vif_running = true;
715 /* caller must not hold wl->mutex, as it might deadlock */
716 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
717 struct ieee80211_vif *cur_vif,
718 struct vif_counter_data *data)
720 memset(data, 0, sizeof(*data));
721 data->cur_vif = cur_vif;
723 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
724 wl12xx_vif_count_iter, data);
727 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
729 const struct firmware *fw;
730 const char *fw_name;
731 enum wl12xx_fw_type fw_type;
732 int ret;
734 if (plt) {
735 fw_type = WL12XX_FW_TYPE_PLT;
736 fw_name = wl->plt_fw_name;
737 } else {
739 * we can't call wl12xx_get_vif_count() here because
740 * wl->mutex is taken, so use the cached last_vif_count value
742 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
743 fw_type = WL12XX_FW_TYPE_MULTI;
744 fw_name = wl->mr_fw_name;
745 } else {
746 fw_type = WL12XX_FW_TYPE_NORMAL;
747 fw_name = wl->sr_fw_name;
751 if (wl->fw_type == fw_type)
752 return 0;
754 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
756 ret = request_firmware(&fw, fw_name, wl->dev);
758 if (ret < 0) {
759 wl1271_error("could not get firmware %s: %d", fw_name, ret);
760 return ret;
763 if (fw->size % 4) {
764 wl1271_error("firmware size is not multiple of 32 bits: %zu",
765 fw->size);
766 ret = -EILSEQ;
767 goto out;
770 vfree(wl->fw);
771 wl->fw_type = WL12XX_FW_TYPE_NONE;
772 wl->fw_len = fw->size;
773 wl->fw = vmalloc(wl->fw_len);
775 if (!wl->fw) {
776 wl1271_error("could not allocate memory for the firmware");
777 ret = -ENOMEM;
778 goto out;
781 memcpy(wl->fw, fw->data, wl->fw_len);
782 ret = 0;
783 wl->fw_type = fw_type;
784 out:
785 release_firmware(fw);
787 return ret;
790 void wl12xx_queue_recovery_work(struct wl1271 *wl)
792 /* Avoid a recursive recovery */
793 if (wl->state == WLCORE_STATE_ON) {
794 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
795 &wl->flags));
797 wl->state = WLCORE_STATE_RESTARTING;
798 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
799 ieee80211_queue_work(wl->hw, &wl->recovery_work);
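/*
 * Append a chunk of firmware log memory to the host-side fwlog buffer
 * (read back through sysfs); the copy is clamped so the buffer never
 * exceeds a single page.
 */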
803 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
805 size_t len;
807 /* Make sure we have enough room */
808 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
810 /* Fill the FW log file, consumed by the sysfs fwlog entry */
811 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
812 wl->fwlog_size += len;
814 return len;
817 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
819 u32 end_of_log = 0;
820 int error;
822 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
823 return;
825 wl1271_info("Reading FW panic log");
828 * Make sure the chip is awake and the logger isn't active.
829  * Do not send a stop fwlog command if the fw is hung or if
830 * dbgpins are used (due to some fw bug).
832 error = pm_runtime_get_sync(wl->dev);
833 if (error < 0) {
834 pm_runtime_put_noidle(wl->dev);
835 return;
837 if (!wl->watchdog_recovery &&
838 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
839 wl12xx_cmd_stop_fwlog(wl);
841 /* Traverse the memory blocks linked list */
842 do {
843 end_of_log = wlcore_event_fw_logger(wl);
844 if (end_of_log == 0) {
845 msleep(100);
846 end_of_log = wlcore_event_fw_logger(wl);
848 } while (end_of_log != 0);
851 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
852 u8 hlid, struct ieee80211_sta *sta)
854 struct wl1271_station *wl_sta;
855 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
857 wl_sta = (void *)sta->drv_priv;
858 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
861 * increment the initial seq number on recovery to account for
862 * transmitted packets that we haven't yet got in the FW status
864 if (wlvif->encryption_type == KEY_GEM)
865 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
867 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
868 wl_sta->total_freed_pkts += sqn_recovery_padding;
871 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
872 struct wl12xx_vif *wlvif,
873 u8 hlid, const u8 *addr)
875 struct ieee80211_sta *sta;
876 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
878 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
879 is_zero_ether_addr(addr)))
880 return;
882 rcu_read_lock();
883 sta = ieee80211_find_sta(vif, addr);
884 if (sta)
885 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
886 rcu_read_unlock();
889 static void wlcore_print_recovery(struct wl1271 *wl)
891 u32 pc = 0;
892 u32 hint_sts = 0;
893 int ret;
895 wl1271_info("Hardware recovery in progress. FW ver: %s",
896 wl->chip.fw_ver_str);
898 /* change partitions momentarily so we can read the FW pc */
899 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
900 if (ret < 0)
901 return;
903 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
904 if (ret < 0)
905 return;
907 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
908 if (ret < 0)
909 return;
911 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
912 pc, hint_sts, ++wl->recovery_count);
914 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
918 static void wl1271_recovery_work(struct work_struct *work)
920 struct wl1271 *wl =
921 container_of(work, struct wl1271, recovery_work);
922 struct wl12xx_vif *wlvif;
923 struct ieee80211_vif *vif;
924 int error;
926 mutex_lock(&wl->mutex);
928 if (wl->state == WLCORE_STATE_OFF || wl->plt)
929 goto out_unlock;
931 error = pm_runtime_get_sync(wl->dev);
932 if (error < 0) {
933 wl1271_warning("Enable for recovery failed");
934 pm_runtime_put_noidle(wl->dev);
936 wlcore_disable_interrupts_nosync(wl);
938 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
939 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
940 wl12xx_read_fwlog_panic(wl);
941 wlcore_print_recovery(wl);
944 BUG_ON(wl->conf.recovery.bug_on_recovery &&
945 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
947 clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
949 if (wl->conf.recovery.no_recovery) {
950 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
951 goto out_unlock;
954 /* Prevent spurious TX during FW restart */
955 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
957 /* reboot the chipset */
958 while (!list_empty(&wl->wlvif_list)) {
959 wlvif = list_first_entry(&wl->wlvif_list,
960 struct wl12xx_vif, list);
961 vif = wl12xx_wlvif_to_vif(wlvif);
963 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
964 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
965 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
966 vif->bss_conf.bssid);
969 __wl1271_op_remove_interface(wl, vif, false);
972 wlcore_op_stop_locked(wl);
973 pm_runtime_mark_last_busy(wl->dev);
974 pm_runtime_put_autosuspend(wl->dev);
976 ieee80211_restart_hw(wl->hw);
979  * It's safe to enable TX now - the queues are stopped after a request
980 * to restart the HW.
982 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
984 out_unlock:
985 wl->watchdog_recovery = false;
986 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
987 mutex_unlock(&wl->mutex);
990 static int wlcore_fw_wakeup(struct wl1271 *wl)
992 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
995 static int wl1271_setup(struct wl1271 *wl)
997 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
998 if (!wl->raw_fw_status)
999 goto err;
1001 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1002 if (!wl->fw_status)
1003 goto err;
1005 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1006 if (!wl->tx_res_if)
1007 goto err;
1009 return 0;
1010 err:
1011 kfree(wl->fw_status);
1012 kfree(wl->raw_fw_status);
1013 return -ENOMEM;
1016 static int wl12xx_set_power_on(struct wl1271 *wl)
1018 int ret;
1020 msleep(WL1271_PRE_POWER_ON_SLEEP);
1021 ret = wl1271_power_on(wl);
1022 if (ret < 0)
1023 goto out;
1024 msleep(WL1271_POWER_ON_SLEEP);
1025 wl1271_io_reset(wl);
1026 wl1271_io_init(wl);
1028 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1029 if (ret < 0)
1030 goto fail;
1032 /* ELP module wake up */
1033 ret = wlcore_fw_wakeup(wl);
1034 if (ret < 0)
1035 goto fail;
1037 out:
1038 return ret;
1040 fail:
1041 wl1271_power_off(wl);
1042 return ret;
1045 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1047 int ret = 0;
1049 ret = wl12xx_set_power_on(wl);
1050 if (ret < 0)
1051 goto out;
1054 * For wl127x based devices we could use the default block
1055 * size (512 bytes), but due to a bug in the sdio driver, we
1056 * need to set it explicitly after the chip is powered on. To
1057 * simplify the code and since the performance impact is
1058 * negligible, we use the same block size for all different
1059 * chip types.
1061 * Check if the bus supports blocksize alignment and, if it
1062 * doesn't, make sure we don't have the quirk.
1064 if (!wl1271_set_block_size(wl))
1065 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1067 /* TODO: make sure the lower driver has set things up correctly */
1069 ret = wl1271_setup(wl);
1070 if (ret < 0)
1071 goto out;
1073 ret = wl12xx_fetch_firmware(wl, plt);
1074 if (ret < 0) {
1075 kfree(wl->fw_status);
1076 kfree(wl->raw_fw_status);
1077 kfree(wl->tx_res_if);
1080 out:
1081 return ret;
1084 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1086 int retries = WL1271_BOOT_RETRIES;
1087 struct wiphy *wiphy = wl->hw->wiphy;
1089 static const char* const PLT_MODE[] = {
1090 "PLT_OFF",
1091 "PLT_ON",
1092 "PLT_FEM_DETECT",
1093 "PLT_CHIP_AWAKE"
1096 int ret;
1098 mutex_lock(&wl->mutex);
1100 wl1271_notice("power up");
1102 if (wl->state != WLCORE_STATE_OFF) {
1103 wl1271_error("cannot go into PLT state because not "
1104 "in off state: %d", wl->state);
1105 ret = -EBUSY;
1106 goto out;
1109 /* Indicate to lower levels that we are now in PLT mode */
1110 wl->plt = true;
1111 wl->plt_mode = plt_mode;
1113 while (retries) {
1114 retries--;
1115 ret = wl12xx_chip_wakeup(wl, true);
1116 if (ret < 0)
1117 goto power_off;
1119 if (plt_mode != PLT_CHIP_AWAKE) {
1120 ret = wl->ops->plt_init(wl);
1121 if (ret < 0)
1122 goto power_off;
1125 wl->state = WLCORE_STATE_ON;
1126 wl1271_notice("firmware booted in PLT mode %s (%s)",
1127 PLT_MODE[plt_mode],
1128 wl->chip.fw_ver_str);
1130 /* update hw/fw version info in wiphy struct */
1131 wiphy->hw_version = wl->chip.id;
1132 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1133 sizeof(wiphy->fw_version));
1135 goto out;
1137 power_off:
1138 wl1271_power_off(wl);
1141 wl->plt = false;
1142 wl->plt_mode = PLT_OFF;
1144 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1145 WL1271_BOOT_RETRIES);
1146 out:
1147 mutex_unlock(&wl->mutex);
1149 return ret;
1152 int wl1271_plt_stop(struct wl1271 *wl)
1154 int ret = 0;
1156 wl1271_notice("power down");
1159 * Interrupts must be disabled before setting the state to OFF.
1160 * Otherwise, the interrupt handler might be called and exit without
1161 * reading the interrupt status.
1163 wlcore_disable_interrupts(wl);
1164 mutex_lock(&wl->mutex);
1165 if (!wl->plt) {
1166 mutex_unlock(&wl->mutex);
1169 * This will not necessarily enable interrupts as interrupts
1170 * may have been disabled when op_stop was called. It will,
1171 * however, balance the above call to disable_interrupts().
1173 wlcore_enable_interrupts(wl);
1175 wl1271_error("cannot power down because not in PLT "
1176 "state: %d", wl->state);
1177 ret = -EBUSY;
1178 goto out;
1181 mutex_unlock(&wl->mutex);
1183 wl1271_flush_deferred_work(wl);
1184 cancel_work_sync(&wl->netstack_work);
1185 cancel_work_sync(&wl->recovery_work);
1186 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1188 mutex_lock(&wl->mutex);
1189 wl1271_power_off(wl);
1190 wl->flags = 0;
1191 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1192 wl->state = WLCORE_STATE_OFF;
1193 wl->plt = false;
1194 wl->plt_mode = PLT_OFF;
1195 wl->rx_counter = 0;
1196 mutex_unlock(&wl->mutex);
1198 out:
1199 return ret;
1202 static void wl1271_op_tx(struct ieee80211_hw *hw,
1203 struct ieee80211_tx_control *control,
1204 struct sk_buff *skb)
1206 struct wl1271 *wl = hw->priv;
1207 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1208 struct ieee80211_vif *vif = info->control.vif;
1209 struct wl12xx_vif *wlvif = NULL;
1210 unsigned long flags;
1211 int q, mapping;
1212 u8 hlid;
1214 if (!vif) {
1215 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1216 ieee80211_free_txskb(hw, skb);
1217 return;
1220 wlvif = wl12xx_vif_to_data(vif);
1221 mapping = skb_get_queue_mapping(skb);
1222 q = wl1271_tx_get_queue(mapping);
1224 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1226 spin_lock_irqsave(&wl->wl_lock, flags);
1229 * drop the packet if the link is invalid or the queue is stopped
1230 * for any reason but watermark. Watermark is a "soft"-stop so we
1231 * allow these packets through.
1233 if (hlid == WL12XX_INVALID_LINK_ID ||
1234 (!test_bit(hlid, wlvif->links_map)) ||
1235 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1236 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1237 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1238 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1239 ieee80211_free_txskb(hw, skb);
1240 goto out;
1243 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1244 hlid, q, skb->len);
1245 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1247 wl->tx_queue_count[q]++;
1248 wlvif->tx_queue_count[q]++;
1251  * The workqueue is slow to process the tx_queue and we need to stop
1252 * the queue here, otherwise the queue will get too long.
1254 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1255 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1256 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1257 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1258 wlcore_stop_queue_locked(wl, wlvif, q,
1259 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1263 * The chip specific setup must run before the first TX packet -
1264 * before that, the tx_work will not be initialized!
1267 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1268 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1269 ieee80211_queue_work(wl->hw, &wl->tx_work);
1271 out:
1272 spin_unlock_irqrestore(&wl->wl_lock, flags);
1275 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1277 unsigned long flags;
1278 int q;
1280 /* no need to queue a new dummy packet if one is already pending */
1281 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1282 return 0;
1284 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1286 spin_lock_irqsave(&wl->wl_lock, flags);
1287 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1288 wl->tx_queue_count[q]++;
1289 spin_unlock_irqrestore(&wl->wl_lock, flags);
1291 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1292 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1293 return wlcore_tx_work_locked(wl);
1296 * If the FW TX is busy, TX work will be scheduled by the threaded
1297 * interrupt handler function
1299 return 0;
1303 * The size of the dummy packet should be at least 1400 bytes. However, in
1304  * order to minimize the number of bus transactions, aligning it to 512-byte
1305  * boundaries could be beneficial, performance-wise.
1307 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1309 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1311 struct sk_buff *skb;
1312 struct ieee80211_hdr_3addr *hdr;
1313 unsigned int dummy_packet_size;
1315 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1316 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1318 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1319 if (!skb) {
1320 wl1271_warning("Failed to allocate a dummy packet skb");
1321 return NULL;
1324 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1326 hdr = skb_put_zero(skb, sizeof(*hdr));
1327 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1328 IEEE80211_STYPE_NULLFUNC |
1329 IEEE80211_FCTL_TODS);
1331 skb_put_zero(skb, dummy_packet_size);
1333 /* Dummy packets require the TID to be management */
1334 skb->priority = WL1271_TID_MGMT;
1336 /* Initialize all fields that might be used */
1337 skb_set_queue_mapping(skb, 0);
1338 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1340 return skb;
1344 static int
1345 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1347 int num_fields = 0, in_field = 0, fields_size = 0;
1348 int i, pattern_len = 0;
1350 if (!p->mask) {
1351 wl1271_warning("No mask in WoWLAN pattern");
1352 return -EINVAL;
1356 * The pattern is broken up into segments of bytes at different offsets
1357 * that need to be checked by the FW filter. Each segment is called
1358 * a field in the FW API. We verify that the total number of fields
1359 * required for this pattern won't exceed FW limits (8)
1360 * as well as the total fields buffer won't exceed the FW limit.
1361  * Note that if there's a pattern which crosses the Ethernet/IP header
1362  * boundary, a new field is required.
1364 for (i = 0; i < p->pattern_len; i++) {
1365 if (test_bit(i, (unsigned long *)p->mask)) {
1366 if (!in_field) {
1367 in_field = 1;
1368 pattern_len = 1;
1369 } else {
1370 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1371 num_fields++;
1372 fields_size += pattern_len +
1373 RX_FILTER_FIELD_OVERHEAD;
1374 pattern_len = 1;
1375 } else
1376 pattern_len++;
1378 } else {
1379 if (in_field) {
1380 in_field = 0;
1381 fields_size += pattern_len +
1382 RX_FILTER_FIELD_OVERHEAD;
1383 num_fields++;
1388 if (in_field) {
1389 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1390 num_fields++;
1393 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1394 wl1271_warning("RX Filter too complex. Too many segments");
1395 return -EINVAL;
1398 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1399 wl1271_warning("RX filter pattern is too big");
1400 return -E2BIG;
1403 return 0;
1406 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1408 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1411 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1413 int i;
1415 if (filter == NULL)
1416 return;
1418 for (i = 0; i < filter->num_fields; i++)
1419 kfree(filter->fields[i].pattern);
1421 kfree(filter);
1424 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1425 u16 offset, u8 flags,
1426 const u8 *pattern, u8 len)
1428 struct wl12xx_rx_filter_field *field;
1430 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1431 wl1271_warning("Max fields per RX filter. can't alloc another");
1432 return -EINVAL;
1435 field = &filter->fields[filter->num_fields];
1437 field->pattern = kmemdup(pattern, len, GFP_KERNEL);
1438 if (!field->pattern) {
1439 wl1271_warning("Failed to allocate RX filter pattern");
1440 return -ENOMEM;
1443 filter->num_fields++;
1445 field->offset = cpu_to_le16(offset);
1446 field->flags = flags;
1447 field->len = len;
1449 return 0;
1452 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1454 int i, fields_size = 0;
1456 for (i = 0; i < filter->num_fields; i++)
1457 fields_size += filter->fields[i].len +
1458 sizeof(struct wl12xx_rx_filter_field) -
1459 sizeof(u8 *);
1461 return fields_size;
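/*
 * Serialize the filter fields into a flat buffer for the firmware
 * command: each field header is followed by its pattern bytes in place
 * of the host-side pattern pointer, hence the sizeof(u8 *) adjustment
 * used here and in wl1271_rx_filter_get_fields_size().
 */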
1464 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1465 u8 *buf)
1467 int i;
1468 struct wl12xx_rx_filter_field *field;
1470 for (i = 0; i < filter->num_fields; i++) {
1471 field = (struct wl12xx_rx_filter_field *)buf;
1473 field->offset = filter->fields[i].offset;
1474 field->flags = filter->fields[i].flags;
1475 field->len = filter->fields[i].len;
1477 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1478 buf += sizeof(struct wl12xx_rx_filter_field) -
1479 sizeof(u8 *) + field->len;
1484 * Allocates an RX filter returned through f
1485 * which needs to be freed using rx_filter_free()
1487 static int
1488 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1489 struct wl12xx_rx_filter **f)
1491 int i, j, ret = 0;
1492 struct wl12xx_rx_filter *filter;
1493 u16 offset;
1494 u8 flags, len;
1496 filter = wl1271_rx_filter_alloc();
1497 if (!filter) {
1498 wl1271_warning("Failed to alloc rx filter");
1499 ret = -ENOMEM;
1500 goto err;
1503 i = 0;
1504 while (i < p->pattern_len) {
1505 if (!test_bit(i, (unsigned long *)p->mask)) {
1506 i++;
1507 continue;
1510 for (j = i; j < p->pattern_len; j++) {
1511 if (!test_bit(j, (unsigned long *)p->mask))
1512 break;
1514 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1515 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1516 break;
1519 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1520 offset = i;
1521 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1522 } else {
1523 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1524 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1527 len = j - i;
1529 ret = wl1271_rx_filter_alloc_field(filter,
1530 offset,
1531 flags,
1532 &p->pattern[i], len);
1533 if (ret)
1534 goto err;
1536 i = j;
1539 filter->action = FILTER_SIGNAL;
1541 *f = filter;
1542 return 0;
1544 err:
1545 wl1271_rx_filter_free(filter);
1546 *f = NULL;
1548 return ret;
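/*
 * Apply the WoWLAN configuration: with no patterns (or "any") all RX
 * filters are cleared and the default action stays FILTER_SIGNAL;
 * otherwise each validated pattern is installed as an RX filter and the
 * default action for unmatched frames becomes FILTER_DROP.
 */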
1551 static int wl1271_configure_wowlan(struct wl1271 *wl,
1552 struct cfg80211_wowlan *wow)
1554 int i, ret;
1556 if (!wow || wow->any || !wow->n_patterns) {
1557 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1558 FILTER_SIGNAL);
1559 if (ret)
1560 goto out;
1562 ret = wl1271_rx_filter_clear_all(wl);
1563 if (ret)
1564 goto out;
1566 return 0;
1569 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1570 return -EINVAL;
1572 /* Validate all incoming patterns before clearing current FW state */
1573 for (i = 0; i < wow->n_patterns; i++) {
1574 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1575 if (ret) {
1576 wl1271_warning("Bad wowlan pattern %d", i);
1577 return ret;
1581 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1582 if (ret)
1583 goto out;
1585 ret = wl1271_rx_filter_clear_all(wl);
1586 if (ret)
1587 goto out;
1589 /* Translate WoWLAN patterns into filters */
1590 for (i = 0; i < wow->n_patterns; i++) {
1591 struct cfg80211_pkt_pattern *p;
1592 struct wl12xx_rx_filter *filter = NULL;
1594 p = &wow->patterns[i];
1596 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1597 if (ret) {
1598 wl1271_warning("Failed to create an RX filter from "
1599 "wowlan pattern %d", i);
1600 goto out;
1603 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1605 wl1271_rx_filter_free(filter);
1606 if (ret)
1607 goto out;
1610 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1612 out:
1613 return ret;
1616 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1617 struct wl12xx_vif *wlvif,
1618 struct cfg80211_wowlan *wow)
1620 int ret = 0;
1622 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1623 goto out;
1625 ret = wl1271_configure_wowlan(wl, wow);
1626 if (ret < 0)
1627 goto out;
1629 if ((wl->conf.conn.suspend_wake_up_event ==
1630 wl->conf.conn.wake_up_event) &&
1631 (wl->conf.conn.suspend_listen_interval ==
1632 wl->conf.conn.listen_interval))
1633 goto out;
1635 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1636 wl->conf.conn.suspend_wake_up_event,
1637 wl->conf.conn.suspend_listen_interval);
1639 if (ret < 0)
1640 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1641 out:
1642 return ret;
1646 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1647 struct wl12xx_vif *wlvif,
1648 struct cfg80211_wowlan *wow)
1650 int ret = 0;
1652 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1653 goto out;
1655 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1656 if (ret < 0)
1657 goto out;
1659 ret = wl1271_configure_wowlan(wl, wow);
1660 if (ret < 0)
1661 goto out;
1663 out:
1664 return ret;
1668 static int wl1271_configure_suspend(struct wl1271 *wl,
1669 struct wl12xx_vif *wlvif,
1670 struct cfg80211_wowlan *wow)
1672 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1673 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1674 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1675 return wl1271_configure_suspend_ap(wl, wlvif, wow);
1676 return 0;
1679 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1681 int ret = 0;
1682 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1683 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1685 if ((!is_ap) && (!is_sta))
1686 return;
1688 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1689 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1690 return;
1692 wl1271_configure_wowlan(wl, NULL);
1694 if (is_sta) {
1695 if ((wl->conf.conn.suspend_wake_up_event ==
1696 wl->conf.conn.wake_up_event) &&
1697 (wl->conf.conn.suspend_listen_interval ==
1698 wl->conf.conn.listen_interval))
1699 return;
1701 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1702 wl->conf.conn.wake_up_event,
1703 wl->conf.conn.listen_interval);
1705 if (ret < 0)
1706 wl1271_error("resume: wake up conditions failed: %d",
1707 ret);
1709 } else if (is_ap) {
1710 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1714 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1715 struct cfg80211_wowlan *wow)
1717 struct wl1271 *wl = hw->priv;
1718 struct wl12xx_vif *wlvif;
1719 unsigned long flags;
1720 int ret;
1722 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1723 WARN_ON(!wow);
1725 /* we want to perform the recovery before suspending */
1726 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1727 wl1271_warning("postponing suspend to perform recovery");
1728 return -EBUSY;
1731 wl1271_tx_flush(wl);
1733 mutex_lock(&wl->mutex);
1735 ret = pm_runtime_get_sync(wl->dev);
1736 if (ret < 0) {
1737 pm_runtime_put_noidle(wl->dev);
1738 mutex_unlock(&wl->mutex);
1739 return ret;
1742 wl->wow_enabled = true;
1743 wl12xx_for_each_wlvif(wl, wlvif) {
1744 if (wlcore_is_p2p_mgmt(wlvif))
1745 continue;
1747 ret = wl1271_configure_suspend(wl, wlvif, wow);
1748 if (ret < 0) {
1749 mutex_unlock(&wl->mutex);
1750 wl1271_warning("couldn't prepare device to suspend");
1751 return ret;
1755 /* disable fast link flow control notifications from FW */
1756 ret = wlcore_hw_interrupt_notify(wl, false);
1757 if (ret < 0)
1758 goto out_sleep;
1760 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1761 ret = wlcore_hw_rx_ba_filter(wl,
1762 !!wl->conf.conn.suspend_rx_ba_activity);
1763 if (ret < 0)
1764 goto out_sleep;
1766 out_sleep:
1767 pm_runtime_put_noidle(wl->dev);
1768 mutex_unlock(&wl->mutex);
1770 if (ret < 0) {
1771 wl1271_warning("couldn't prepare device to suspend");
1772 return ret;
1775 /* flush any remaining work */
1776 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1778 flush_work(&wl->tx_work);
1781 * Cancel the watchdog even if above tx_flush failed. We will detect
1782 * it on resume anyway.
1784 cancel_delayed_work(&wl->tx_watchdog_work);
1787 * set suspended flag to avoid triggering a new threaded_irq
1788 * work.
1790 spin_lock_irqsave(&wl->wl_lock, flags);
1791 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1792 spin_unlock_irqrestore(&wl->wl_lock, flags);
1794 return pm_runtime_force_suspend(wl->dev);
1797 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1799 struct wl1271 *wl = hw->priv;
1800 struct wl12xx_vif *wlvif;
1801 unsigned long flags;
1802 bool run_irq_work = false, pending_recovery;
1803 int ret;
1805 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1806 wl->wow_enabled);
1807 WARN_ON(!wl->wow_enabled);
1809 ret = pm_runtime_force_resume(wl->dev);
1810 if (ret < 0) {
1811 wl1271_error("ELP wakeup failure!");
1812 goto out_sleep;
1816 * re-enable irq_work enqueuing, and call irq_work directly if
1817 * there is a pending work.
1819 spin_lock_irqsave(&wl->wl_lock, flags);
1820 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1821 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1822 run_irq_work = true;
1823 spin_unlock_irqrestore(&wl->wl_lock, flags);
1825 mutex_lock(&wl->mutex);
1827 /* test the recovery flag before calling any SDIO functions */
1828 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1829 &wl->flags);
1831 if (run_irq_work) {
1832 wl1271_debug(DEBUG_MAC80211,
1833 "run postponed irq_work directly");
1835 /* don't talk to the HW if recovery is pending */
1836 if (!pending_recovery) {
1837 ret = wlcore_irq_locked(wl);
1838 if (ret)
1839 wl12xx_queue_recovery_work(wl);
1842 wlcore_enable_interrupts(wl);
1845 if (pending_recovery) {
1846 wl1271_warning("queuing forgotten recovery on resume");
1847 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1848 goto out_sleep;
1851 ret = pm_runtime_get_sync(wl->dev);
1852 if (ret < 0) {
1853 pm_runtime_put_noidle(wl->dev);
1854 goto out;
1857 wl12xx_for_each_wlvif(wl, wlvif) {
1858 if (wlcore_is_p2p_mgmt(wlvif))
1859 continue;
1861 wl1271_configure_resume(wl, wlvif);
1864 ret = wlcore_hw_interrupt_notify(wl, true);
1865 if (ret < 0)
1866 goto out_sleep;
1868 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1869 ret = wlcore_hw_rx_ba_filter(wl, false);
1870 if (ret < 0)
1871 goto out_sleep;
1873 out_sleep:
1874 pm_runtime_mark_last_busy(wl->dev);
1875 pm_runtime_put_autosuspend(wl->dev);
1877 out:
1878 wl->wow_enabled = false;
1881 * Set a flag to re-init the watchdog on the first Tx after resume.
1882 * That way we avoid possible conditions where Tx-complete interrupts
1883 * fail to arrive and we perform a spurious recovery.
1885 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1886 mutex_unlock(&wl->mutex);
1888 return 0;
1891 static int wl1271_op_start(struct ieee80211_hw *hw)
1893 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1896 * We have to delay the booting of the hardware because
1897 * we need to know the local MAC address before downloading and
1898 * initializing the firmware. The MAC address cannot be changed
1899 * after boot, and without the proper MAC address, the firmware
1900 * will not function properly.
1902 * The MAC address is first known when the corresponding interface
1903 * is added. That is where we will initialize the hardware.
1906 return 0;
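/*
 * Tear down the device with wl->mutex held: mark the state OFF first so
 * pending work bails out, drop the mutex while interrupts and work items
 * are flushed, then power off and reset the bookkeeping to boot values.
 */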
1909 static void wlcore_op_stop_locked(struct wl1271 *wl)
1911 int i;
1913 if (wl->state == WLCORE_STATE_OFF) {
1914 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1915 &wl->flags))
1916 wlcore_enable_interrupts(wl);
1918 return;
1922 * this must be before the cancel_work calls below, so that the work
1923 * functions don't perform further work.
1925 wl->state = WLCORE_STATE_OFF;
1928 * Use the nosync variant to disable interrupts, so the mutex could be
1929 * held while doing so without deadlocking.
1931 wlcore_disable_interrupts_nosync(wl);
1933 mutex_unlock(&wl->mutex);
1935 wlcore_synchronize_interrupts(wl);
1936 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1937 cancel_work_sync(&wl->recovery_work);
1938 wl1271_flush_deferred_work(wl);
1939 cancel_delayed_work_sync(&wl->scan_complete_work);
1940 cancel_work_sync(&wl->netstack_work);
1941 cancel_work_sync(&wl->tx_work);
1942 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1944 /* let's notify MAC80211 about the remaining pending TX frames */
1945 mutex_lock(&wl->mutex);
1946 wl12xx_tx_reset(wl);
1948 wl1271_power_off(wl);
1950 * In case a recovery was scheduled, interrupts were disabled to avoid
1951 * an interrupt storm. Now that the power is down, it is safe to
1952 * re-enable interrupts to balance the disable depth
1954 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1955 wlcore_enable_interrupts(wl);
1957 wl->band = NL80211_BAND_2GHZ;
1959 wl->rx_counter = 0;
1960 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1961 wl->channel_type = NL80211_CHAN_NO_HT;
1962 wl->tx_blocks_available = 0;
1963 wl->tx_allocated_blocks = 0;
1964 wl->tx_results_count = 0;
1965 wl->tx_packets_count = 0;
1966 wl->time_offset = 0;
1967 wl->ap_fw_ps_map = 0;
1968 wl->ap_ps_map = 0;
1969 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1970 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1971 memset(wl->links_map, 0, sizeof(wl->links_map));
1972 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1973 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1974 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1975 wl->active_sta_count = 0;
1976 wl->active_link_count = 0;
1978 /* The system link is always allocated */
1979 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1980 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1981 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1984 * this is performed after the cancel_work calls and the associated
1985 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1986 * get executed before all these vars have been reset.
1988 wl->flags = 0;
1990 wl->tx_blocks_freed = 0;
1992 for (i = 0; i < NUM_TX_QUEUES; i++) {
1993 wl->tx_pkts_freed[i] = 0;
1994 wl->tx_allocated_pkts[i] = 0;
1997 wl1271_debugfs_reset(wl);
1999 kfree(wl->raw_fw_status);
2000 wl->raw_fw_status = NULL;
2001 kfree(wl->fw_status);
2002 wl->fw_status = NULL;
2003 kfree(wl->tx_res_if);
2004 wl->tx_res_if = NULL;
2005 kfree(wl->target_mem_map);
2006 wl->target_mem_map = NULL;
2009 * FW channels must be re-calibrated after recovery,
2010 * save current Reg-Domain channel configuration and clear it.
2012 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2013 sizeof(wl->reg_ch_conf_pending));
2014 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2017 static void wlcore_op_stop(struct ieee80211_hw *hw)
2019 struct wl1271 *wl = hw->priv;
2021 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2023 mutex_lock(&wl->mutex);
2025 wlcore_op_stop_locked(wl);
2027 mutex_unlock(&wl->mutex);
2030 static void wlcore_channel_switch_work(struct work_struct *work)
2032 struct delayed_work *dwork;
2033 struct wl1271 *wl;
2034 struct ieee80211_vif *vif;
2035 struct wl12xx_vif *wlvif;
2036 int ret;
2038 dwork = to_delayed_work(work);
2039 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2040 wl = wlvif->wl;
2042 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2044 mutex_lock(&wl->mutex);
2046 if (unlikely(wl->state != WLCORE_STATE_ON))
2047 goto out;
2049 /* check the channel switch is still ongoing */
2050 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2051 goto out;
2053 vif = wl12xx_wlvif_to_vif(wlvif);
2054 ieee80211_chswitch_done(vif, false);
2056 ret = pm_runtime_get_sync(wl->dev);
2057 if (ret < 0) {
2058 pm_runtime_put_noidle(wl->dev);
2059 goto out;
2062 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2064 pm_runtime_mark_last_busy(wl->dev);
2065 pm_runtime_put_autosuspend(wl->dev);
2066 out:
2067 mutex_unlock(&wl->mutex);
2070 static void wlcore_connection_loss_work(struct work_struct *work)
2072 struct delayed_work *dwork;
2073 struct wl1271 *wl;
2074 struct ieee80211_vif *vif;
2075 struct wl12xx_vif *wlvif;
2077 dwork = to_delayed_work(work);
2078 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2079 wl = wlvif->wl;
2081 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2083 mutex_lock(&wl->mutex);
2085 if (unlikely(wl->state != WLCORE_STATE_ON))
2086 goto out;
2088 /* Call mac80211 connection loss */
2089 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2090 goto out;
2092 vif = wl12xx_wlvif_to_vif(wlvif);
2093 ieee80211_connection_loss(vif);
2094 out:
2095 mutex_unlock(&wl->mutex);
2098 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2100 struct delayed_work *dwork;
2101 struct wl1271 *wl;
2102 struct wl12xx_vif *wlvif;
2103 unsigned long time_spare;
2104 int ret;
2106 dwork = to_delayed_work(work);
2107 wlvif = container_of(dwork, struct wl12xx_vif,
2108 pending_auth_complete_work);
2109 wl = wlvif->wl;
2111 mutex_lock(&wl->mutex);
2113 if (unlikely(wl->state != WLCORE_STATE_ON))
2114 goto out;
2117 * Make sure a second really passed since the last auth reply. Maybe
2118 * a second auth reply arrived while we were stuck on the mutex.
2119 * Check for a little less than the timeout to protect from scheduler
2120 * irregularities.
2122 time_spare = jiffies +
2123 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2124 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2125 goto out;
2127 ret = pm_runtime_get_sync(wl->dev);
2128 if (ret < 0) {
2129 pm_runtime_put_noidle(wl->dev);
2130 goto out;
2133 /* cancel the ROC if active */
2134 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2136 pm_runtime_mark_last_busy(wl->dev);
2137 pm_runtime_put_autosuspend(wl->dev);
2138 out:
2139 mutex_unlock(&wl->mutex);
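/*
 * Rate-policy and KLV-template slots are tracked in small bitmaps; the
 * allocation helpers below hand out the first free index (returning
 * -EBUSY once the firmware limit is reached) and the free helpers
 * release it again.
 */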
2142 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2144 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2145 WL12XX_MAX_RATE_POLICIES);
2146 if (policy >= WL12XX_MAX_RATE_POLICIES)
2147 return -EBUSY;
2149 __set_bit(policy, wl->rate_policies_map);
2150 *idx = policy;
2151 return 0;
2154 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2156 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2157 return;
2159 __clear_bit(*idx, wl->rate_policies_map);
2160 *idx = WL12XX_MAX_RATE_POLICIES;
2163 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2165 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2166 WLCORE_MAX_KLV_TEMPLATES);
2167 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2168 return -EBUSY;
2170 __set_bit(policy, wl->klv_templates_map);
2171 *idx = policy;
2172 return 0;
2175 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2177 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2178 return;
2180 __clear_bit(*idx, wl->klv_templates_map);
2181 *idx = WLCORE_MAX_KLV_TEMPLATES;
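/*
 * Map the vif's bss_type (plus the p2p and mesh cases) to the firmware
 * role type used when enabling the role, or WL12XX_INVALID_ROLE_TYPE
 * for an unknown bss_type.
 */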
2184 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2186 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2188 switch (wlvif->bss_type) {
2189 case BSS_TYPE_AP_BSS:
2190 if (wlvif->p2p)
2191 return WL1271_ROLE_P2P_GO;
2192 else if (ieee80211_vif_is_mesh(vif))
2193 return WL1271_ROLE_MESH_POINT;
2194 else
2195 return WL1271_ROLE_AP;
2197 case BSS_TYPE_STA_BSS:
2198 if (wlvif->p2p)
2199 return WL1271_ROLE_P2P_CL;
2200 else
2201 return WL1271_ROLE_STA;
2203 case BSS_TYPE_IBSS:
2204 return WL1271_ROLE_IBSS;
2206 default:
2207 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2209 return WL12XX_INVALID_ROLE_TYPE;
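/*
 * Reset the per-vif state (everything before the 'persistent' member),
 * derive bss_type from the mac80211 interface type, allocate the rate
 * policies and keep-alive template, and initialize the per-vif works.
 */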
2212 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2214 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2215 int i;
2217 /* clear everything but the persistent data */
2218 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2220 switch (ieee80211_vif_type_p2p(vif)) {
2221 case NL80211_IFTYPE_P2P_CLIENT:
2222 wlvif->p2p = 1;
2223 /* fall-through */
2224 case NL80211_IFTYPE_STATION:
2225 case NL80211_IFTYPE_P2P_DEVICE:
2226 wlvif->bss_type = BSS_TYPE_STA_BSS;
2227 break;
2228 case NL80211_IFTYPE_ADHOC:
2229 wlvif->bss_type = BSS_TYPE_IBSS;
2230 break;
2231 case NL80211_IFTYPE_P2P_GO:
2232 wlvif->p2p = 1;
2233 /* fall-through */
2234 case NL80211_IFTYPE_AP:
2235 case NL80211_IFTYPE_MESH_POINT:
2236 wlvif->bss_type = BSS_TYPE_AP_BSS;
2237 break;
2238 default:
2239 wlvif->bss_type = MAX_BSS_TYPE;
2240 return -EOPNOTSUPP;
2243 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2244 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2245 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2247 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2248 wlvif->bss_type == BSS_TYPE_IBSS) {
2249 /* init sta/ibss data */
2250 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2251 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2252 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2253 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2254 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2255 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2256 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2257 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2258 } else {
2259 /* init ap data */
2260 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2261 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2262 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2263 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2264 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2265 wl12xx_allocate_rate_policy(wl,
2266 &wlvif->ap.ucast_rate_idx[i]);
2267 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2269 * TODO: check if basic_rate shouldn't be
2270 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2271 * instead (the same thing for STA above).
2273 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2274 /* TODO: this seems to be used only for STA, check it */
2275 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2278 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2279 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2280 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2283 * mac80211 configures some values globally, while we treat them
2284 * per-interface. thus, on init, we have to copy them from wl
2286 wlvif->band = wl->band;
2287 wlvif->channel = wl->channel;
2288 wlvif->power_level = wl->power_level;
2289 wlvif->channel_type = wl->channel_type;
2291 INIT_WORK(&wlvif->rx_streaming_enable_work,
2292 wl1271_rx_streaming_enable_work);
2293 INIT_WORK(&wlvif->rx_streaming_disable_work,
2294 wl1271_rx_streaming_disable_work);
2295 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2296 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2297 wlcore_channel_switch_work);
2298 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2299 wlcore_connection_loss_work);
2300 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2301 wlcore_pending_auth_complete_work);
2302 INIT_LIST_HEAD(&wlvif->list);
2304 timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2305 return 0;
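/*
 * Power the chip on and boot the firmware, retrying up to
 * WL1271_BOOT_RETRIES times. On success the hw/fw version info is
 * published through the wiphy and wl->state moves to WLCORE_STATE_ON.
 */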
2308 static int wl12xx_init_fw(struct wl1271 *wl)
2310 int retries = WL1271_BOOT_RETRIES;
2311 bool booted = false;
2312 struct wiphy *wiphy = wl->hw->wiphy;
2313 int ret;
2315 while (retries) {
2316 retries--;
2317 ret = wl12xx_chip_wakeup(wl, false);
2318 if (ret < 0)
2319 goto power_off;
2321 ret = wl->ops->boot(wl);
2322 if (ret < 0)
2323 goto power_off;
2325 ret = wl1271_hw_init(wl);
2326 if (ret < 0)
2327 goto irq_disable;
2329 booted = true;
2330 break;
2332 irq_disable:
2333 mutex_unlock(&wl->mutex);
2334 /* Unlocking the mutex in the middle of handling is
2335 inherently unsafe. In this case we deem it safe to do,
2336 because we need to let any possibly pending IRQ out of
2337 the system (and while we are WLCORE_STATE_OFF the IRQ
2338 work function will not do anything.) Also, any other
2339 possible concurrent operations will fail due to the
2340 current state, hence the wl1271 struct should be safe. */
2341 wlcore_disable_interrupts(wl);
2342 wl1271_flush_deferred_work(wl);
2343 cancel_work_sync(&wl->netstack_work);
2344 mutex_lock(&wl->mutex);
2345 power_off:
2346 wl1271_power_off(wl);
2349 if (!booted) {
2350 wl1271_error("firmware boot failed despite %d retries",
2351 WL1271_BOOT_RETRIES);
2352 goto out;
2355 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2357 /* update hw/fw version info in wiphy struct */
2358 wiphy->hw_version = wl->chip.id;
2359 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2360 sizeof(wiphy->fw_version));
2363 * Now we know if 11a is supported (info from the NVS), so disable
2364 * 11a channels if not supported
2366 if (!wl->enable_11a)
2367 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2369 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2370 wl->enable_11a ? "" : "not ");
2372 wl->state = WLCORE_STATE_ON;
2373 out:
2374 return ret;
2377 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2379 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2383 * Check whether a fw switch (i.e. moving from one loaded
2384 * fw to another) is needed. This function is also responsible
2385 * for updating wl->last_vif_count, so it must be called before
2386 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2387 * will be used).
2389 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2390 struct vif_counter_data vif_counter_data,
2391 bool add)
2393 enum wl12xx_fw_type current_fw = wl->fw_type;
2394 u8 vif_count = vif_counter_data.counter;
2396 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2397 return false;
2399 /* increase the vif count if this is a new vif */
2400 if (add && !vif_counter_data.cur_vif_running)
2401 vif_count++;
2403 wl->last_vif_count = vif_count;
2405 /* no need for fw change if the device is OFF */
2406 if (wl->state == WLCORE_STATE_OFF)
2407 return false;
2409 /* no need for fw change if a single fw is used */
2410 if (!wl->mr_fw_name)
2411 return false;
2413 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2414 return true;
2415 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2416 return true;
2418 return false;
2422 * Enter "forced psm". Make sure the sta is in psm against the ap,
2423 * to make the fw switch a bit more disconnection-persistent.
2425 static void wl12xx_force_active_psm(struct wl1271 *wl)
2427 struct wl12xx_vif *wlvif;
2429 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2430 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2434 struct wlcore_hw_queue_iter_data {
2435 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2436 /* current vif */
2437 struct ieee80211_vif *vif;
2438 /* is the current vif among those iterated */
2439 bool cur_running;
2442 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2443 struct ieee80211_vif *vif)
2445 struct wlcore_hw_queue_iter_data *iter_data = data;
2447 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2448 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2449 return;
2451 if (iter_data->cur_running || vif == iter_data->vif) {
2452 iter_data->cur_running = true;
2453 return;
2456 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
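/*
 * Pick a free block of NUM_TX_QUEUES hw queues for this vif. On
 * resume/recovery the base already known to mac80211 is reused.
 * AP vifs additionally get a cab (content-after-beacon) queue from
 * the range reserved after the regular per-vif queues.
 */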
2459 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2460 struct wl12xx_vif *wlvif)
2462 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2463 struct wlcore_hw_queue_iter_data iter_data = {};
2464 int i, q_base;
2466 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2467 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2468 return 0;
2471 iter_data.vif = vif;
2473 /* mark all bits taken by active interfaces */
2474 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2475 IEEE80211_IFACE_ITER_RESUME_ALL,
2476 wlcore_hw_queue_iter, &iter_data);
2478 /* the current vif is already running in mac80211 (resume/recovery) */
2479 if (iter_data.cur_running) {
2480 wlvif->hw_queue_base = vif->hw_queue[0];
2481 wl1271_debug(DEBUG_MAC80211,
2482 "using pre-allocated hw queue base %d",
2483 wlvif->hw_queue_base);
2485 /* the interface might have changed type */
2486 goto adjust_cab_queue;
2489 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2490 WLCORE_NUM_MAC_ADDRESSES);
2491 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2492 return -EBUSY;
2494 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2495 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2496 wlvif->hw_queue_base);
2498 for (i = 0; i < NUM_TX_QUEUES; i++) {
2499 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2500 /* register hw queues in mac80211 */
2501 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2504 adjust_cab_queue:
2505 /* the last places are reserved for cab queues per interface */
2506 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2507 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2508 wlvif->hw_queue_base / NUM_TX_QUEUES;
2509 else
2510 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2512 return 0;
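/*
 * mac80211 add_interface callback: initialize the per-vif data, boot
 * the firmware if the device was off, enable the corresponding role in
 * the fw and account the new vif in wl->ap_count/sta_count.
 */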
2515 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2516 struct ieee80211_vif *vif)
2518 struct wl1271 *wl = hw->priv;
2519 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2520 struct vif_counter_data vif_count;
2521 int ret = 0;
2522 u8 role_type;
2524 if (wl->plt) {
2525 wl1271_error("Adding Interface not allowed while in PLT mode");
2526 return -EBUSY;
2529 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2530 IEEE80211_VIF_SUPPORTS_UAPSD |
2531 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2533 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2534 ieee80211_vif_type_p2p(vif), vif->addr);
2536 wl12xx_get_vif_count(hw, vif, &vif_count);
2538 mutex_lock(&wl->mutex);
2541 * in some corner-case HW recovery scenarios it's possible to
2542 * get here before __wl1271_op_remove_interface is complete, so
2543 * opt out if that is the case.
2545 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2546 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2547 ret = -EBUSY;
2548 goto out;
2552 ret = wl12xx_init_vif_data(wl, vif);
2553 if (ret < 0)
2554 goto out;
2556 wlvif->wl = wl;
2557 role_type = wl12xx_get_role_type(wl, wlvif);
2558 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2559 ret = -EINVAL;
2560 goto out;
2563 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2564 if (ret < 0)
2565 goto out;
2568 * TODO: after the nvs issue is solved, move this block
2569 * to start(), and make sure here the driver is ON.
2571 if (wl->state == WLCORE_STATE_OFF) {
2573 * we still need this in order to configure the fw
2574 * while uploading the nvs
2576 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2578 ret = wl12xx_init_fw(wl);
2579 if (ret < 0)
2580 goto out;
2584 * Call runtime PM only after possible wl12xx_init_fw() above
2585 * is done. Otherwise we do not have interrupts enabled.
2587 ret = pm_runtime_get_sync(wl->dev);
2588 if (ret < 0) {
2589 pm_runtime_put_noidle(wl->dev);
2590 goto out_unlock;
2593 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2594 wl12xx_force_active_psm(wl);
2595 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2596 mutex_unlock(&wl->mutex);
2597 wl1271_recovery_work(&wl->recovery_work);
2598 return 0;
2601 if (!wlcore_is_p2p_mgmt(wlvif)) {
2602 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2603 role_type, &wlvif->role_id);
2604 if (ret < 0)
2605 goto out;
2607 ret = wl1271_init_vif_specific(wl, vif);
2608 if (ret < 0)
2609 goto out;
2611 } else {
2612 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2613 &wlvif->dev_role_id);
2614 if (ret < 0)
2615 goto out;
2617 /* needed mainly for configuring rate policies */
2618 ret = wl1271_sta_hw_init(wl, wlvif);
2619 if (ret < 0)
2620 goto out;
2623 list_add(&wlvif->list, &wl->wlvif_list);
2624 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2626 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2627 wl->ap_count++;
2628 else
2629 wl->sta_count++;
2630 out:
2631 pm_runtime_mark_last_busy(wl->dev);
2632 pm_runtime_put_autosuspend(wl->dev);
2633 out_unlock:
2634 mutex_unlock(&wl->mutex);
2636 return ret;
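/*
 * Tear the vif down: abort a scan it owns, disable its role(s) in the
 * fw, free its rate policies and recorded keys, and cancel the per-vif
 * works (wl->mutex is dropped around the cancel calls).
 */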
2639 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2640 struct ieee80211_vif *vif,
2641 bool reset_tx_queues)
2643 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2644 int i, ret;
2645 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2647 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2649 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2650 return;
2652 /* because of hardware recovery, we may get here twice */
2653 if (wl->state == WLCORE_STATE_OFF)
2654 return;
2656 wl1271_info("down");
2658 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2659 wl->scan_wlvif == wlvif) {
2660 struct cfg80211_scan_info info = {
2661 .aborted = true,
2665 * Rearm the tx watchdog just before idling scan. This
2666 * prevents just-finished scans from triggering the watchdog
2668 wl12xx_rearm_tx_watchdog_locked(wl);
2670 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2671 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2672 wl->scan_wlvif = NULL;
2673 wl->scan.req = NULL;
2674 ieee80211_scan_completed(wl->hw, &info);
2677 if (wl->sched_vif == wlvif)
2678 wl->sched_vif = NULL;
2680 if (wl->roc_vif == vif) {
2681 wl->roc_vif = NULL;
2682 ieee80211_remain_on_channel_expired(wl->hw);
2685 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2686 /* disable active roles */
2687 ret = pm_runtime_get_sync(wl->dev);
2688 if (ret < 0) {
2689 pm_runtime_put_noidle(wl->dev);
2690 goto deinit;
2693 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2694 wlvif->bss_type == BSS_TYPE_IBSS) {
2695 if (wl12xx_dev_role_started(wlvif))
2696 wl12xx_stop_dev(wl, wlvif);
2699 if (!wlcore_is_p2p_mgmt(wlvif)) {
2700 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2701 if (ret < 0)
2702 goto deinit;
2703 } else {
2704 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2705 if (ret < 0)
2706 goto deinit;
2709 pm_runtime_mark_last_busy(wl->dev);
2710 pm_runtime_put_autosuspend(wl->dev);
2712 deinit:
2713 wl12xx_tx_reset_wlvif(wl, wlvif);
2715 /* clear all hlids (except system_hlid) */
2716 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2718 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2719 wlvif->bss_type == BSS_TYPE_IBSS) {
2720 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2721 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2722 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2723 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2724 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2725 } else {
2726 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2727 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2728 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2729 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2730 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2731 wl12xx_free_rate_policy(wl,
2732 &wlvif->ap.ucast_rate_idx[i]);
2733 wl1271_free_ap_keys(wl, wlvif);
2736 dev_kfree_skb(wlvif->probereq);
2737 wlvif->probereq = NULL;
2738 if (wl->last_wlvif == wlvif)
2739 wl->last_wlvif = NULL;
2740 list_del(&wlvif->list);
2741 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2742 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2743 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2745 if (is_ap)
2746 wl->ap_count--;
2747 else
2748 wl->sta_count--;
2751 * Last AP is gone but stations remain. Configure sleep auth according to STA.
2752 * Don't do this on unintended recovery.
2754 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2755 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2756 goto unlock;
2758 if (wl->ap_count == 0 && is_ap) {
2759 /* mask ap events */
2760 wl->event_mask &= ~wl->ap_event_mask;
2761 wl1271_event_unmask(wl);
2764 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2765 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2766 /* Configure for power according to debugfs */
2767 if (sta_auth != WL1271_PSM_ILLEGAL)
2768 wl1271_acx_sleep_auth(wl, sta_auth);
2769 /* Configure for ELP power saving */
2770 else
2771 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2774 unlock:
2775 mutex_unlock(&wl->mutex);
2777 del_timer_sync(&wlvif->rx_streaming_timer);
2778 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2779 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2780 cancel_work_sync(&wlvif->rc_update_work);
2781 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2782 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2783 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2785 mutex_lock(&wl->mutex);
2788 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2789 struct ieee80211_vif *vif)
2791 struct wl1271 *wl = hw->priv;
2792 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2793 struct wl12xx_vif *iter;
2794 struct vif_counter_data vif_count;
2796 wl12xx_get_vif_count(hw, vif, &vif_count);
2797 mutex_lock(&wl->mutex);
2799 if (wl->state == WLCORE_STATE_OFF ||
2800 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2801 goto out;
2804 * wl->vif can be null here if someone shuts down the interface
2805 * just when hardware recovery has been started.
2807 wl12xx_for_each_wlvif(wl, iter) {
2808 if (iter != wlvif)
2809 continue;
2811 __wl1271_op_remove_interface(wl, vif, true);
2812 break;
2814 WARN_ON(iter != wlvif);
2815 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2816 wl12xx_force_active_psm(wl);
2817 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2818 wl12xx_queue_recovery_work(wl);
2820 out:
2821 mutex_unlock(&wl->mutex);
2824 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2825 struct ieee80211_vif *vif,
2826 enum nl80211_iftype new_type, bool p2p)
2828 struct wl1271 *wl = hw->priv;
2829 int ret;
2831 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2832 wl1271_op_remove_interface(hw, vif);
2834 vif->type = new_type;
2835 vif->p2p = p2p;
2836 ret = wl1271_op_add_interface(hw, vif);
2838 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2839 return ret;
2842 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2844 int ret;
2845 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2848 * One of the side effects of the JOIN command is that it clears
2849 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2850 * to a WPA/WPA2 access point will therefore kill the data-path.
2851 * Currently the only valid scenario for JOIN during association
2852 * is on roaming, in which case we will also be given new keys.
2853 * Keep the below message for now, unless it starts bothering
2854 * users who really like to roam a lot :)
2856 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2857 wl1271_info("JOIN while associated.");
2859 /* clear encryption type */
2860 wlvif->encryption_type = KEY_NONE;
2862 if (is_ibss)
2863 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2864 else {
2865 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2867 * TODO: this is an ugly workaround for wl12xx fw
2868 * bug - we are not able to tx/rx after the first
2869 * start_sta, so make dummy start+stop calls,
2870 * and then call start_sta again.
2871 * this should be fixed in the fw.
2873 wl12xx_cmd_role_start_sta(wl, wlvif);
2874 wl12xx_cmd_role_stop_sta(wl, wlvif);
2877 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2880 return ret;
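/* Extract the SSID IE from the given frame and cache it in wlvif. */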
2883 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2884 int offset)
2886 u8 ssid_len;
2887 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2888 skb->len - offset);
2890 if (!ptr) {
2891 wl1271_error("No SSID in IEs!");
2892 return -ENOENT;
2895 ssid_len = ptr[1];
2896 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2897 wl1271_error("SSID is too long!");
2898 return -EINVAL;
2901 wlvif->ssid_len = ssid_len;
2902 memcpy(wlvif->ssid, ptr+2, ssid_len);
2903 return 0;
2906 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2908 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2909 struct sk_buff *skb;
2910 int ieoffset;
2912 /* we currently only support setting the ssid from the ap probe req */
2913 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2914 return -EINVAL;
2916 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2917 if (!skb)
2918 return -EINVAL;
2920 ieoffset = offsetof(struct ieee80211_mgmt,
2921 u.probe_req.variable);
2922 wl1271_ssid_set(wlvif, skb, ieoffset);
2923 dev_kfree_skb(skb);
2925 return 0;
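/*
 * Apply the association state: set the PS-poll and AP probe-request
 * templates, enable connection monitoring and keep-alive, and configure
 * the AID and (optionally) the peer's rate set.
 */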
2928 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2929 struct ieee80211_bss_conf *bss_conf,
2930 u32 sta_rate_set)
2932 int ieoffset;
2933 int ret;
2935 wlvif->aid = bss_conf->aid;
2936 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2937 wlvif->beacon_int = bss_conf->beacon_int;
2938 wlvif->wmm_enabled = bss_conf->qos;
2940 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2943 * with wl1271, we don't need to update the
2944 * beacon_int and dtim_period, because the firmware
2945 * updates them by itself when the first beacon is
2946 * received after a join.
2948 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2949 if (ret < 0)
2950 return ret;
2953 * Get a template for hardware connection maintenance
2955 dev_kfree_skb(wlvif->probereq);
2956 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2957 wlvif,
2958 NULL);
2959 ieoffset = offsetof(struct ieee80211_mgmt,
2960 u.probe_req.variable);
2961 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2963 /* enable the connection monitoring feature */
2964 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2965 if (ret < 0)
2966 return ret;
2969 * The join command disables the keep-alive mode, shuts down its process,
2970 * and also clears the template config, so we need to reset it all after
2971 * the join. The acx_aid starts the keep-alive process, and the order
2972 * of the commands below is relevant.
2974 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2975 if (ret < 0)
2976 return ret;
2978 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2979 if (ret < 0)
2980 return ret;
2982 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2983 if (ret < 0)
2984 return ret;
2986 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2987 wlvif->sta.klv_template_id,
2988 ACX_KEEP_ALIVE_TPL_VALID);
2989 if (ret < 0)
2990 return ret;
2993 * The default fw psm configuration is AUTO, while mac80211 default
2994 * setting is off (ACTIVE), so sync the fw with the correct value.
2996 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2997 if (ret < 0)
2998 return ret;
3000 if (sta_rate_set) {
3001 wlvif->rate_set =
3002 wl1271_tx_enabled_rates_get(wl,
3003 sta_rate_set,
3004 wlvif->band);
3005 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3006 if (ret < 0)
3007 return ret;
3010 return ret;
3013 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3015 int ret;
3016 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3018 /* make sure we are associated (sta) */
3019 if (sta &&
3020 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3021 return false;
3023 /* make sure we are joined (ibss) */
3024 if (!sta &&
3025 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3026 return false;
3028 if (sta) {
3029 /* use defaults when not associated */
3030 wlvif->aid = 0;
3032 /* free probe-request template */
3033 dev_kfree_skb(wlvif->probereq);
3034 wlvif->probereq = NULL;
3036 /* disable connection monitor features */
3037 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3038 if (ret < 0)
3039 return ret;
3041 /* Disable the keep-alive feature */
3042 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3043 if (ret < 0)
3044 return ret;
3046 /* disable beacon filtering */
3047 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3048 if (ret < 0)
3049 return ret;
3052 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3053 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3055 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3056 ieee80211_chswitch_done(vif, false);
3057 cancel_delayed_work(&wlvif->channel_switch_work);
3060 /* invalidate keep-alive template */
3061 wl1271_acx_keep_alive_config(wl, wlvif,
3062 wlvif->sta.klv_template_id,
3063 ACX_KEEP_ALIVE_TPL_INVALID);
3065 return 0;
3068 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3070 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3071 wlvif->rate_set = wlvif->basic_rate_set;
3074 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3075 bool idle)
3077 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3079 if (idle == cur_idle)
3080 return;
3082 if (idle) {
3083 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3084 } else {
3085 /* The current firmware only supports sched_scan in idle */
3086 if (wl->sched_vif == wlvif)
3087 wl->ops->sched_scan_stop(wl, wlvif);
3089 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3093 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3094 struct ieee80211_conf *conf, u32 changed)
3096 int ret;
3098 if (wlcore_is_p2p_mgmt(wlvif))
3099 return 0;
3101 if (conf->power_level != wlvif->power_level) {
3102 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3103 if (ret < 0)
3104 return ret;
3106 wlvif->power_level = conf->power_level;
3109 return 0;
3112 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3114 struct wl1271 *wl = hw->priv;
3115 struct wl12xx_vif *wlvif;
3116 struct ieee80211_conf *conf = &hw->conf;
3117 int ret = 0;
3119 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3120 " changed 0x%x",
3121 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3122 conf->power_level,
3123 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3124 changed);
3126 mutex_lock(&wl->mutex);
3128 if (changed & IEEE80211_CONF_CHANGE_POWER)
3129 wl->power_level = conf->power_level;
3131 if (unlikely(wl->state != WLCORE_STATE_ON))
3132 goto out;
3134 ret = pm_runtime_get_sync(wl->dev);
3135 if (ret < 0) {
3136 pm_runtime_put_noidle(wl->dev);
3137 goto out;
3140 /* configure each interface */
3141 wl12xx_for_each_wlvif(wl, wlvif) {
3142 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3143 if (ret < 0)
3144 goto out_sleep;
3147 out_sleep:
3148 pm_runtime_mark_last_busy(wl->dev);
3149 pm_runtime_put_autosuspend(wl->dev);
3151 out:
3152 mutex_unlock(&wl->mutex);
3154 return ret;
3157 struct wl1271_filter_params {
3158 bool enabled;
3159 int mc_list_length;
3160 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
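/*
 * Called in atomic context: snapshot the multicast list into a
 * wl1271_filter_params allocation; wl1271_op_configure_filter() below
 * consumes and frees it.
 */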
3163 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3164 struct netdev_hw_addr_list *mc_list)
3166 struct wl1271_filter_params *fp;
3167 struct netdev_hw_addr *ha;
3169 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3170 if (!fp) {
3171 wl1271_error("Out of memory setting filters.");
3172 return 0;
3175 /* update multicast filtering parameters */
3176 fp->mc_list_length = 0;
3177 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3178 fp->enabled = false;
3179 } else {
3180 fp->enabled = true;
3181 netdev_hw_addr_list_for_each(ha, mc_list) {
3182 memcpy(fp->mc_list[fp->mc_list_length],
3183 ha->addr, ETH_ALEN);
3184 fp->mc_list_length++;
3188 return (u64)(unsigned long)fp;
3191 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3192 FIF_FCSFAIL | \
3193 FIF_BCN_PRBRESP_PROMISC | \
3194 FIF_CONTROL | \
3195 FIF_OTHER_BSS)
3197 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3198 unsigned int changed,
3199 unsigned int *total, u64 multicast)
3201 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3202 struct wl1271 *wl = hw->priv;
3203 struct wl12xx_vif *wlvif;
3205 int ret;
3207 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3208 " total %x", changed, *total);
3210 mutex_lock(&wl->mutex);
3212 *total &= WL1271_SUPPORTED_FILTERS;
3213 changed &= WL1271_SUPPORTED_FILTERS;
3215 if (unlikely(wl->state != WLCORE_STATE_ON))
3216 goto out;
3218 ret = pm_runtime_get_sync(wl->dev);
3219 if (ret < 0) {
3220 pm_runtime_put_noidle(wl->dev);
3221 goto out;
3224 wl12xx_for_each_wlvif(wl, wlvif) {
3225 if (wlcore_is_p2p_mgmt(wlvif))
3226 continue;
3228 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3229 if (*total & FIF_ALLMULTI)
3230 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3231 false,
3232 NULL, 0);
3233 else if (fp)
3234 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3235 fp->enabled,
3236 fp->mc_list,
3237 fp->mc_list_length);
3238 if (ret < 0)
3239 goto out_sleep;
3243 * If the interface is in AP mode and was created with allmulticast, then
3244 * disable the firmware filters so that all multicast packets are passed.
3245 * This is mandatory for MDNS-based discovery protocols.
3247 if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3248 if (*total & FIF_ALLMULTI) {
3249 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3250 false,
3251 NULL, 0);
3252 if (ret < 0)
3253 goto out_sleep;
3259 * the fw doesn't provide an api to configure the filters. instead,
3260 * the filters configuration is based on the active roles / ROC
3261 * state.
3264 out_sleep:
3265 pm_runtime_mark_last_busy(wl->dev);
3266 pm_runtime_put_autosuspend(wl->dev);
3268 out:
3269 mutex_unlock(&wl->mutex);
3270 kfree(fp);
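/*
 * Keys set while the AP role is not yet started are recorded here and
 * replayed to the firmware by wl1271_ap_init_hwenc() once the AP starts.
 */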
3273 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3274 u8 id, u8 key_type, u8 key_size,
3275 const u8 *key, u8 hlid, u32 tx_seq_32,
3276 u16 tx_seq_16, bool is_pairwise)
3278 struct wl1271_ap_key *ap_key;
3279 int i;
3281 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3283 if (key_size > MAX_KEY_SIZE)
3284 return -EINVAL;
3287 * Find next free entry in ap_keys. Also check we are not replacing
3288 * an existing key.
3290 for (i = 0; i < MAX_NUM_KEYS; i++) {
3291 if (wlvif->ap.recorded_keys[i] == NULL)
3292 break;
3294 if (wlvif->ap.recorded_keys[i]->id == id) {
3295 wl1271_warning("trying to record key replacement");
3296 return -EINVAL;
3300 if (i == MAX_NUM_KEYS)
3301 return -EBUSY;
3303 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3304 if (!ap_key)
3305 return -ENOMEM;
3307 ap_key->id = id;
3308 ap_key->key_type = key_type;
3309 ap_key->key_size = key_size;
3310 memcpy(ap_key->key, key, key_size);
3311 ap_key->hlid = hlid;
3312 ap_key->tx_seq_32 = tx_seq_32;
3313 ap_key->tx_seq_16 = tx_seq_16;
3314 ap_key->is_pairwise = is_pairwise;
3316 wlvif->ap.recorded_keys[i] = ap_key;
3317 return 0;
3320 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3322 int i;
3324 for (i = 0; i < MAX_NUM_KEYS; i++) {
3325 kfree(wlvif->ap.recorded_keys[i]);
3326 wlvif->ap.recorded_keys[i] = NULL;
3330 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3332 int i, ret = 0;
3333 struct wl1271_ap_key *key;
3334 bool wep_key_added = false;
3336 for (i = 0; i < MAX_NUM_KEYS; i++) {
3337 u8 hlid;
3338 if (wlvif->ap.recorded_keys[i] == NULL)
3339 break;
3341 key = wlvif->ap.recorded_keys[i];
3342 hlid = key->hlid;
3343 if (hlid == WL12XX_INVALID_LINK_ID)
3344 hlid = wlvif->ap.bcast_hlid;
3346 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3347 key->id, key->key_type,
3348 key->key_size, key->key,
3349 hlid, key->tx_seq_32,
3350 key->tx_seq_16, key->is_pairwise);
3351 if (ret < 0)
3352 goto out;
3354 if (key->key_type == KEY_WEP)
3355 wep_key_added = true;
3358 if (wep_key_added) {
3359 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3360 wlvif->ap.bcast_hlid);
3361 if (ret < 0)
3362 goto out;
3365 out:
3366 wl1271_free_ap_keys(wl, wlvif);
3367 return ret;
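/*
 * Program a key into the firmware, using the AP (per-hlid) or STA
 * command path depending on the vif's bss_type. Removal of unicast STA
 * keys is silently ignored since the fw clears them on the next join.
 */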
3370 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3371 u16 action, u8 id, u8 key_type,
3372 u8 key_size, const u8 *key, u32 tx_seq_32,
3373 u16 tx_seq_16, struct ieee80211_sta *sta,
3374 bool is_pairwise)
3376 int ret;
3377 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3379 if (is_ap) {
3380 struct wl1271_station *wl_sta;
3381 u8 hlid;
3383 if (sta) {
3384 wl_sta = (struct wl1271_station *)sta->drv_priv;
3385 hlid = wl_sta->hlid;
3386 } else {
3387 hlid = wlvif->ap.bcast_hlid;
3390 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3392 * We do not support removing keys after AP shutdown.
3393 * Pretend we do to make mac80211 happy.
3395 if (action != KEY_ADD_OR_REPLACE)
3396 return 0;
3398 ret = wl1271_record_ap_key(wl, wlvif, id,
3399 key_type, key_size,
3400 key, hlid, tx_seq_32,
3401 tx_seq_16, is_pairwise);
3402 } else {
3403 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3404 id, key_type, key_size,
3405 key, hlid, tx_seq_32,
3406 tx_seq_16, is_pairwise);
3409 if (ret < 0)
3410 return ret;
3411 } else {
3412 const u8 *addr;
3413 static const u8 bcast_addr[ETH_ALEN] = {
3414 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3417 addr = sta ? sta->addr : bcast_addr;
3419 if (is_zero_ether_addr(addr)) {
3420 /* We don't support TX-only encryption */
3421 return -EOPNOTSUPP;
3424 /* The wl1271 does not allow removing unicast keys - they
3425 will be cleared automatically on the next CMD_JOIN. Ignore the
3426 request silently, as we don't want mac80211 to emit
3427 an error message. */
3428 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3429 return 0;
3431 /* don't remove key if hlid was already deleted */
3432 if (action == KEY_REMOVE &&
3433 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3434 return 0;
3436 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3437 id, key_type, key_size,
3438 key, addr, tx_seq_32,
3439 tx_seq_16);
3440 if (ret < 0)
3441 return ret;
3445 return 0;
3448 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3449 struct ieee80211_vif *vif,
3450 struct ieee80211_sta *sta,
3451 struct ieee80211_key_conf *key_conf)
3453 struct wl1271 *wl = hw->priv;
3454 int ret;
3455 bool might_change_spare =
3456 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3457 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3459 if (might_change_spare) {
3461 * stop the queues and flush to ensure the next packets are
3462 * in sync with FW spare block accounting
3464 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3465 wl1271_tx_flush(wl);
3468 mutex_lock(&wl->mutex);
3470 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3471 ret = -EAGAIN;
3472 goto out_wake_queues;
3475 ret = pm_runtime_get_sync(wl->dev);
3476 if (ret < 0) {
3477 pm_runtime_put_noidle(wl->dev);
3478 goto out_wake_queues;
3481 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3483 pm_runtime_mark_last_busy(wl->dev);
3484 pm_runtime_put_autosuspend(wl->dev);
3486 out_wake_queues:
3487 if (might_change_spare)
3488 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3490 mutex_unlock(&wl->mutex);
3492 return ret;
3495 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3496 struct ieee80211_vif *vif,
3497 struct ieee80211_sta *sta,
3498 struct ieee80211_key_conf *key_conf)
3500 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3501 int ret;
3502 u32 tx_seq_32 = 0;
3503 u16 tx_seq_16 = 0;
3504 u8 key_type;
3505 u8 hlid;
3506 bool is_pairwise;
3508 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3510 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3511 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3512 key_conf->cipher, key_conf->keyidx,
3513 key_conf->keylen, key_conf->flags);
3514 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3516 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3517 if (sta) {
3518 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3519 hlid = wl_sta->hlid;
3520 } else {
3521 hlid = wlvif->ap.bcast_hlid;
3523 else
3524 hlid = wlvif->sta.hlid;
3526 if (hlid != WL12XX_INVALID_LINK_ID) {
3527 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3528 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3529 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3532 switch (key_conf->cipher) {
3533 case WLAN_CIPHER_SUITE_WEP40:
3534 case WLAN_CIPHER_SUITE_WEP104:
3535 key_type = KEY_WEP;
3537 key_conf->hw_key_idx = key_conf->keyidx;
3538 break;
3539 case WLAN_CIPHER_SUITE_TKIP:
3540 key_type = KEY_TKIP;
3541 key_conf->hw_key_idx = key_conf->keyidx;
3542 break;
3543 case WLAN_CIPHER_SUITE_CCMP:
3544 key_type = KEY_AES;
3545 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3546 break;
3547 case WL1271_CIPHER_SUITE_GEM:
3548 key_type = KEY_GEM;
3549 break;
3550 default:
3551 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3553 return -EOPNOTSUPP;
3556 is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
3558 switch (cmd) {
3559 case SET_KEY:
3560 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3561 key_conf->keyidx, key_type,
3562 key_conf->keylen, key_conf->key,
3563 tx_seq_32, tx_seq_16, sta, is_pairwise);
3564 if (ret < 0) {
3565 wl1271_error("Could not add or replace key");
3566 return ret;
3570 * reconfiguring arp response if the unicast (or common)
3571 * encryption key type was changed
3573 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3574 (sta || key_type == KEY_WEP) &&
3575 wlvif->encryption_type != key_type) {
3576 wlvif->encryption_type = key_type;
3577 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3578 if (ret < 0) {
3579 wl1271_warning("build arp rsp failed: %d", ret);
3580 return ret;
3583 break;
3585 case DISABLE_KEY:
3586 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3587 key_conf->keyidx, key_type,
3588 key_conf->keylen, key_conf->key,
3589 0, 0, sta, is_pairwise);
3590 if (ret < 0) {
3591 wl1271_error("Could not remove key");
3592 return ret;
3594 break;
3596 default:
3597 wl1271_error("Unsupported key cmd 0x%x", cmd);
3598 return -EOPNOTSUPP;
3601 return ret;
3603 EXPORT_SYMBOL_GPL(wlcore_set_key);
3605 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3606 struct ieee80211_vif *vif,
3607 int key_idx)
3609 struct wl1271 *wl = hw->priv;
3610 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3611 int ret;
3613 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3614 key_idx);
3616 /* we don't handle unsetting of default key */
3617 if (key_idx == -1)
3618 return;
3620 mutex_lock(&wl->mutex);
3622 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3623 ret = -EAGAIN;
3624 goto out_unlock;
3627 ret = pm_runtime_get_sync(wl->dev);
3628 if (ret < 0) {
3629 pm_runtime_put_noidle(wl->dev);
3630 goto out_unlock;
3633 wlvif->default_key = key_idx;
3635 /* the default WEP key needs to be configured at least once */
3636 if (wlvif->encryption_type == KEY_WEP) {
3637 ret = wl12xx_cmd_set_default_wep_key(wl,
3638 key_idx,
3639 wlvif->sta.hlid);
3640 if (ret < 0)
3641 goto out_sleep;
3644 out_sleep:
3645 pm_runtime_mark_last_busy(wl->dev);
3646 pm_runtime_put_autosuspend(wl->dev);
3648 out_unlock:
3649 mutex_unlock(&wl->mutex);
3652 void wlcore_regdomain_config(struct wl1271 *wl)
3654 int ret;
3656 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3657 return;
3659 mutex_lock(&wl->mutex);
3661 if (unlikely(wl->state != WLCORE_STATE_ON))
3662 goto out;
3664 ret = pm_runtime_get_sync(wl->dev);
3665 if (ret < 0)
3666 goto out;
3668 ret = wlcore_cmd_regdomain_config_locked(wl);
3669 if (ret < 0) {
3670 wl12xx_queue_recovery_work(wl);
3671 goto out;
3674 pm_runtime_mark_last_busy(wl->dev);
3675 pm_runtime_put_autosuspend(wl->dev);
3676 out:
3677 mutex_unlock(&wl->mutex);
3680 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3681 struct ieee80211_vif *vif,
3682 struct ieee80211_scan_request *hw_req)
3684 struct cfg80211_scan_request *req = &hw_req->req;
3685 struct wl1271 *wl = hw->priv;
3686 int ret;
3687 u8 *ssid = NULL;
3688 size_t len = 0;
3690 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3692 if (req->n_ssids) {
3693 ssid = req->ssids[0].ssid;
3694 len = req->ssids[0].ssid_len;
3697 mutex_lock(&wl->mutex);
3699 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3701 * We cannot return -EBUSY here because cfg80211 will expect
3702 * a call to ieee80211_scan_completed if we do - in this case
3703 * there won't be any call.
3705 ret = -EAGAIN;
3706 goto out;
3709 ret = pm_runtime_get_sync(wl->dev);
3710 if (ret < 0) {
3711 pm_runtime_put_noidle(wl->dev);
3712 goto out;
3715 /* fail if there is any role in ROC */
3716 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3717 /* don't allow scanning right now */
3718 ret = -EBUSY;
3719 goto out_sleep;
3722 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3723 out_sleep:
3724 pm_runtime_mark_last_busy(wl->dev);
3725 pm_runtime_put_autosuspend(wl->dev);
3726 out:
3727 mutex_unlock(&wl->mutex);
3729 return ret;
3732 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3733 struct ieee80211_vif *vif)
3735 struct wl1271 *wl = hw->priv;
3736 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3737 struct cfg80211_scan_info info = {
3738 .aborted = true,
3740 int ret;
3742 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3744 mutex_lock(&wl->mutex);
3746 if (unlikely(wl->state != WLCORE_STATE_ON))
3747 goto out;
3749 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3750 goto out;
3752 ret = pm_runtime_get_sync(wl->dev);
3753 if (ret < 0) {
3754 pm_runtime_put_noidle(wl->dev);
3755 goto out;
3758 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3759 ret = wl->ops->scan_stop(wl, wlvif);
3760 if (ret < 0)
3761 goto out_sleep;
3765 * Rearm the tx watchdog just before idling scan. This
3766 * prevents just-finished scans from triggering the watchdog
3768 wl12xx_rearm_tx_watchdog_locked(wl);
3770 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3771 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3772 wl->scan_wlvif = NULL;
3773 wl->scan.req = NULL;
3774 ieee80211_scan_completed(wl->hw, &info);
3776 out_sleep:
3777 pm_runtime_mark_last_busy(wl->dev);
3778 pm_runtime_put_autosuspend(wl->dev);
3779 out:
3780 mutex_unlock(&wl->mutex);
3782 cancel_delayed_work_sync(&wl->scan_complete_work);
3785 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3786 struct ieee80211_vif *vif,
3787 struct cfg80211_sched_scan_request *req,
3788 struct ieee80211_scan_ies *ies)
3790 struct wl1271 *wl = hw->priv;
3791 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3792 int ret;
3794 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3796 mutex_lock(&wl->mutex);
3798 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3799 ret = -EAGAIN;
3800 goto out;
3803 ret = pm_runtime_get_sync(wl->dev);
3804 if (ret < 0) {
3805 pm_runtime_put_noidle(wl->dev);
3806 goto out;
3809 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3810 if (ret < 0)
3811 goto out_sleep;
3813 wl->sched_vif = wlvif;
3815 out_sleep:
3816 pm_runtime_mark_last_busy(wl->dev);
3817 pm_runtime_put_autosuspend(wl->dev);
3818 out:
3819 mutex_unlock(&wl->mutex);
3820 return ret;
3823 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3824 struct ieee80211_vif *vif)
3826 struct wl1271 *wl = hw->priv;
3827 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3828 int ret;
3830 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3832 mutex_lock(&wl->mutex);
3834 if (unlikely(wl->state != WLCORE_STATE_ON))
3835 goto out;
3837 ret = pm_runtime_get_sync(wl->dev);
3838 if (ret < 0) {
3839 pm_runtime_put_noidle(wl->dev);
3840 goto out;
3843 wl->ops->sched_scan_stop(wl, wlvif);
3845 pm_runtime_mark_last_busy(wl->dev);
3846 pm_runtime_put_autosuspend(wl->dev);
3847 out:
3848 mutex_unlock(&wl->mutex);
3850 return 0;
3853 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3855 struct wl1271 *wl = hw->priv;
3856 int ret = 0;
3858 mutex_lock(&wl->mutex);
3860 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3861 ret = -EAGAIN;
3862 goto out;
3865 ret = pm_runtime_get_sync(wl->dev);
3866 if (ret < 0) {
3867 pm_runtime_put_noidle(wl->dev);
3868 goto out;
3871 ret = wl1271_acx_frag_threshold(wl, value);
3872 if (ret < 0)
3873 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3875 pm_runtime_mark_last_busy(wl->dev);
3876 pm_runtime_put_autosuspend(wl->dev);
3878 out:
3879 mutex_unlock(&wl->mutex);
3881 return ret;
3884 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3886 struct wl1271 *wl = hw->priv;
3887 struct wl12xx_vif *wlvif;
3888 int ret = 0;
3890 mutex_lock(&wl->mutex);
3892 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3893 ret = -EAGAIN;
3894 goto out;
3897 ret = pm_runtime_get_sync(wl->dev);
3898 if (ret < 0) {
3899 pm_runtime_put_noidle(wl->dev);
3900 goto out;
3903 wl12xx_for_each_wlvif(wl, wlvif) {
3904 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3905 if (ret < 0)
3906 wl1271_warning("set rts threshold failed: %d", ret);
3908 pm_runtime_mark_last_busy(wl->dev);
3909 pm_runtime_put_autosuspend(wl->dev);
3911 out:
3912 mutex_unlock(&wl->mutex);
3914 return ret;
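/*
 * Strip a single IE (by EID) from an skb in place; used below to drop
 * the TIM and P2P IEs when deriving a probe-response template from the
 * beacon.
 */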
3917 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3919 int len;
3920 const u8 *next, *end = skb->data + skb->len;
3921 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3922 skb->len - ieoffset);
3923 if (!ie)
3924 return;
3925 len = ie[1] + 2;
3926 next = ie + len;
3927 memmove(ie, next, end - next);
3928 skb_trim(skb, skb->len - len);
3931 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3932 unsigned int oui, u8 oui_type,
3933 int ieoffset)
3935 int len;
3936 const u8 *next, *end = skb->data + skb->len;
3937 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3938 skb->data + ieoffset,
3939 skb->len - ieoffset);
3940 if (!ie)
3941 return;
3942 len = ie[1] + 2;
3943 next = ie + len;
3944 memmove(ie, next, end - next);
3945 skb_trim(skb, skb->len - len);
3948 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3949 struct ieee80211_vif *vif)
3951 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3952 struct sk_buff *skb;
3953 int ret;
3955 skb = ieee80211_proberesp_get(wl->hw, vif);
3956 if (!skb)
3957 return -EOPNOTSUPP;
3959 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3960 CMD_TEMPL_AP_PROBE_RESPONSE,
3961 skb->data,
3962 skb->len, 0,
3963 rates);
3964 dev_kfree_skb(skb);
3966 if (ret < 0)
3967 goto out;
3969 wl1271_debug(DEBUG_AP, "probe response updated");
3970 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3972 out:
3973 return ret;
3976 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3977 struct ieee80211_vif *vif,
3978 u8 *probe_rsp_data,
3979 size_t probe_rsp_len,
3980 u32 rates)
3982 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3983 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3984 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3985 int ssid_ie_offset, ie_offset, templ_len;
3986 const u8 *ptr;
3988 /* no need to change probe response if the SSID is set correctly */
3989 if (wlvif->ssid_len > 0)
3990 return wl1271_cmd_template_set(wl, wlvif->role_id,
3991 CMD_TEMPL_AP_PROBE_RESPONSE,
3992 probe_rsp_data,
3993 probe_rsp_len, 0,
3994 rates);
3996 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3997 wl1271_error("probe_rsp template too big");
3998 return -EINVAL;
4001 /* start searching from IE offset */
4002 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4004 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4005 probe_rsp_len - ie_offset);
4006 if (!ptr) {
4007 wl1271_error("No SSID in beacon!");
4008 return -EINVAL;
4011 ssid_ie_offset = ptr - probe_rsp_data;
4012 ptr += (ptr[1] + 2);
4014 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4016 /* insert SSID from bss_conf */
4017 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4018 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4019 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4020 bss_conf->ssid, bss_conf->ssid_len);
4021 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
4023 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4024 ptr, probe_rsp_len - (ptr - probe_rsp_data));
4025 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4027 return wl1271_cmd_template_set(wl, wlvif->role_id,
4028 CMD_TEMPL_AP_PROBE_RESPONSE,
4029 probe_rsp_templ,
4030 templ_len, 0,
4031 rates);
4034 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4035 struct ieee80211_vif *vif,
4036 struct ieee80211_bss_conf *bss_conf,
4037 u32 changed)
4039 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4040 int ret = 0;
4042 if (changed & BSS_CHANGED_ERP_SLOT) {
4043 if (bss_conf->use_short_slot)
4044 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4045 else
4046 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4047 if (ret < 0) {
4048 wl1271_warning("Set slot time failed %d", ret);
4049 goto out;
4053 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4054 if (bss_conf->use_short_preamble)
4055 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4056 else
4057 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4060 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4061 if (bss_conf->use_cts_prot)
4062 ret = wl1271_acx_cts_protect(wl, wlvif,
4063 CTSPROTECT_ENABLE);
4064 else
4065 ret = wl1271_acx_cts_protect(wl, wlvif,
4066 CTSPROTECT_DISABLE);
4067 if (ret < 0) {
4068 wl1271_warning("Set ctsprotect failed %d", ret);
4069 goto out;
4073 out:
4074 return ret;
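/*
 * Upload the beacon template and, unless usermode already set a probe
 * response explicitly, derive the probe-response template from the same
 * beacon data (with the TIM and P2P vendor IEs removed).
 */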
4077 static int wlcore_set_beacon_template(struct wl1271 *wl,
4078 struct ieee80211_vif *vif,
4079 bool is_ap)
4081 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4082 struct ieee80211_hdr *hdr;
4083 u32 min_rate;
4084 int ret;
4085 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4086 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4087 u16 tmpl_id;
4089 if (!beacon) {
4090 ret = -EINVAL;
4091 goto out;
4094 wl1271_debug(DEBUG_MASTER, "beacon updated");
4096 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4097 if (ret < 0) {
4098 dev_kfree_skb(beacon);
4099 goto out;
4101 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4102 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4103 CMD_TEMPL_BEACON;
4104 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4105 beacon->data,
4106 beacon->len, 0,
4107 min_rate);
4108 if (ret < 0) {
4109 dev_kfree_skb(beacon);
4110 goto out;
4113 wlvif->wmm_enabled =
4114 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4115 WLAN_OUI_TYPE_MICROSOFT_WMM,
4116 beacon->data + ieoffset,
4117 beacon->len - ieoffset);
4120 * In case a probe-resp template was already set explicitly
4121 * by usermode, don't derive one from the beacon data.
4123 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4124 goto end_bcn;
4126 /* remove TIM ie from probe response */
4127 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4130 * remove p2p ie from probe response.
4131 * the fw responds to probe requests that don't include
4132 * the p2p ie. probe requests with the p2p ie will be passed on,
4133 * and will be answered by the supplicant (the spec
4134 * forbids including the p2p ie when responding to probe
4135 * requests that didn't include it).
4137 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4138 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4140 hdr = (struct ieee80211_hdr *) beacon->data;
4141 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4142 IEEE80211_STYPE_PROBE_RESP);
4143 if (is_ap)
4144 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4145 beacon->data,
4146 beacon->len,
4147 min_rate);
4148 else
4149 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4150 CMD_TEMPL_PROBE_RESPONSE,
4151 beacon->data,
4152 beacon->len, 0,
4153 min_rate);
4154 end_bcn:
4155 dev_kfree_skb(beacon);
4156 if (ret < 0)
4157 goto out;
4159 out:
4160 return ret;
4163 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4164 struct ieee80211_vif *vif,
4165 struct ieee80211_bss_conf *bss_conf,
4166 u32 changed)
4168 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4169 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4170 int ret = 0;
4172 if (changed & BSS_CHANGED_BEACON_INT) {
4173 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4174 bss_conf->beacon_int);
4176 wlvif->beacon_int = bss_conf->beacon_int;
4179 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4180 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4182 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4185 if (changed & BSS_CHANGED_BEACON) {
4186 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4187 if (ret < 0)
4188 goto out;
4190 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4191 &wlvif->flags)) {
4192 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4193 if (ret < 0)
4194 goto out;
4197 out:
4198 if (ret != 0)
4199 wl1271_error("beacon info change failed: %d", ret);
4200 return ret;
4203 /* AP mode changes */
4204 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4205 struct ieee80211_vif *vif,
4206 struct ieee80211_bss_conf *bss_conf,
4207 u32 changed)
4209 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4210 int ret = 0;
4212 if (changed & BSS_CHANGED_BASIC_RATES) {
4213 u32 rates = bss_conf->basic_rates;
4215 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4216 wlvif->band);
4217 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4218 wlvif->basic_rate_set);
4220 ret = wl1271_init_ap_rates(wl, wlvif);
4221 if (ret < 0) {
4222 wl1271_error("AP rate policy change failed %d", ret);
4223 goto out;
4226 ret = wl1271_ap_init_templates(wl, vif);
4227 if (ret < 0)
4228 goto out;
4230 /* No need to set probe resp template for mesh */
4231 if (!ieee80211_vif_is_mesh(vif)) {
4232 ret = wl1271_ap_set_probe_resp_tmpl(wl,
4233 wlvif->basic_rate,
4234 vif);
4235 if (ret < 0)
4236 goto out;
4239 ret = wlcore_set_beacon_template(wl, vif, true);
4240 if (ret < 0)
4241 goto out;
4244 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4245 if (ret < 0)
4246 goto out;
4248 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4249 if (bss_conf->enable_beacon) {
4250 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4251 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4252 if (ret < 0)
4253 goto out;
4255 ret = wl1271_ap_init_hwenc(wl, wlvif);
4256 if (ret < 0)
4257 goto out;
4259 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4260 wl1271_debug(DEBUG_AP, "started AP");
4262 } else {
4263 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4265 * The AP might be in ROC if we have just
4266 * sent an auth reply. Handle it.
4268 if (test_bit(wlvif->role_id, wl->roc_map))
4269 wl12xx_croc(wl, wlvif->role_id);
4271 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4272 if (ret < 0)
4273 goto out;
4275 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4276 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4277 &wlvif->flags);
4278 wl1271_debug(DEBUG_AP, "stopped AP");
4283 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4284 if (ret < 0)
4285 goto out;
4287 /* Handle HT information change */
4288 if ((changed & BSS_CHANGED_HT) &&
4289 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4290 ret = wl1271_acx_set_ht_information(wl, wlvif,
4291 bss_conf->ht_operation_mode);
4292 if (ret < 0) {
4293 wl1271_warning("Set ht information failed %d", ret);
4294 goto out;
4298 out:
4299 return;
4302 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4303 struct ieee80211_bss_conf *bss_conf,
4304 u32 sta_rate_set)
4306 u32 rates;
4307 int ret;
4309 wl1271_debug(DEBUG_MAC80211,
4310 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4311 bss_conf->bssid, bss_conf->aid,
4312 bss_conf->beacon_int,
4313 bss_conf->basic_rates, sta_rate_set);
4315 wlvif->beacon_int = bss_conf->beacon_int;
4316 rates = bss_conf->basic_rates;
4317 wlvif->basic_rate_set =
4318 wl1271_tx_enabled_rates_get(wl, rates,
4319 wlvif->band);
4320 wlvif->basic_rate =
4321 wl1271_tx_min_rate_get(wl,
4322 wlvif->basic_rate_set);
4324 if (sta_rate_set)
4325 wlvif->rate_set =
4326 wl1271_tx_enabled_rates_get(wl,
4327 sta_rate_set,
4328 wlvif->band);
4330 /* we only support sched_scan while not connected */
4331 if (wl->sched_vif == wlvif)
4332 wl->ops->sched_scan_stop(wl, wlvif);
4334 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4335 if (ret < 0)
4336 return ret;
4338 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4339 if (ret < 0)
4340 return ret;
4342 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4343 if (ret < 0)
4344 return ret;
4346 wlcore_set_ssid(wl, wlvif);
4348 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4350 return 0;
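/*
 * Drop the current BSSID: fall back to the band's minimum rates, update the
 * rate policies and, for a STA role that is still marked in use, stop the STA
 * role in the firmware before clearing the IN_USE flag.
 */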
4353 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4355 int ret;
4357 /* revert to minimum rates for the current band */
4358 wl1271_set_band_rate(wl, wlvif);
4359 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4361 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4362 if (ret < 0)
4363 return ret;
4365 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4366 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4367 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4368 if (ret < 0)
4369 return ret;
4372 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4373 return 0;
4375 /* STA/IBSS mode changes */
4376 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4377 struct ieee80211_vif *vif,
4378 struct ieee80211_bss_conf *bss_conf,
4379 u32 changed)
4381 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4382 bool do_join = false;
4383 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4384 bool ibss_joined = false;
4385 u32 sta_rate_set = 0;
4386 int ret;
4387 struct ieee80211_sta *sta;
4388 bool sta_exists = false;
4389 struct ieee80211_sta_ht_cap sta_ht_cap;
4391 if (is_ibss) {
4392 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4393 changed);
4394 if (ret < 0)
4395 goto out;
4398 if (changed & BSS_CHANGED_IBSS) {
4399 if (bss_conf->ibss_joined) {
4400 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4401 ibss_joined = true;
4402 } else {
4403 wlcore_unset_assoc(wl, wlvif);
4404 wl12xx_cmd_role_stop_sta(wl, wlvif);
4408 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4409 do_join = true;
4411 /* Need to update the SSID (for filtering etc) */
4412 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4413 do_join = true;
4415 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4416 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4417 bss_conf->enable_beacon ? "enabled" : "disabled");
4419 do_join = true;
4422 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4423 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4425 if (changed & BSS_CHANGED_CQM) {
4426 bool enable = false;
4427 if (bss_conf->cqm_rssi_thold)
4428 enable = true;
4429 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4430 bss_conf->cqm_rssi_thold,
4431 bss_conf->cqm_rssi_hyst);
4432 if (ret < 0)
4433 goto out;
4434 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
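/*
 * Snapshot the AP's station entry under RCU: its legacy supported rates seed
 * sta_rate_set, and when HT is supported the first two MCS rx_mask bytes are
 * OR'd in at HW_HT_RATES_OFFSET and HW_MIMO_RATES_OFFSET. The HT capabilities
 * are also copied so they can be applied after the join below.
 */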
4437 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4438 BSS_CHANGED_ASSOC)) {
4439 rcu_read_lock();
4440 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4441 if (sta) {
4442 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4444 /* save the supp_rates of the ap */
4445 sta_rate_set = sta->supp_rates[wlvif->band];
4446 if (sta->ht_cap.ht_supported)
4447 sta_rate_set |=
4448 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4449 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4450 sta_ht_cap = sta->ht_cap;
4451 sta_exists = true;
4454 rcu_read_unlock();
4457 if (changed & BSS_CHANGED_BSSID) {
4458 if (!is_zero_ether_addr(bss_conf->bssid)) {
4459 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4460 sta_rate_set);
4461 if (ret < 0)
4462 goto out;
4464 /* Need to update the BSSID (for filtering etc) */
4465 do_join = true;
4466 } else {
4467 ret = wlcore_clear_bssid(wl, wlvif);
4468 if (ret < 0)
4469 goto out;
4473 if (changed & BSS_CHANGED_IBSS) {
4474 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4475 bss_conf->ibss_joined);
4477 if (bss_conf->ibss_joined) {
4478 u32 rates = bss_conf->basic_rates;
4479 wlvif->basic_rate_set =
4480 wl1271_tx_enabled_rates_get(wl, rates,
4481 wlvif->band);
4482 wlvif->basic_rate =
4483 wl1271_tx_min_rate_get(wl,
4484 wlvif->basic_rate_set);
4486 /* by default, use 11b + OFDM rates */
4487 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4488 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4489 if (ret < 0)
4490 goto out;
4494 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4495 /* enable beacon filtering */
4496 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4497 if (ret < 0)
4498 goto out;
4501 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4502 if (ret < 0)
4503 goto out;
4505 if (do_join) {
4506 ret = wlcore_join(wl, wlvif);
4507 if (ret < 0) {
4508 wl1271_warning("cmd join failed %d", ret);
4509 goto out;
4513 if (changed & BSS_CHANGED_ASSOC) {
4514 if (bss_conf->assoc) {
4515 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4516 sta_rate_set);
4517 if (ret < 0)
4518 goto out;
4520 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4521 wl12xx_set_authorized(wl, wlvif);
4522 } else {
4523 wlcore_unset_assoc(wl, wlvif);
4527 if (changed & BSS_CHANGED_PS) {
4528 if ((bss_conf->ps) &&
4529 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4530 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4531 int ps_mode;
4532 char *ps_mode_str;
4534 if (wl->conf.conn.forced_ps) {
4535 ps_mode = STATION_POWER_SAVE_MODE;
4536 ps_mode_str = "forced";
4537 } else {
4538 ps_mode = STATION_AUTO_PS_MODE;
4539 ps_mode_str = "auto";
4542 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4544 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4545 if (ret < 0)
4546 wl1271_warning("enter %s ps failed %d",
4547 ps_mode_str, ret);
4548 } else if (!bss_conf->ps &&
4549 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4550 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4552 ret = wl1271_ps_set_mode(wl, wlvif,
4553 STATION_ACTIVE_MODE);
4554 if (ret < 0)
4555 wl1271_warning("exit auto ps failed %d", ret);
4559 /* Handle new association with HT. Do this after join. */
4560 if (sta_exists) {
4561 bool enabled =
4562 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4564 ret = wlcore_hw_set_peer_cap(wl,
4565 &sta_ht_cap,
4566 enabled,
4567 wlvif->rate_set,
4568 wlvif->sta.hlid);
4569 if (ret < 0) {
4570 wl1271_warning("Set ht cap failed %d", ret);
4571 goto out;
4575 if (enabled) {
4576 ret = wl1271_acx_set_ht_information(wl, wlvif,
4577 bss_conf->ht_operation_mode);
4578 if (ret < 0) {
4579 wl1271_warning("Set ht information failed %d",
4580 ret);
4581 goto out;
4586 /* Handle ARP filtering. Done after join. */
4587 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4588 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4589 __be32 addr = bss_conf->arp_addr_list[0];
4590 wlvif->sta.qos = bss_conf->qos;
4591 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4593 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4594 wlvif->ip_addr = addr;
4596 * The template should have been configured only upon
4597 * association; however, it seems that the correct IP
4598 * isn't being set (when sending), so we have to
4599 * reconfigure the template upon every IP change.
4601 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4602 if (ret < 0) {
4603 wl1271_warning("build arp rsp failed: %d", ret);
4604 goto out;
4607 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4608 (ACX_ARP_FILTER_ARP_FILTERING |
4609 ACX_ARP_FILTER_AUTO_ARP),
4610 addr);
4611 } else {
4612 wlvif->ip_addr = 0;
4613 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4616 if (ret < 0)
4617 goto out;
4620 out:
4621 return;
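/*
 * mac80211 bss_info_changed handler: flushes TX when an AP stops beaconing,
 * wakes the chip via runtime PM, applies TX power changes and then dispatches
 * to the AP or STA/IBSS specific handler above.
 */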
4624 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4625 struct ieee80211_vif *vif,
4626 struct ieee80211_bss_conf *bss_conf,
4627 u32 changed)
4629 struct wl1271 *wl = hw->priv;
4630 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4631 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4632 int ret;
4634 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4635 wlvif->role_id, (int)changed);
4638 * make sure to cancel pending disconnections if our association
4639 * state changed
4641 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4642 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4644 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4645 !bss_conf->enable_beacon)
4646 wl1271_tx_flush(wl);
4648 mutex_lock(&wl->mutex);
4650 if (unlikely(wl->state != WLCORE_STATE_ON))
4651 goto out;
4653 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4654 goto out;
4656 ret = pm_runtime_get_sync(wl->dev);
4657 if (ret < 0) {
4658 pm_runtime_put_noidle(wl->dev);
4659 goto out;
4662 if ((changed & BSS_CHANGED_TXPOWER) &&
4663 bss_conf->txpower != wlvif->power_level) {
4665 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4666 if (ret < 0)
4667 goto out;
4669 wlvif->power_level = bss_conf->txpower;
4672 if (is_ap)
4673 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4674 else
4675 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4677 pm_runtime_mark_last_busy(wl->dev);
4678 pm_runtime_put_autosuspend(wl->dev);
4680 out:
4681 mutex_unlock(&wl->mutex);
4684 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4685 struct ieee80211_chanctx_conf *ctx)
4687 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4688 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4689 cfg80211_get_chandef_type(&ctx->def));
4690 return 0;
4693 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4694 struct ieee80211_chanctx_conf *ctx)
4696 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4697 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4698 cfg80211_get_chandef_type(&ctx->def));
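/*
 * A channel context changed: for every AP vif bound to it, start radar
 * detection (CAC) if radar just became required on a DFS-usable channel and
 * it is not already running on that vif.
 */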
4701 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4702 struct ieee80211_chanctx_conf *ctx,
4703 u32 changed)
4705 struct wl1271 *wl = hw->priv;
4706 struct wl12xx_vif *wlvif;
4707 int ret;
4708 int channel = ieee80211_frequency_to_channel(
4709 ctx->def.chan->center_freq);
4711 wl1271_debug(DEBUG_MAC80211,
4712 "mac80211 change chanctx %d (type %d) changed 0x%x",
4713 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4715 mutex_lock(&wl->mutex);
4717 ret = pm_runtime_get_sync(wl->dev);
4718 if (ret < 0) {
4719 pm_runtime_put_noidle(wl->dev);
4720 goto out;
4723 wl12xx_for_each_wlvif(wl, wlvif) {
4724 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4726 rcu_read_lock();
4727 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4728 rcu_read_unlock();
4729 continue;
4731 rcu_read_unlock();
4733 /* start radar if needed */
4734 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4735 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4736 ctx->radar_enabled && !wlvif->radar_enabled &&
4737 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4738 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4739 wlcore_hw_set_cac(wl, wlvif, true);
4740 wlvif->radar_enabled = true;
4744 pm_runtime_mark_last_busy(wl->dev);
4745 pm_runtime_put_autosuspend(wl->dev);
4746 out:
4747 mutex_unlock(&wl->mutex);
4750 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4751 struct ieee80211_vif *vif,
4752 struct ieee80211_chanctx_conf *ctx)
4754 struct wl1271 *wl = hw->priv;
4755 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4756 int channel = ieee80211_frequency_to_channel(
4757 ctx->def.chan->center_freq);
4758 int ret = -EINVAL;
4760 wl1271_debug(DEBUG_MAC80211,
4761 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4762 wlvif->role_id, channel,
4763 cfg80211_get_chandef_type(&ctx->def),
4764 ctx->radar_enabled, ctx->def.chan->dfs_state);
4766 mutex_lock(&wl->mutex);
4768 if (unlikely(wl->state != WLCORE_STATE_ON))
4769 goto out;
4771 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4772 goto out;
4774 ret = pm_runtime_get_sync(wl->dev);
4775 if (ret < 0) {
4776 pm_runtime_put_noidle(wl->dev);
4777 goto out;
4780 wlvif->band = ctx->def.chan->band;
4781 wlvif->channel = channel;
4782 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4784 /* update default rates according to the band */
4785 wl1271_set_band_rate(wl, wlvif);
4787 if (ctx->radar_enabled &&
4788 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4789 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4790 wlcore_hw_set_cac(wl, wlvif, true);
4791 wlvif->radar_enabled = true;
4794 pm_runtime_mark_last_busy(wl->dev);
4795 pm_runtime_put_autosuspend(wl->dev);
4796 out:
4797 mutex_unlock(&wl->mutex);
4799 return 0;
4802 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4803 struct ieee80211_vif *vif,
4804 struct ieee80211_chanctx_conf *ctx)
4806 struct wl1271 *wl = hw->priv;
4807 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4808 int ret;
4810 wl1271_debug(DEBUG_MAC80211,
4811 "mac80211 unassign chanctx (role %d) %d (type %d)",
4812 wlvif->role_id,
4813 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4814 cfg80211_get_chandef_type(&ctx->def));
4816 wl1271_tx_flush(wl);
4818 mutex_lock(&wl->mutex);
4820 if (unlikely(wl->state != WLCORE_STATE_ON))
4821 goto out;
4823 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4824 goto out;
4826 ret = pm_runtime_get_sync(wl->dev);
4827 if (ret < 0) {
4828 pm_runtime_put_noidle(wl->dev);
4829 goto out;
4832 if (wlvif->radar_enabled) {
4833 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4834 wlcore_hw_set_cac(wl, wlvif, false);
4835 wlvif->radar_enabled = false;
4838 pm_runtime_mark_last_busy(wl->dev);
4839 pm_runtime_put_autosuspend(wl->dev);
4840 out:
4841 mutex_unlock(&wl->mutex);
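/*
 * Move a (beacon-disabled) AP vif to a new channel context: stop any running
 * CAC, adopt the new band, channel and channel type, and restart radar
 * detection if the new context requires it.
 */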
4844 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4845 struct wl12xx_vif *wlvif,
4846 struct ieee80211_chanctx_conf *new_ctx)
4848 int channel = ieee80211_frequency_to_channel(
4849 new_ctx->def.chan->center_freq);
4851 wl1271_debug(DEBUG_MAC80211,
4852 "switch vif (role %d) %d -> %d chan_type: %d",
4853 wlvif->role_id, wlvif->channel, channel,
4854 cfg80211_get_chandef_type(&new_ctx->def));
4856 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4857 return 0;
4859 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4861 if (wlvif->radar_enabled) {
4862 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4863 wlcore_hw_set_cac(wl, wlvif, false);
4864 wlvif->radar_enabled = false;
4867 wlvif->band = new_ctx->def.chan->band;
4868 wlvif->channel = channel;
4869 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4871 /* start radar if needed */
4872 if (new_ctx->radar_enabled) {
4873 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4874 wlcore_hw_set_cac(wl, wlvif, true);
4875 wlvif->radar_enabled = true;
4878 return 0;
4881 static int
4882 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4883 struct ieee80211_vif_chanctx_switch *vifs,
4884 int n_vifs,
4885 enum ieee80211_chanctx_switch_mode mode)
4887 struct wl1271 *wl = hw->priv;
4888 int i, ret;
4890 wl1271_debug(DEBUG_MAC80211,
4891 "mac80211 switch chanctx n_vifs %d mode %d",
4892 n_vifs, mode);
4894 mutex_lock(&wl->mutex);
4896 ret = pm_runtime_get_sync(wl->dev);
4897 if (ret < 0) {
4898 pm_runtime_put_noidle(wl->dev);
4899 goto out;
4902 for (i = 0; i < n_vifs; i++) {
4903 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4905 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4906 if (ret)
4907 goto out_sleep;
4909 out_sleep:
4910 pm_runtime_mark_last_busy(wl->dev);
4911 pm_runtime_put_autosuspend(wl->dev);
4912 out:
4913 mutex_unlock(&wl->mutex);
4915 return 0;
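/*
 * Per-queue EDCA configuration. UAPSD queues use the UPSD trigger PS scheme,
 * everything else stays on legacy PS. The txop arrives from mac80211 in 32us
 * units, so e.g. txop = 94 becomes 94 << 5 = 3008us when handed to
 * wl1271_acx_ac_cfg().
 */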
4918 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4919 struct ieee80211_vif *vif, u16 queue,
4920 const struct ieee80211_tx_queue_params *params)
4922 struct wl1271 *wl = hw->priv;
4923 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4924 u8 ps_scheme;
4925 int ret = 0;
4927 if (wlcore_is_p2p_mgmt(wlvif))
4928 return 0;
4930 mutex_lock(&wl->mutex);
4932 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4934 if (params->uapsd)
4935 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4936 else
4937 ps_scheme = CONF_PS_SCHEME_LEGACY;
4939 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4940 goto out;
4942 ret = pm_runtime_get_sync(wl->dev);
4943 if (ret < 0) {
4944 pm_runtime_put_noidle(wl->dev);
4945 goto out;
4949 * the txop is configured by mac80211 in units of 32us,
4950 * but the firmware expects microseconds (hence the << 5 below)
4952 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4953 params->cw_min, params->cw_max,
4954 params->aifs, params->txop << 5);
4955 if (ret < 0)
4956 goto out_sleep;
4958 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4959 CONF_CHANNEL_TYPE_EDCF,
4960 wl1271_tx_get_queue(queue),
4961 ps_scheme, CONF_ACK_POLICY_LEGACY,
4962 0, 0);
4964 out_sleep:
4965 pm_runtime_mark_last_busy(wl->dev);
4966 pm_runtime_put_autosuspend(wl->dev);
4968 out:
4969 mutex_unlock(&wl->mutex);
4971 return ret;
4974 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4975 struct ieee80211_vif *vif)
4978 struct wl1271 *wl = hw->priv;
4979 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4980 u64 mactime = ULLONG_MAX;
4981 int ret;
4983 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4985 mutex_lock(&wl->mutex);
4987 if (unlikely(wl->state != WLCORE_STATE_ON))
4988 goto out;
4990 ret = pm_runtime_get_sync(wl->dev);
4991 if (ret < 0) {
4992 pm_runtime_put_noidle(wl->dev);
4993 goto out;
4996 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4997 if (ret < 0)
4998 goto out_sleep;
5000 out_sleep:
5001 pm_runtime_mark_last_busy(wl->dev);
5002 pm_runtime_put_autosuspend(wl->dev);
5004 out:
5005 mutex_unlock(&wl->mutex);
5006 return mactime;
5009 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5010 struct survey_info *survey)
5012 struct ieee80211_conf *conf = &hw->conf;
5014 if (idx != 0)
5015 return -ENOENT;
5017 survey->channel = conf->chandef.chan;
5018 survey->filled = 0;
5019 return 0;
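/*
 * Reserve a HLID (host link ID) for a new AP-mode station, restore its saved
 * security sequence counter (relevant after recovery/resume) and record its
 * MAC address in the links table.
 */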
5022 static int wl1271_allocate_sta(struct wl1271 *wl,
5023 struct wl12xx_vif *wlvif,
5024 struct ieee80211_sta *sta)
5026 struct wl1271_station *wl_sta;
5027 int ret;
5030 if (wl->active_sta_count >= wl->max_ap_stations) {
5031 wl1271_warning("could not allocate HLID - too much stations");
5032 return -EBUSY;
5035 wl_sta = (struct wl1271_station *)sta->drv_priv;
5036 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5037 if (ret < 0) {
5038 wl1271_warning("could not allocate HLID - too many links");
5039 return -EBUSY;
5042 /* use the previous security seq, if this is a recovery/resume */
5043 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5045 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5046 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5047 wl->active_sta_count++;
5048 return 0;
5051 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5053 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5054 return;
5056 clear_bit(hlid, wlvif->ap.sta_hlid_map);
5057 __clear_bit(hlid, &wl->ap_ps_map);
5058 __clear_bit(hlid, &wl->ap_fw_ps_map);
5061 * save the last used PN in the private part of ieee80211_sta,
5062 * in case of recovery/suspend
5064 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5066 wl12xx_free_link(wl, wlvif, &hlid);
5067 wl->active_sta_count--;
5070 * rearm the tx watchdog when the last STA is freed - give the FW a
5071 * chance to return STA-buffered packets before complaining.
5073 if (wl->active_sta_count == 0)
5074 wl12xx_rearm_tx_watchdog_locked(wl);
5077 static int wl12xx_sta_add(struct wl1271 *wl,
5078 struct wl12xx_vif *wlvif,
5079 struct ieee80211_sta *sta)
5081 struct wl1271_station *wl_sta;
5082 int ret = 0;
5083 u8 hlid;
5085 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5087 ret = wl1271_allocate_sta(wl, wlvif, sta);
5088 if (ret < 0)
5089 return ret;
5091 wl_sta = (struct wl1271_station *)sta->drv_priv;
5092 hlid = wl_sta->hlid;
5094 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5095 if (ret < 0)
5096 wl1271_free_sta(wl, wlvif, hlid);
5098 return ret;
5101 static int wl12xx_sta_remove(struct wl1271 *wl,
5102 struct wl12xx_vif *wlvif,
5103 struct ieee80211_sta *sta)
5105 struct wl1271_station *wl_sta;
5106 int ret = 0, id;
5108 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5110 wl_sta = (struct wl1271_station *)sta->drv_priv;
5111 id = wl_sta->hlid;
5112 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5113 return -EINVAL;
5115 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5116 if (ret < 0)
5117 return ret;
5119 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5120 return ret;
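/*
 * Start a ROC on the vif's own role, but only when no other role is currently
 * on-channel (no bit set in roc_map) and the role id is valid.
 */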
5123 static void wlcore_roc_if_possible(struct wl1271 *wl,
5124 struct wl12xx_vif *wlvif)
5126 if (find_first_bit(wl->roc_map,
5127 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5128 return;
5130 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5131 return;
5133 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5137 * when wl_sta is NULL, we treat this call as if coming from a
5138 * pending auth reply.
5139 * wl->mutex must be taken and the FW must be awake when the call
5140 * takes place.
5142 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5143 struct wl1271_station *wl_sta, bool in_conn)
5145 if (in_conn) {
5146 if (WARN_ON(wl_sta && wl_sta->in_connection))
5147 return;
5149 if (!wlvif->ap_pending_auth_reply &&
5150 !wlvif->inconn_count)
5151 wlcore_roc_if_possible(wl, wlvif);
5153 if (wl_sta) {
5154 wl_sta->in_connection = true;
5155 wlvif->inconn_count++;
5156 } else {
5157 wlvif->ap_pending_auth_reply = true;
5159 } else {
5160 if (wl_sta && !wl_sta->in_connection)
5161 return;
5163 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5164 return;
5166 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5167 return;
5169 if (wl_sta) {
5170 wl_sta->in_connection = false;
5171 wlvif->inconn_count--;
5172 } else {
5173 wlvif->ap_pending_auth_reply = false;
5176 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5177 test_bit(wlvif->role_id, wl->roc_map))
5178 wl12xx_croc(wl, wlvif->role_id);
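/*
 * Translate mac80211 station state transitions into firmware commands:
 * add/remove peers and track in-connection stations for AP roles, authorize
 * the peer and (re)configure its rates and HT caps, and save/restore the TX
 * security sequence number for STA roles around disassociation and
 * re-association.
 */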
5182 static int wl12xx_update_sta_state(struct wl1271 *wl,
5183 struct wl12xx_vif *wlvif,
5184 struct ieee80211_sta *sta,
5185 enum ieee80211_sta_state old_state,
5186 enum ieee80211_sta_state new_state)
5188 struct wl1271_station *wl_sta;
5189 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5190 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5191 int ret;
5193 wl_sta = (struct wl1271_station *)sta->drv_priv;
5195 /* Add station (AP mode) */
5196 if (is_ap &&
5197 old_state == IEEE80211_STA_NOTEXIST &&
5198 new_state == IEEE80211_STA_NONE) {
5199 ret = wl12xx_sta_add(wl, wlvif, sta);
5200 if (ret)
5201 return ret;
5203 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5206 /* Remove station (AP mode) */
5207 if (is_ap &&
5208 old_state == IEEE80211_STA_NONE &&
5209 new_state == IEEE80211_STA_NOTEXIST) {
5210 /* must not fail */
5211 wl12xx_sta_remove(wl, wlvif, sta);
5213 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5216 /* Authorize station (AP mode) */
5217 if (is_ap &&
5218 new_state == IEEE80211_STA_AUTHORIZED) {
5219 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5220 if (ret < 0)
5221 return ret;
5223 /* reconfigure rates */
5224 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5225 if (ret < 0)
5226 return ret;
5228 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5229 wl_sta->hlid);
5230 if (ret)
5231 return ret;
5233 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5236 /* Authorize station */
5237 if (is_sta &&
5238 new_state == IEEE80211_STA_AUTHORIZED) {
5239 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5240 ret = wl12xx_set_authorized(wl, wlvif);
5241 if (ret)
5242 return ret;
5245 if (is_sta &&
5246 old_state == IEEE80211_STA_AUTHORIZED &&
5247 new_state == IEEE80211_STA_ASSOC) {
5248 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5249 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5252 /* save seq number on disassoc (suspend) */
5253 if (is_sta &&
5254 old_state == IEEE80211_STA_ASSOC &&
5255 new_state == IEEE80211_STA_AUTH) {
5256 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5257 wlvif->total_freed_pkts = 0;
5260 /* restore seq number on assoc (resume) */
5261 if (is_sta &&
5262 old_state == IEEE80211_STA_AUTH &&
5263 new_state == IEEE80211_STA_ASSOC) {
5264 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5267 /* clear ROCs on failure or authorization */
5268 if (is_sta &&
5269 (new_state == IEEE80211_STA_AUTHORIZED ||
5270 new_state == IEEE80211_STA_NOTEXIST)) {
5271 if (test_bit(wlvif->role_id, wl->roc_map))
5272 wl12xx_croc(wl, wlvif->role_id);
5275 if (is_sta &&
5276 old_state == IEEE80211_STA_NOTEXIST &&
5277 new_state == IEEE80211_STA_NONE) {
5278 if (find_first_bit(wl->roc_map,
5279 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5280 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5281 wl12xx_roc(wl, wlvif, wlvif->role_id,
5282 wlvif->band, wlvif->channel);
5285 return 0;
5288 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5289 struct ieee80211_vif *vif,
5290 struct ieee80211_sta *sta,
5291 enum ieee80211_sta_state old_state,
5292 enum ieee80211_sta_state new_state)
5294 struct wl1271 *wl = hw->priv;
5295 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5296 int ret;
5298 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5299 sta->aid, old_state, new_state);
5301 mutex_lock(&wl->mutex);
5303 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5304 ret = -EBUSY;
5305 goto out;
5308 ret = pm_runtime_get_sync(wl->dev);
5309 if (ret < 0) {
5310 pm_runtime_put_noidle(wl->dev);
5311 goto out;
5314 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5316 pm_runtime_mark_last_busy(wl->dev);
5317 pm_runtime_put_autosuspend(wl->dev);
5318 out:
5319 mutex_unlock(&wl->mutex);
5320 if (new_state < old_state)
5321 return 0;
5322 return ret;
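/*
 * RX block-ack session management. Only RX_START/RX_STOP are handled here; a
 * per-link bitmap (one bit per TID) tracks active sessions and
 * ba_rx_session_count enforces the global firmware limit.
 */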
5325 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5326 struct ieee80211_vif *vif,
5327 struct ieee80211_ampdu_params *params)
5329 struct wl1271 *wl = hw->priv;
5330 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5331 int ret;
5332 u8 hlid, *ba_bitmap;
5333 struct ieee80211_sta *sta = params->sta;
5334 enum ieee80211_ampdu_mlme_action action = params->action;
5335 u16 tid = params->tid;
5336 u16 *ssn = &params->ssn;
5338 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5339 tid);
5341 /* sanity check - the fields in FW are only 8 bits wide */
5342 if (WARN_ON(tid > 0xFF))
5343 return -ENOTSUPP;
5345 mutex_lock(&wl->mutex);
5347 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5348 ret = -EAGAIN;
5349 goto out;
5352 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5353 hlid = wlvif->sta.hlid;
5354 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5355 struct wl1271_station *wl_sta;
5357 wl_sta = (struct wl1271_station *)sta->drv_priv;
5358 hlid = wl_sta->hlid;
5359 } else {
5360 ret = -EINVAL;
5361 goto out;
5364 ba_bitmap = &wl->links[hlid].ba_bitmap;
5366 ret = pm_runtime_get_sync(wl->dev);
5367 if (ret < 0) {
5368 pm_runtime_put_noidle(wl->dev);
5369 goto out;
5372 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5373 tid, action);
5375 switch (action) {
5376 case IEEE80211_AMPDU_RX_START:
5377 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5378 ret = -ENOTSUPP;
5379 break;
5382 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5383 ret = -EBUSY;
5384 wl1271_error("exceeded max RX BA sessions");
5385 break;
5388 if (*ba_bitmap & BIT(tid)) {
5389 ret = -EINVAL;
5390 wl1271_error("cannot enable RX BA session on active "
5391 "tid: %d", tid);
5392 break;
5395 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5396 hlid,
5397 params->buf_size);
5399 if (!ret) {
5400 *ba_bitmap |= BIT(tid);
5401 wl->ba_rx_session_count++;
5403 break;
5405 case IEEE80211_AMPDU_RX_STOP:
5406 if (!(*ba_bitmap & BIT(tid))) {
5408 * this happens on reconfig - so only output a debug
5409 * message for now, and don't fail the function.
5411 wl1271_debug(DEBUG_MAC80211,
5412 "no active RX BA session on tid: %d",
5413 tid);
5414 ret = 0;
5415 break;
5418 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5419 hlid, 0);
5420 if (!ret) {
5421 *ba_bitmap &= ~BIT(tid);
5422 wl->ba_rx_session_count--;
5424 break;
5427 * BA initiator sessions are managed by the FW independently.
5428 * The TX AMPDU cases below intentionally share the same handling.
5430 case IEEE80211_AMPDU_TX_START:
5431 case IEEE80211_AMPDU_TX_STOP_CONT:
5432 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5433 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5434 case IEEE80211_AMPDU_TX_OPERATIONAL:
5435 ret = -EINVAL;
5436 break;
5438 default:
5439 wl1271_error("Incorrect ampdu action id=%x\n", action);
5440 ret = -EINVAL;
5443 pm_runtime_mark_last_busy(wl->dev);
5444 pm_runtime_put_autosuspend(wl->dev);
5446 out:
5447 mutex_unlock(&wl->mutex);
5449 return ret;
5452 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5453 struct ieee80211_vif *vif,
5454 const struct cfg80211_bitrate_mask *mask)
5456 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5457 struct wl1271 *wl = hw->priv;
5458 int i, ret = 0;
5460 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5461 mask->control[NL80211_BAND_2GHZ].legacy,
5462 mask->control[NL80211_BAND_5GHZ].legacy);
5464 mutex_lock(&wl->mutex);
5466 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5467 wlvif->bitrate_masks[i] =
5468 wl1271_tx_enabled_rates_get(wl,
5469 mask->control[i].legacy,
5472 if (unlikely(wl->state != WLCORE_STATE_ON))
5473 goto out;
5475 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5476 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5478 ret = pm_runtime_get_sync(wl->dev);
5479 if (ret < 0) {
5480 pm_runtime_put_noidle(wl->dev);
5481 goto out;
5484 wl1271_set_band_rate(wl, wlvif);
5485 wlvif->basic_rate =
5486 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5487 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5489 pm_runtime_mark_last_busy(wl->dev);
5490 pm_runtime_put_autosuspend(wl->dev);
5492 out:
5493 mutex_unlock(&wl->mutex);
5495 return ret;
5498 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5499 struct ieee80211_vif *vif,
5500 struct ieee80211_channel_switch *ch_switch)
5502 struct wl1271 *wl = hw->priv;
5503 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5504 int ret;
5506 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5508 wl1271_tx_flush(wl);
5510 mutex_lock(&wl->mutex);
5512 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5513 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5514 ieee80211_chswitch_done(vif, false);
5515 goto out;
5516 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5517 goto out;
5520 ret = pm_runtime_get_sync(wl->dev);
5521 if (ret < 0) {
5522 pm_runtime_put_noidle(wl->dev);
5523 goto out;
5526 /* TODO: change mac80211 to pass vif as param */
5528 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5529 unsigned long delay_usec;
5531 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5532 if (ret)
5533 goto out_sleep;
5535 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5537 /* indicate failure 5 seconds after channel switch time */
5538 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5539 ch_switch->count;
5540 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5541 usecs_to_jiffies(delay_usec) +
5542 msecs_to_jiffies(5000));
5545 out_sleep:
5546 pm_runtime_mark_last_busy(wl->dev);
5547 pm_runtime_put_autosuspend(wl->dev);
5549 out:
5550 mutex_unlock(&wl->mutex);
5553 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5554 struct wl12xx_vif *wlvif,
5555 u8 eid)
5557 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5558 struct sk_buff *beacon =
5559 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5561 if (!beacon)
5562 return NULL;
5564 return cfg80211_find_ie(eid,
5565 beacon->data + ieoffset,
5566 beacon->len - ieoffset);
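/*
 * Extract the CSA count from our own beacon: look up the Channel Switch
 * Announcement IE and return its "count" field, which then drives the
 * channel-switch command issued from channel_switch_beacon below.
 */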
5569 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5570 u8 *csa_count)
5572 const u8 *ie;
5573 const struct ieee80211_channel_sw_ie *ie_csa;
5575 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5576 if (!ie)
5577 return -EINVAL;
5579 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5580 *csa_count = ie_csa->count;
5582 return 0;
5585 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5586 struct ieee80211_vif *vif,
5587 struct cfg80211_chan_def *chandef)
5589 struct wl1271 *wl = hw->priv;
5590 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5591 struct ieee80211_channel_switch ch_switch = {
5592 .block_tx = true,
5593 .chandef = *chandef,
5595 int ret;
5597 wl1271_debug(DEBUG_MAC80211,
5598 "mac80211 channel switch beacon (role %d)",
5599 wlvif->role_id);
5601 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5602 if (ret < 0) {
5603 wl1271_error("error getting beacon (for CSA counter)");
5604 return;
5607 mutex_lock(&wl->mutex);
5609 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5610 ret = -EBUSY;
5611 goto out;
5614 ret = pm_runtime_get_sync(wl->dev);
5615 if (ret < 0) {
5616 pm_runtime_put_noidle(wl->dev);
5617 goto out;
5620 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5621 if (ret)
5622 goto out_sleep;
5624 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5626 out_sleep:
5627 pm_runtime_mark_last_busy(wl->dev);
5628 pm_runtime_put_autosuspend(wl->dev);
5629 out:
5630 mutex_unlock(&wl->mutex);
5633 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5634 u32 queues, bool drop)
5636 struct wl1271 *wl = hw->priv;
5638 wl1271_tx_flush(wl);
5641 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5642 struct ieee80211_vif *vif,
5643 struct ieee80211_channel *chan,
5644 int duration,
5645 enum ieee80211_roc_type type)
5647 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5648 struct wl1271 *wl = hw->priv;
5649 int channel, active_roc, ret = 0;
5651 channel = ieee80211_frequency_to_channel(chan->center_freq);
5653 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5654 channel, wlvif->role_id);
5656 mutex_lock(&wl->mutex);
5658 if (unlikely(wl->state != WLCORE_STATE_ON))
5659 goto out;
5661 /* return EBUSY if we can't ROC right now */
5662 active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5663 if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5664 wl1271_warning("active roc on role %d", active_roc);
5665 ret = -EBUSY;
5666 goto out;
5669 ret = pm_runtime_get_sync(wl->dev);
5670 if (ret < 0) {
5671 pm_runtime_put_noidle(wl->dev);
5672 goto out;
5675 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5676 if (ret < 0)
5677 goto out_sleep;
5679 wl->roc_vif = vif;
5680 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5681 msecs_to_jiffies(duration));
5682 out_sleep:
5683 pm_runtime_mark_last_busy(wl->dev);
5684 pm_runtime_put_autosuspend(wl->dev);
5685 out:
5686 mutex_unlock(&wl->mutex);
5687 return ret;
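/*
 * Tear down a finished remain-on-channel: stop the device role that was
 * started for the ROC and clear wl->roc_vif. Returns -EBUSY if the vif is no
 * longer initialized (e.g. during recovery).
 */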
5690 static int __wlcore_roc_completed(struct wl1271 *wl)
5692 struct wl12xx_vif *wlvif;
5693 int ret;
5695 /* already completed */
5696 if (unlikely(!wl->roc_vif))
5697 return 0;
5699 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5701 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5702 return -EBUSY;
5704 ret = wl12xx_stop_dev(wl, wlvif);
5705 if (ret < 0)
5706 return ret;
5708 wl->roc_vif = NULL;
5710 return 0;
5713 static int wlcore_roc_completed(struct wl1271 *wl)
5715 int ret;
5717 wl1271_debug(DEBUG_MAC80211, "roc complete");
5719 mutex_lock(&wl->mutex);
5721 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5722 ret = -EBUSY;
5723 goto out;
5726 ret = pm_runtime_get_sync(wl->dev);
5727 if (ret < 0) {
5728 pm_runtime_put_noidle(wl->dev);
5729 goto out;
5732 ret = __wlcore_roc_completed(wl);
5734 pm_runtime_mark_last_busy(wl->dev);
5735 pm_runtime_put_autosuspend(wl->dev);
5736 out:
5737 mutex_unlock(&wl->mutex);
5739 return ret;
5742 static void wlcore_roc_complete_work(struct work_struct *work)
5744 struct delayed_work *dwork;
5745 struct wl1271 *wl;
5746 int ret;
5748 dwork = to_delayed_work(work);
5749 wl = container_of(dwork, struct wl1271, roc_complete_work);
5751 ret = wlcore_roc_completed(wl);
5752 if (!ret)
5753 ieee80211_remain_on_channel_expired(wl->hw);
5756 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
5757 struct ieee80211_vif *vif)
5759 struct wl1271 *wl = hw->priv;
5761 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5763 /* TODO: per-vif */
5764 wl1271_tx_flush(wl);
5767 * we can't just flush_work here, because it might deadlock
5768 * (as we might get called from the same workqueue)
5770 cancel_delayed_work_sync(&wl->roc_complete_work);
5771 wlcore_roc_completed(wl);
5773 return 0;
5776 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5777 struct ieee80211_vif *vif,
5778 struct ieee80211_sta *sta,
5779 u32 changed)
5781 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5783 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5785 if (!(changed & IEEE80211_RC_BW_CHANGED))
5786 return;
5788 /* this callback is atomic, so schedule a new work */
5789 wlvif->rc_update_bw = sta->bandwidth;
5790 memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5791 ieee80211_queue_work(hw, &wlvif->rc_update_work);
5794 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5795 struct ieee80211_vif *vif,
5796 struct ieee80211_sta *sta,
5797 struct station_info *sinfo)
5799 struct wl1271 *wl = hw->priv;
5800 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5801 s8 rssi_dbm;
5802 int ret;
5804 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5806 mutex_lock(&wl->mutex);
5808 if (unlikely(wl->state != WLCORE_STATE_ON))
5809 goto out;
5811 ret = pm_runtime_get_sync(wl->dev);
5812 if (ret < 0) {
5813 pm_runtime_put_noidle(wl->dev);
5814 goto out; /* the reference was already dropped by put_noidle */
5817 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5818 if (ret < 0)
5819 goto out_sleep;
5821 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5822 sinfo->signal = rssi_dbm;
5824 out_sleep:
5825 pm_runtime_mark_last_busy(wl->dev);
5826 pm_runtime_put_autosuspend(wl->dev);
5828 out:
5829 mutex_unlock(&wl->mutex);
5832 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5833 struct ieee80211_sta *sta)
5835 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5836 struct wl1271 *wl = hw->priv;
5837 u8 hlid = wl_sta->hlid;
5839 /* return in units of Kbps */
5840 return (wl->links[hlid].fw_rate_mbps * 1000);
5843 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5845 struct wl1271 *wl = hw->priv;
5846 bool ret = false;
5848 mutex_lock(&wl->mutex);
5850 if (unlikely(wl->state != WLCORE_STATE_ON))
5851 goto out;
5853 /* packets are considered pending if in the TX queue or the FW */
5854 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5855 out:
5856 mutex_unlock(&wl->mutex);
5858 return ret;
5861 /* can't be const, mac80211 writes to this */
5862 static struct ieee80211_rate wl1271_rates[] = {
5863 { .bitrate = 10,
5864 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5865 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5866 { .bitrate = 20,
5867 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5868 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5869 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5870 { .bitrate = 55,
5871 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5872 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5873 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5874 { .bitrate = 110,
5875 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5876 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5877 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5878 { .bitrate = 60,
5879 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5880 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5881 { .bitrate = 90,
5882 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5883 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5884 { .bitrate = 120,
5885 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5886 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5887 { .bitrate = 180,
5888 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5889 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5890 { .bitrate = 240,
5891 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5892 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5893 { .bitrate = 360,
5894 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5895 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5896 { .bitrate = 480,
5897 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5898 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5899 { .bitrate = 540,
5900 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5901 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5904 /* can't be const, mac80211 writes to this */
5905 static struct ieee80211_channel wl1271_channels[] = {
5906 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5907 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5908 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5909 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5910 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5911 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5912 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5913 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5914 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5915 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5916 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5917 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5918 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5919 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5922 /* can't be const, mac80211 writes to this */
5923 static struct ieee80211_supported_band wl1271_band_2ghz = {
5924 .channels = wl1271_channels,
5925 .n_channels = ARRAY_SIZE(wl1271_channels),
5926 .bitrates = wl1271_rates,
5927 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5930 /* 5 GHz data rates for WL1273 */
5931 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5932 { .bitrate = 60,
5933 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5934 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5935 { .bitrate = 90,
5936 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5937 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5938 { .bitrate = 120,
5939 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5940 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5941 { .bitrate = 180,
5942 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5943 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5944 { .bitrate = 240,
5945 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5946 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5947 { .bitrate = 360,
5948 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5949 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5950 { .bitrate = 480,
5951 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5952 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5953 { .bitrate = 540,
5954 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5955 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5958 /* 5 GHz band channels for WL1273 */
5959 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5960 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5961 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5962 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5963 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5964 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5965 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5966 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5967 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5968 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5969 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5970 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5971 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5972 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5973 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5974 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5975 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5976 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5977 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5978 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5979 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5980 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5981 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5982 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5983 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5984 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5985 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5986 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5987 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5988 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5989 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5990 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5993 static struct ieee80211_supported_band wl1271_band_5ghz = {
5994 .channels = wl1271_channels_5ghz,
5995 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5996 .bitrates = wl1271_rates_5ghz,
5997 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
6000 static const struct ieee80211_ops wl1271_ops = {
6001 .start = wl1271_op_start,
6002 .stop = wlcore_op_stop,
6003 .add_interface = wl1271_op_add_interface,
6004 .remove_interface = wl1271_op_remove_interface,
6005 .change_interface = wl12xx_op_change_interface,
6006 #ifdef CONFIG_PM
6007 .suspend = wl1271_op_suspend,
6008 .resume = wl1271_op_resume,
6009 #endif
6010 .config = wl1271_op_config,
6011 .prepare_multicast = wl1271_op_prepare_multicast,
6012 .configure_filter = wl1271_op_configure_filter,
6013 .tx = wl1271_op_tx,
6014 .set_key = wlcore_op_set_key,
6015 .hw_scan = wl1271_op_hw_scan,
6016 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
6017 .sched_scan_start = wl1271_op_sched_scan_start,
6018 .sched_scan_stop = wl1271_op_sched_scan_stop,
6019 .bss_info_changed = wl1271_op_bss_info_changed,
6020 .set_frag_threshold = wl1271_op_set_frag_threshold,
6021 .set_rts_threshold = wl1271_op_set_rts_threshold,
6022 .conf_tx = wl1271_op_conf_tx,
6023 .get_tsf = wl1271_op_get_tsf,
6024 .get_survey = wl1271_op_get_survey,
6025 .sta_state = wl12xx_op_sta_state,
6026 .ampdu_action = wl1271_op_ampdu_action,
6027 .tx_frames_pending = wl1271_tx_frames_pending,
6028 .set_bitrate_mask = wl12xx_set_bitrate_mask,
6029 .set_default_unicast_key = wl1271_op_set_default_key_idx,
6030 .channel_switch = wl12xx_op_channel_switch,
6031 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
6032 .flush = wlcore_op_flush,
6033 .remain_on_channel = wlcore_op_remain_on_channel,
6034 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6035 .add_chanctx = wlcore_op_add_chanctx,
6036 .remove_chanctx = wlcore_op_remove_chanctx,
6037 .change_chanctx = wlcore_op_change_chanctx,
6038 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6039 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6040 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6041 .sta_rc_update = wlcore_op_sta_rc_update,
6042 .sta_statistics = wlcore_op_sta_statistics,
6043 .get_expected_throughput = wlcore_op_get_expected_throughput,
6044 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
6048 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6050 u8 idx;
6052 BUG_ON(band >= 2);
6054 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6055 wl1271_error("Illegal RX rate from HW: %d", rate);
6056 return 0;
6059 idx = wl->band_rate_to_idx[band][rate];
6060 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6061 wl1271_error("Unsupported RX rate from HW: %d", rate);
6062 return 0;
6065 return idx;
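/*
 * Derive the wiphy's MAC addresses from a 24-bit OUI and a 24-bit NIC base by
 * incrementing the NIC part once per address. For example, with oui 0x080028
 * and nic 0x000001 the first two addresses would be 08:00:28:00:00:01 and
 * 08:00:28:00:00:02; if fewer than WLCORE_NUM_MAC_ADDRESSES addresses are
 * available, the last slot reuses the first address with the
 * locally-administered bit set.
 */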
6068 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6070 int i;
6072 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6073 oui, nic);
6075 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6076 wl1271_warning("NIC part of the MAC address wraps around!");
6078 for (i = 0; i < wl->num_mac_addr; i++) {
6079 wl->addresses[i].addr[0] = (u8)(oui >> 16);
6080 wl->addresses[i].addr[1] = (u8)(oui >> 8);
6081 wl->addresses[i].addr[2] = (u8) oui;
6082 wl->addresses[i].addr[3] = (u8)(nic >> 16);
6083 wl->addresses[i].addr[4] = (u8)(nic >> 8);
6084 wl->addresses[i].addr[5] = (u8) nic;
6085 nic++;
6088 /* we may be one address short at the most */
6089 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6092 * turn on the LAA bit in the first address and use it as
6093 * the last address.
6095 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6096 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6097 memcpy(&wl->addresses[idx], &wl->addresses[0],
6098 sizeof(wl->addresses[0]));
6099 /* LAA bit */
6100 wl->addresses[idx].addr[0] |= BIT(1);
6103 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6104 wl->hw->wiphy->addresses = wl->addresses;
6107 static int wl12xx_get_hw_info(struct wl1271 *wl)
6109 int ret;
6111 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6112 if (ret < 0)
6113 goto out;
6115 wl->fuse_oui_addr = 0;
6116 wl->fuse_nic_addr = 0;
6118 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6119 if (ret < 0)
6120 goto out;
6122 if (wl->ops->get_mac)
6123 ret = wl->ops->get_mac(wl);
6125 out:
6126 return ret;
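/*
 * Register with mac80211. The MAC address is taken from the NVS when present,
 * otherwise (or when the NVS holds the well-known placeholder
 * de:ad:be:ef:00:00) it is derived from the fuse OUI/NIC values, and as a
 * last resort from the TI OUI plus a random NIC part.
 */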
6129 static int wl1271_register_hw(struct wl1271 *wl)
6131 int ret;
6132 u32 oui_addr = 0, nic_addr = 0;
6133 struct platform_device *pdev = wl->pdev;
6134 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6136 if (wl->mac80211_registered)
6137 return 0;
6139 if (wl->nvs_len >= 12) {
6140 /* NOTE: The wl->nvs->nvs element must be first; to
6141 * simplify the casting, we assume it is at
6142 * the beginning of the wl->nvs structure.
6144 u8 *nvs_ptr = (u8 *)wl->nvs;
6146 oui_addr =
6147 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6148 nic_addr =
6149 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6152 /* if the MAC address is zeroed in the NVS derive from fuse */
6153 if (oui_addr == 0 && nic_addr == 0) {
6154 oui_addr = wl->fuse_oui_addr;
6155 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6156 nic_addr = wl->fuse_nic_addr + 1;
6159 if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6160 wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6161 if (!strcmp(pdev_data->family->name, "wl18xx")) {
6162 wl1271_warning("This default nvs file can be removed from the file system");
6163 } else {
6164 wl1271_warning("Your device performance is not optimized.");
6165 wl1271_warning("Please use the calibrator tool to configure your device.");
6168 if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6169 wl1271_warning("Fuse mac address is zero. using random mac");
6170 /* Use TI oui and a random nic */
6171 oui_addr = WLCORE_TI_OUI_ADDRESS;
6172 nic_addr = get_random_int();
6173 } else {
6174 oui_addr = wl->fuse_oui_addr;
6175 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6176 nic_addr = wl->fuse_nic_addr + 1;
6180 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6182 ret = ieee80211_register_hw(wl->hw);
6183 if (ret < 0) {
6184 wl1271_error("unable to register mac80211 hw: %d", ret);
6185 goto out;
6188 wl->mac80211_registered = true;
6190 wl1271_debugfs_init(wl);
6192 wl1271_notice("loaded");
6194 out:
6195 return ret;
6198 static void wl1271_unregister_hw(struct wl1271 *wl)
6200 if (wl->plt)
6201 wl1271_plt_stop(wl);
6203 ieee80211_unregister_hw(wl->hw);
6204 wl->mac80211_registered = false;
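/*
 * One-time mac80211/wiphy capability setup: cipher suites, interface modes,
 * scan limits, hardware flags and the per-device copies of the band
 * definitions (the global band structs are templates that each device
 * instance patches with its own HT capabilities).
 */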
6208 static int wl1271_init_ieee80211(struct wl1271 *wl)
6210 int i;
6211 static const u32 cipher_suites[] = {
6212 WLAN_CIPHER_SUITE_WEP40,
6213 WLAN_CIPHER_SUITE_WEP104,
6214 WLAN_CIPHER_SUITE_TKIP,
6215 WLAN_CIPHER_SUITE_CCMP,
6216 WL1271_CIPHER_SUITE_GEM,
6219 /* The tx descriptor buffer */
6220 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6222 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6223 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6225 /* unit us */
6226 /* FIXME: find a proper value */
6227 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6229 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6230 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6231 ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
6232 ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6233 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6234 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6235 ieee80211_hw_set(wl->hw, AP_LINK_PS);
6236 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6237 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6238 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6239 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6240 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6241 ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6242 ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6243 ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6245 wl->hw->wiphy->cipher_suites = cipher_suites;
6246 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6248 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6249 BIT(NL80211_IFTYPE_AP) |
6250 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6251 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6252 #ifdef CONFIG_MAC80211_MESH
6253 BIT(NL80211_IFTYPE_MESH_POINT) |
6254 #endif
6255 BIT(NL80211_IFTYPE_P2P_GO);
6257 wl->hw->wiphy->max_scan_ssids = 1;
6258 wl->hw->wiphy->max_sched_scan_ssids = 16;
6259 wl->hw->wiphy->max_match_sets = 16;
6261 * Maximum length of elements in scanning probe request templates
6262 * should be the maximum length possible for a template, without
6263 * the IEEE80211 header of the template
6265 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6266 sizeof(struct ieee80211_header);
6268 wl->hw->wiphy->max_sched_scan_reqs = 1;
6269 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6270 sizeof(struct ieee80211_header);
6272 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6274 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6275 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6276 WIPHY_FLAG_HAS_CHANNEL_SWITCH |
6277 WIPHY_FLAG_IBSS_RSN;
6279 wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6281 /* make sure all our channels fit in the scanned_ch bitmask */
6282 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6283 ARRAY_SIZE(wl1271_channels_5ghz) >
6284 WL1271_MAX_CHANNELS);
6286 * clear channel flags from the previous usage
6287 * and restore max_power & max_antenna_gain values.
6289 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6290 wl1271_band_2ghz.channels[i].flags = 0;
6291 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6292 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6295 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6296 wl1271_band_5ghz.channels[i].flags = 0;
6297 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6298 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6302 * We keep local copies of the band structs because we need to
6303 * modify them on a per-device basis.
6305 memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6306 sizeof(wl1271_band_2ghz));
6307 memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6308 &wl->ht_cap[NL80211_BAND_2GHZ],
6309 sizeof(*wl->ht_cap));
6310 memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6311 sizeof(wl1271_band_5ghz));
6312 memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6313 &wl->ht_cap[NL80211_BAND_5GHZ],
6314 sizeof(*wl->ht_cap));
6316 wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6317 &wl->bands[NL80211_BAND_2GHZ];
6318 wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6319 &wl->bands[NL80211_BAND_5GHZ];
6322 * allow 4 queues per mac address we support +
6323 * 1 cab queue per mac + one global offchannel Tx queue
6325 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6327 /* the last queue is the offchannel queue */
6328 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6329 wl->hw->max_rates = 1;
6331 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6333 /* the FW answers probe-requests in AP-mode */
6334 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6335 wl->hw->wiphy->probe_resp_offload =
6336 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6337 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6338 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6340 /* allowed interface combinations */
6341 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6342 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6344 /* register vendor commands */
6345 wlcore_set_vendor_commands(wl->hw->wiphy);
6347 SET_IEEE80211_DEV(wl->hw, wl->dev);
6349 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6350 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6352 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6354 return 0;
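/*
 * Allocate the ieee80211_hw and the wl1271 state behind it: per-link TX
 * queues, deferred RX/TX queues, work items, the freezable workqueue, the
 * aggregation buffer, the dummy packet, one page of FW log and the event
 * mailbox. Errors unwind in reverse allocation order.
 */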
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
				     u32 mbox_size)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	int i, j, ret;
	unsigned int order;

	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
	if (!hw) {
		wl1271_error("could not alloc ieee80211_hw");
		ret = -ENOMEM;
		goto err_hw_alloc;
	}

	wl = hw->priv;
	memset(wl, 0, sizeof(*wl));

	wl->priv = kzalloc(priv_size, GFP_KERNEL);
	if (!wl->priv) {
		wl1271_error("could not alloc wl priv");
		ret = -ENOMEM;
		goto err_priv_alloc;
	}

	INIT_LIST_HEAD(&wl->wlvif_list);

	wl->hw = hw;

	/*
	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
	 * we don't allocate any additional resource here, so that's fine.
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++)
		for (j = 0; j < WLCORE_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);

	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);

	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);

	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
	if (!wl->freezable_wq) {
		ret = -ENOMEM;
		goto err_hw;
	}

	wl->channel = 0;
	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->band = NL80211_BAND_2GHZ;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->flags = 0;
	wl->sg_enabled = true;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->recovery_count = 0;
	wl->hw_pg_ver = -1;
	wl->ap_ps_map = 0;
	wl->ap_fw_ps_map = 0;
	wl->quirks = 0;
	wl->system_hlid = WL12XX_SYSTEM_HLID;
	wl->active_sta_count = 0;
	wl->active_link_count = 0;
	wl->fwlog_size = 0;

	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
	for (i = 0; i < wl->num_tx_desc; i++)
		wl->tx_frames[i] = NULL;

	spin_lock_init(&wl->wl_lock);

	wl->state = WLCORE_STATE_OFF;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	mutex_init(&wl->mutex);
	mutex_init(&wl->flush_mutex);
	init_completion(&wl->nvs_loading_complete);

	order = get_order(aggr_buf_size);
	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
	if (!wl->aggr_buf) {
		ret = -ENOMEM;
		goto err_wq;
	}
	wl->aggr_buf_size = aggr_buf_size;

	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
	if (!wl->dummy_packet) {
		ret = -ENOMEM;
		goto err_aggr;
	}

	/* Allocate one page for the FW log */
	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
	if (!wl->fwlog) {
		ret = -ENOMEM;
		goto err_dummy_packet;
	}

	wl->mbox_size = mbox_size;
	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
	if (!wl->mbox) {
		ret = -ENOMEM;
		goto err_fwlog;
	}

	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
	if (!wl->buffer_32) {
		ret = -ENOMEM;
		goto err_mbox;
	}

	return hw;
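
	/*
	 * Error unwinding: each label below releases whatever was
	 * successfully allocated before the corresponding failure, in
	 * reverse order of allocation, and finally the ieee80211_hw itself
	 * is freed and the error is returned as an ERR_PTR.
	 */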
err_mbox:
	kfree(wl->mbox);

err_fwlog:
	free_page((unsigned long)wl->fwlog);

err_dummy_packet:
	dev_kfree_skb(wl->dummy_packet);

err_aggr:
	free_pages((unsigned long)wl->aggr_buf, order);

err_wq:
	destroy_workqueue(wl->freezable_wq);

err_hw:
	wl1271_debugfs_exit(wl);
	kfree(wl->priv);

err_priv_alloc:
	ieee80211_free_hw(hw);

err_hw_alloc:

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
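
/*
 * Counterpart of wlcore_alloc_hw(): releases everything allocated there
 * plus state picked up at runtime (firmware image, NVS, fw status
 * buffers).  fwlog_size is set to -1 under the mutex first so that
 * anyone blocked waiting for fwlog data can return before the buffers
 * are freed.
 */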
int wlcore_free_hw(struct wl1271 *wl)
{
	/* Unblock any fwlog readers */
	mutex_lock(&wl->mutex);
	wl->fwlog_size = -1;
	mutex_unlock(&wl->mutex);

	wlcore_sysfs_free(wl);

	kfree(wl->buffer_32);
	kfree(wl->mbox);
	free_page((unsigned long)wl->fwlog);
	dev_kfree_skb(wl->dummy_packet);
	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));

	wl1271_debugfs_exit(wl);

	vfree(wl->fw);
	wl->fw = NULL;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	kfree(wl->nvs);
	wl->nvs = NULL;

	kfree(wl->raw_fw_status);
	kfree(wl->fw_status);
	kfree(wl->tx_res_if);
	destroy_workqueue(wl->freezable_wq);

	kfree(wl->priv);
	ieee80211_free_hw(wl->hw);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_free_hw);
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support wlcore_wowlan_support = {
	.flags = WIPHY_WOWLAN_ANY,
	.n_patterns = WL1271_MAX_RX_FILTERS,
	.pattern_min_len = 1,
	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
};
#endif
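
/*
 * Primary (hard) IRQ handler used for edge-triggered interrupt lines:
 * it does no work in hard-IRQ context and only wakes the threaded
 * handler (wlcore_irq, registered in wlcore_nvs_cb() below).
 */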
static irqreturn_t wlcore_hardirq(int irq, void *cookie)
{
	return IRQ_WAKE_THREAD;
}
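
/*
 * Completion callback for the asynchronous NVS firmware request issued
 * from wlcore_probe().  The NVS image (the chip's calibration/production
 * data; optional for some families) is copied aside, then the rest of
 * the bring-up runs here: chip-specific setup, IRQ wiring, hw info
 * detection and mac80211 registration.
 */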
static void wlcore_nvs_cb(const struct firmware *fw, void *context)
{
	struct wl1271 *wl = context;
	struct platform_device *pdev = wl->pdev;
	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
	struct resource *res;

	int ret;
	irq_handler_t hardirq_fn = NULL;

	if (fw) {
		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
		if (!wl->nvs) {
			wl1271_error("Could not allocate nvs data");
			goto out;
		}
		wl->nvs_len = fw->size;
	} else if (pdev_data->family->nvs_name) {
		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
			     pdev_data->family->nvs_name);
		wl->nvs = NULL;
		wl->nvs_len = 0;
	} else {
		wl->nvs = NULL;
		wl->nvs_len = 0;
	}

	ret = wl->ops->setup(wl);
	if (ret < 0)
		goto out_free_nvs;

	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);

	/* adjust some runtime configuration parameters */
	wlcore_adjust_conf(wl);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		wl1271_error("Could not get IRQ resource");
		goto out_free_nvs;
	}

	wl->irq = res->start;
	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
	wl->if_ops = pdev_data->if_ops;
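
	/*
	 * For edge-triggered lines install wlcore_hardirq as the primary
	 * handler, so the edge immediately wakes the IRQ thread and is not
	 * lost; level-triggered lines keep the default primary handler and
	 * therefore need IRQF_ONESHOT so the line stays masked until the
	 * threaded handler has finished.
	 */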
	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
		hardirq_fn = wlcore_hardirq;
	else
		wl->irq_flags |= IRQF_ONESHOT;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out_free_nvs;

	ret = wl12xx_get_hw_info(wl);
	if (ret < 0) {
		wl1271_error("couldn't get hw info");
		wl1271_power_off(wl);
		goto out_free_nvs;
	}

	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
				   wl->irq_flags, pdev->name, wl);
	if (ret < 0) {
		wl1271_error("interrupt configuration failed");
		wl1271_power_off(wl);
		goto out_free_nvs;
	}

#ifdef CONFIG_PM
	device_init_wakeup(wl->dev, true);

	ret = enable_irq_wake(wl->irq);
	if (!ret) {
		wl->irq_wake_enabled = true;
		if (pdev_data->pwr_in_suspend)
			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	if (res) {
		wl->wakeirq = res->start;
		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
		if (ret)
			wl->wakeirq = -ENODEV;
	} else {
		wl->wakeirq = -ENODEV;
	}
#endif
	disable_irq(wl->irq);
	wl1271_power_off(wl);
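
	/*
	 * From here on the chip stays powered off with its IRQ masked.  The
	 * remaining steps (identify_chip, mac80211 and sysfs registration)
	 * are expected to rely only on state already read while the chip
	 * was powered, e.g. the chip id cached by wl12xx_get_hw_info()
	 * above.
	 */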
	ret = wl->ops->identify_chip(wl);
	if (ret < 0)
		goto out_irq;

	ret = wl1271_init_ieee80211(wl);
	if (ret)
		goto out_irq;

	ret = wl1271_register_hw(wl);
	if (ret)
		goto out_irq;

	ret = wlcore_sysfs_init(wl);
	if (ret)
		goto out_unreg;

	wl->initialized = true;
	goto out;

out_unreg:
	wl1271_unregister_hw(wl);

out_irq:
	if (wl->wakeirq >= 0)
		dev_pm_clear_wake_irq(wl->dev);
	device_init_wakeup(wl->dev, false);
	free_irq(wl->irq, wl);

out_free_nvs:
	kfree(wl->nvs);

out:
	release_firmware(fw);
	complete_all(&wl->nvs_loading_complete);
}
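
/*
 * Runtime PM hooks: when the host has nothing pending, the chip is put
 * into ELP (its low-power sleep state) by writing ELPCTRL_SLEEP to the
 * ELP control register.  Suspend is refused with -EBUSY while any
 * in-use interface has not yet entered powersave, and skipped entirely
 * in PLT mode or when ELP is not the configured sleep_auth level.
 */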
static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
{
	struct wl1271 *wl = dev_get_drvdata(dev);
	struct wl12xx_vif *wlvif;
	int error;

	/* We do not enter elp sleep in PLT mode */
	if (wl->plt)
		return 0;

	/* Nothing to do if no ELP mode requested */
	if (wl->sleep_auth != WL1271_PSM_ELP)
		return 0;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
			return -EBUSY;
	}

	wl1271_debug(DEBUG_PSM, "chip to elp");
	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
	if (error < 0) {
		wl12xx_queue_recovery_work(wl);

		return error;
	}

	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);

	return 0;
}
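
/*
 * Waking the chip from ELP: the wakeup is requested by writing
 * ELPCTRL_WAKE_UP and normally confirmed through wl->elp_compl, which
 * the interrupt path is expected to complete.  If the IRQ thread is
 * already running it will observe the wakeup itself, so the wait is
 * skipped.  On timeout a recovery is queued but 0 is returned, so
 * runtime PM itself is not failed (see the comment in the timeout
 * branch below).
 */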
static int __maybe_unused wlcore_runtime_resume(struct device *dev)
{
	struct wl1271 *wl = dev_get_drvdata(dev);
	DECLARE_COMPLETION_ONSTACK(compl);
	unsigned long flags;
	int ret;
	unsigned long start_time = jiffies;
	bool pending = false;
	bool recovery = false;

	/* Nothing to do if no ELP mode requested */
	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
		return 0;

	wl1271_debug(DEBUG_PSM, "waking up chip from elp");

	spin_lock_irqsave(&wl->wl_lock, flags);
	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
		pending = true;
	else
		wl->elp_compl = &compl;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
	if (ret < 0) {
		recovery = true;
		goto err;
	}

	if (!pending) {
		ret = wait_for_completion_timeout(&compl,
			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
		if (ret == 0) {
			wl1271_warning("ELP wakeup timeout!");

			/* Return no error for runtime PM for recovery */
			ret = 0;
			recovery = true;
			goto err;
		}
	}

	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);

	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
		     jiffies_to_msecs(jiffies - start_time));

	return 0;

err:
	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->elp_compl = NULL;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	if (recovery) {
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);
	}

	return ret;
}
static const struct dev_pm_ops wlcore_pm_ops = {
	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
			   wlcore_runtime_resume,
			   NULL)
};
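
/*
 * Probe is partly asynchronous: if the family defines an NVS file name
 * the image is requested with request_firmware_nowait() and the rest of
 * the bring-up happens in wlcore_nvs_cb(); otherwise the callback is
 * invoked directly with a NULL firmware.  Runtime PM is enabled with a
 * 50 ms autosuspend delay, so the device may drop back into ELP shortly
 * after its last user releases it.
 */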
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
	const char *nvs_name;
	int ret = 0;

	if (!wl->ops || !wl->ptable || !pdev_data)
		return -EINVAL;

	wl->dev = &pdev->dev;
	wl->pdev = pdev;
	platform_set_drvdata(pdev, wl);

	if (pdev_data->family && pdev_data->family->nvs_name) {
		nvs_name = pdev_data->family->nvs_name;
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					      nvs_name, &pdev->dev, GFP_KERNEL,
					      wl, wlcore_nvs_cb);
		if (ret < 0) {
			wl1271_error("request_firmware_nowait failed for %s: %d",
				     nvs_name, ret);
			complete_all(&wl->nvs_loading_complete);
		}
	} else {
		wlcore_nvs_cb(NULL, wl);
	}

	wl->dev->driver->pm = &wlcore_pm_ops;
	pm_runtime_set_autosuspend_delay(wl->dev, 50);
	pm_runtime_use_autosuspend(wl->dev);
	pm_runtime_enable(wl->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(wlcore_probe);
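
/*
 * Teardown mirrors wlcore_probe(): resume the device so it can be
 * accessed, detach the PM ops, wait for the asynchronous NVS callback
 * to finish, and only then unwind the wakeup/IRQ setup, unregister the
 * mac80211 hw and free the wlcore state.  If the NVS callback never
 * completed initialization (wl->initialized is false) there is nothing
 * further to undo.
 */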
int wlcore_remove(struct platform_device *pdev)
{
	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
	struct wl1271 *wl = platform_get_drvdata(pdev);
	int error;

	error = pm_runtime_get_sync(wl->dev);
	if (error < 0)
		dev_warn(wl->dev, "PM runtime failed: %i\n", error);

	wl->dev->driver->pm = NULL;

	if (pdev_data->family && pdev_data->family->nvs_name)
		wait_for_completion(&wl->nvs_loading_complete);
	if (!wl->initialized)
		return 0;

	if (wl->wakeirq >= 0) {
		dev_pm_clear_wake_irq(wl->dev);
		wl->wakeirq = -ENODEV;
	}

	device_init_wakeup(wl->dev, false);

	if (wl->irq_wake_enabled)
		disable_irq_wake(wl->irq);

	wl1271_unregister_hw(wl);

	pm_runtime_put_sync(wl->dev);
	pm_runtime_dont_use_autosuspend(wl->dev);
	pm_runtime_disable(wl->dev);

	free_irq(wl->irq, wl);
	wlcore_free_hw(wl);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_remove);
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, dbgpins or disable");

module_param(fwlog_mem_blocks, int, 0600);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");

module_param(bug_on_recovery, int, 0600);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, 0600);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
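
/*
 * Illustrative usage of the module parameters above (values are only
 * examples, not recommendations):
 *
 *   modprobe wlcore debug_level=0xffffffff fwlog=continuous no_recovery=1
 *
 * debug_level is also writable at runtime through
 * /sys/module/wlcore/parameters/debug_level thanks to the 0600
 * permissions above.
 */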
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");