/* drivers/net/wireless/ti/wlcore/main.c */
/*
 * This file is part of wlcore
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 * Copyright (C) 2011-2013 Texas Instruments Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include "wlcore.h"
#include "debug.h"
#include "wl12xx_80211.h"
#include "io.h"
#include "tx.h"
#include "ps.h"
#include "init.h"
#include "debugfs.h"
#include "testmode.h"
#include "vendor_cmd.h"
#include "scan.h"
#include "hw_ops.h"
#include "sysfs.h"
#define WL1271_BOOT_RETRIES 3
#define WL1271_SUSPEND_SLEEP 100

static char *fwlog_param;
static int fwlog_mem_blocks = -1;
static int bug_on_recovery = -1;
static int no_recovery = -1;
static void __wl1271_op_remove_interface(struct wl1271 *wl,
                                         struct ieee80211_vif *vif,
                                         bool reset_tx_queues);
static void wlcore_op_stop_locked(struct wl1271 *wl);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
        int ret;

        if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
                return -EINVAL;

        if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
                return 0;

        if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
                return 0;

        ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
        if (ret < 0)
                return ret;

        wl1271_info("Association completed.");
        return 0;
}
static void wl1271_reg_notify(struct wiphy *wiphy,
                              struct regulatory_request *request)
{
        struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
        struct wl1271 *wl = hw->priv;

        /* copy the current dfs region */
        if (request)
                wl->dfs_region = request->dfs_region;

        wlcore_regdomain_config(wl);
}
static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                                   bool enable)
{
        int ret = 0;

        /* we should hold wl->mutex */
        ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
        if (ret < 0)
                goto out;

        if (enable)
                set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
        else
                clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
out:
        return ret;
}
/*
 * This function is called when the rx_streaming interval has been
 * changed or rx_streaming should be disabled.
 */
114 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
116 int ret = 0;
117 int period = wl->conf.rx_streaming.interval;
119 /* don't reconfigure if rx_streaming is disabled */
120 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
121 goto out;
123 /* reconfigure/disable according to new streaming_period */
124 if (period &&
125 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
126 (wl->conf.rx_streaming.always ||
127 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
128 ret = wl1271_set_rx_streaming(wl, wlvif, true);
129 else {
130 ret = wl1271_set_rx_streaming(wl, wlvif, false);
131 /* don't cancel_work_sync since we might deadlock */
132 del_timer_sync(&wlvif->rx_streaming_timer);
134 out:
135 return ret;
138 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
140 int ret;
141 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
142 rx_streaming_enable_work);
143 struct wl1271 *wl = wlvif->wl;
145 mutex_lock(&wl->mutex);
147 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
148 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
149 (!wl->conf.rx_streaming.always &&
150 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
151 goto out;
153 if (!wl->conf.rx_streaming.interval)
154 goto out;
156 ret = wl1271_ps_elp_wakeup(wl);
157 if (ret < 0)
158 goto out;
160 ret = wl1271_set_rx_streaming(wl, wlvif, true);
161 if (ret < 0)
162 goto out_sleep;
164 /* stop it after some time of inactivity */
165 mod_timer(&wlvif->rx_streaming_timer,
166 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
168 out_sleep:
169 wl1271_ps_elp_sleep(wl);
170 out:
171 mutex_unlock(&wl->mutex);
174 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
176 int ret;
177 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
178 rx_streaming_disable_work);
179 struct wl1271 *wl = wlvif->wl;
181 mutex_lock(&wl->mutex);
183 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
184 goto out;
186 ret = wl1271_ps_elp_wakeup(wl);
187 if (ret < 0)
188 goto out;
190 ret = wl1271_set_rx_streaming(wl, wlvif, false);
191 if (ret)
192 goto out_sleep;
194 out_sleep:
195 wl1271_ps_elp_sleep(wl);
196 out:
197 mutex_unlock(&wl->mutex);
200 static void wl1271_rx_streaming_timer(struct timer_list *t)
202 struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
203 struct wl1271 *wl = wlvif->wl;
204 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
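/*
 * The pieces above work together: rx_streaming_enable_work turns RX
 * streaming on and arms rx_streaming_timer for conf.rx_streaming.duration
 * ms of inactivity; when the timer fires it only queues
 * rx_streaming_disable_work, which takes wl->mutex and turns streaming
 * off again.  wl1271_recalc_rx_streaming() above handles the case where
 * the interval is changed at runtime.
 */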
207 /* wl->mutex must be taken */
208 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
210 /* if the watchdog is not armed, don't do anything */
211 if (wl->tx_allocated_blocks == 0)
212 return;
214 cancel_delayed_work(&wl->tx_watchdog_work);
215 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
216 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
219 static void wlcore_rc_update_work(struct work_struct *work)
221 int ret;
222 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
223 rc_update_work);
224 struct wl1271 *wl = wlvif->wl;
225 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
227 mutex_lock(&wl->mutex);
229 if (unlikely(wl->state != WLCORE_STATE_ON))
230 goto out;
232 ret = wl1271_ps_elp_wakeup(wl);
233 if (ret < 0)
234 goto out;
236 if (ieee80211_vif_is_mesh(vif)) {
237 ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
238 true, wlvif->sta.hlid);
239 if (ret < 0)
240 goto out_sleep;
241 } else {
242 wlcore_hw_sta_rc_update(wl, wlvif);
245 out_sleep:
246 wl1271_ps_elp_sleep(wl);
247 out:
248 mutex_unlock(&wl->mutex);
251 static void wl12xx_tx_watchdog_work(struct work_struct *work)
253 struct delayed_work *dwork;
254 struct wl1271 *wl;
256 dwork = to_delayed_work(work);
257 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
259 mutex_lock(&wl->mutex);
261 if (unlikely(wl->state != WLCORE_STATE_ON))
262 goto out;
264 /* Tx went out in the meantime - everything is ok */
265 if (unlikely(wl->tx_allocated_blocks == 0))
266 goto out;
269 * if a ROC is in progress, we might not have any Tx for a long
270 * time (e.g. pending Tx on the non-ROC channels)
272 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
273 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
274 wl->conf.tx.tx_watchdog_timeout);
275 wl12xx_rearm_tx_watchdog_locked(wl);
276 goto out;
280 * if a scan is in progress, we might not have any Tx for a long
281 * time
283 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
284 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
285 wl->conf.tx.tx_watchdog_timeout);
286 wl12xx_rearm_tx_watchdog_locked(wl);
287 goto out;
/*
 * An AP might cache a frame for a long time for a sleeping station,
 * so rearm the timer if there's an AP interface with stations. If
 * Tx is genuinely stuck we will hopefully discover it when all
 * stations are removed due to inactivity.
 */
296 if (wl->active_sta_count) {
297 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
298 " %d stations",
299 wl->conf.tx.tx_watchdog_timeout,
300 wl->active_sta_count);
301 wl12xx_rearm_tx_watchdog_locked(wl);
302 goto out;
305 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
306 wl->conf.tx.tx_watchdog_timeout);
307 wl12xx_queue_recovery_work(wl);
309 out:
310 mutex_unlock(&wl->mutex);
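/*
 * To summarize the checks above: a stall is treated as benign (the
 * watchdog is simply rearmed) while a ROC is active, while a scan is in
 * progress, or while an AP interface still has connected stations that
 * may be sleeping.  Recovery is queued only when none of these apply
 * and no blocks were freed for a whole conf.tx.tx_watchdog_timeout
 * period (in ms), at which point wl12xx_queue_recovery_work() restarts
 * the firmware.
 */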
313 static void wlcore_adjust_conf(struct wl1271 *wl)
316 if (fwlog_param) {
317 if (!strcmp(fwlog_param, "continuous")) {
318 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
319 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
320 } else if (!strcmp(fwlog_param, "dbgpins")) {
321 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
322 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
323 } else if (!strcmp(fwlog_param, "disable")) {
324 wl->conf.fwlog.mem_blocks = 0;
325 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
326 } else {
327 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
331 if (bug_on_recovery != -1)
332 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
334 if (no_recovery != -1)
335 wl->conf.recovery.no_recovery = (u8) no_recovery;
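/*
 * The overrides above come from module parameters (the statics declared
 * near the top of this file, exposed via module_param() calls further
 * down), so a load-time invocation along the lines of
 *
 *   modprobe wlcore fwlog=continuous no_recovery=1
 *
 * (exact parameter names as defined by those module_param() calls)
 * would select continuous host fwlog output and disable automatic
 * recovery.  Values left at their defaults (-1 / NULL) keep whatever
 * the lower-level driver put into wl->conf.
 */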
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
                                        struct wl12xx_vif *wlvif,
                                        u8 hlid, u8 tx_pkts)
{
        bool fw_ps;

        fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);

        /*
         * Wake up from high-level PS if the STA is asleep with too few
         * packets in FW or if the STA is awake.
         */
        if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
                wl12xx_ps_link_end(wl, wlvif, hlid);

        /*
         * Start high-level PS if the STA is asleep with enough blocks in FW.
         * Make an exception if this is the only connected link. In this
         * case FW-memory congestion is less of a problem.
         * Note that a single connected STA means 2*ap_count + 1 active links,
         * since we must account for the global and broadcast AP links
         * for each AP. The "fw_ps" check assures us the other link is a STA
         * connected to the AP. Otherwise the FW would not set the PSM bit.
         */
        else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
                 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
                wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
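/*
 * Worked example for the threshold above: with one AP interface
 * (ap_count == 1) the global and broadcast AP links plus a single
 * connected STA give active_link_count == 3, which is not greater than
 * 2 * ap_count + 1, so high-level PS is not started for that lone STA.
 * Only when a second data link exists does the condition hold and PS
 * throttling begin.
 */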
367 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
368 struct wl12xx_vif *wlvif,
369 struct wl_fw_status *status)
371 unsigned long cur_fw_ps_map;
372 u8 hlid;
374 cur_fw_ps_map = status->link_ps_bitmap;
375 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
376 wl1271_debug(DEBUG_PSM,
377 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
378 wl->ap_fw_ps_map, cur_fw_ps_map,
379 wl->ap_fw_ps_map ^ cur_fw_ps_map);
381 wl->ap_fw_ps_map = cur_fw_ps_map;
384 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
385 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
386 wl->links[hlid].allocated_pkts);
389 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
391 struct wl12xx_vif *wlvif;
392 u32 old_tx_blk_count = wl->tx_blocks_available;
393 int avail, freed_blocks;
394 int i;
395 int ret;
396 struct wl1271_link *lnk;
398 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
399 wl->raw_fw_status,
400 wl->fw_status_len, false);
401 if (ret < 0)
402 return ret;
404 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
406 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
407 "drv_rx_counter = %d, tx_results_counter = %d)",
408 status->intr,
409 status->fw_rx_counter,
410 status->drv_rx_counter,
411 status->tx_results_counter);
413 for (i = 0; i < NUM_TX_QUEUES; i++) {
414 /* prevent wrap-around in freed-packets counter */
415 wl->tx_allocated_pkts[i] -=
416 (status->counters.tx_released_pkts[i] -
417 wl->tx_pkts_freed[i]) & 0xff;
419 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
423 for_each_set_bit(i, wl->links_map, wl->num_links) {
424 u8 diff;
425 lnk = &wl->links[i];
427 /* prevent wrap-around in freed-packets counter */
428 diff = (status->counters.tx_lnk_free_pkts[i] -
429 lnk->prev_freed_pkts) & 0xff;
431 if (diff == 0)
432 continue;
434 lnk->allocated_pkts -= diff;
435 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
437 /* accumulate the prev_freed_pkts counter */
438 lnk->total_freed_pkts += diff;
441 /* prevent wrap-around in total blocks counter */
442 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
443 freed_blocks = status->total_released_blks -
444 wl->tx_blocks_freed;
445 else
446 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
447 status->total_released_blks;
449 wl->tx_blocks_freed = status->total_released_blks;
451 wl->tx_allocated_blocks -= freed_blocks;
454 * If the FW freed some blocks:
455 * If we still have allocated blocks - re-arm the timer, Tx is
456 * not stuck. Otherwise, cancel the timer (no Tx currently).
458 if (freed_blocks) {
459 if (wl->tx_allocated_blocks)
460 wl12xx_rearm_tx_watchdog_locked(wl);
461 else
462 cancel_delayed_work(&wl->tx_watchdog_work);
465 avail = status->tx_total - wl->tx_allocated_blocks;
468 * The FW might change the total number of TX memblocks before
469 * we get a notification about blocks being released. Thus, the
470 * available blocks calculation might yield a temporary result
471 * which is lower than the actual available blocks. Keeping in
472 * mind that only blocks that were allocated can be moved from
473 * TX to RX, tx_blocks_available should never decrease here.
475 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
476 avail);
478 /* if more blocks are available now, tx work can be scheduled */
479 if (wl->tx_blocks_available > old_tx_blk_count)
480 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
482 /* for AP update num of allocated TX blocks per link and ps status */
483 wl12xx_for_each_wlvif_ap(wl, wlvif) {
484 wl12xx_irq_update_links_status(wl, wlvif, status);
487 /* update the host-chipset time offset */
488 wl->time_offset = (ktime_get_boot_ns() >> 10) -
489 (s64)(status->fw_localtime);
491 wl->fw_fast_lnk_map = status->link_fast_bitmap;
493 return 0;
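/*
 * A note on the wrap-around handling above: the per-queue and per-link
 * counters reported by the firmware are 8 bits wide, so the difference
 * is masked with 0xff.  For example, if prev_freed_pkts was 0xfe and
 * the firmware now reports 0x03, (0x03 - 0xfe) & 0xff == 5, i.e. five
 * packets were freed across the wrap.  total_released_blks gets the
 * equivalent 32-bit treatment via the 0x100000000LL correction.
 */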
496 static void wl1271_flush_deferred_work(struct wl1271 *wl)
498 struct sk_buff *skb;
500 /* Pass all received frames to the network stack */
501 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
502 ieee80211_rx_ni(wl->hw, skb);
504 /* Return sent skbs to the network stack */
505 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
506 ieee80211_tx_status_ni(wl->hw, skb);
509 static void wl1271_netstack_work(struct work_struct *work)
511 struct wl1271 *wl =
512 container_of(work, struct wl1271, netstack_work);
514 do {
515 wl1271_flush_deferred_work(wl);
516 } while (skb_queue_len(&wl->deferred_rx_queue));
519 #define WL1271_IRQ_MAX_LOOPS 256
521 static int wlcore_irq_locked(struct wl1271 *wl)
523 int ret = 0;
524 u32 intr;
525 int loopcount = WL1271_IRQ_MAX_LOOPS;
526 bool done = false;
527 unsigned int defer_count;
528 unsigned long flags;
531 * In case edge triggered interrupt must be used, we cannot iterate
532 * more than once without introducing race conditions with the hardirq.
534 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
535 loopcount = 1;
537 wl1271_debug(DEBUG_IRQ, "IRQ work");
539 if (unlikely(wl->state != WLCORE_STATE_ON))
540 goto out;
542 ret = wl1271_ps_elp_wakeup(wl);
543 if (ret < 0)
544 goto out;
546 while (!done && loopcount--) {
548 * In order to avoid a race with the hardirq, clear the flag
549 * before acknowledging the chip. Since the mutex is held,
550 * wl1271_ps_elp_wakeup cannot be called concurrently.
552 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
553 smp_mb__after_atomic();
555 ret = wlcore_fw_status(wl, wl->fw_status);
556 if (ret < 0)
557 goto out;
559 wlcore_hw_tx_immediate_compl(wl);
561 intr = wl->fw_status->intr;
562 intr &= WLCORE_ALL_INTR_MASK;
563 if (!intr) {
564 done = true;
565 continue;
568 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
569 wl1271_error("HW watchdog interrupt received! starting recovery.");
570 wl->watchdog_recovery = true;
571 ret = -EIO;
573 /* restarting the chip. ignore any other interrupt. */
574 goto out;
577 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
578 wl1271_error("SW watchdog interrupt received! "
579 "starting recovery.");
580 wl->watchdog_recovery = true;
581 ret = -EIO;
583 /* restarting the chip. ignore any other interrupt. */
584 goto out;
587 if (likely(intr & WL1271_ACX_INTR_DATA)) {
588 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
590 ret = wlcore_rx(wl, wl->fw_status);
591 if (ret < 0)
592 goto out;
594 /* Check if any tx blocks were freed */
595 spin_lock_irqsave(&wl->wl_lock, flags);
596 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
597 wl1271_tx_total_queue_count(wl) > 0) {
598 spin_unlock_irqrestore(&wl->wl_lock, flags);
600 * In order to avoid starvation of the TX path,
601 * call the work function directly.
603 ret = wlcore_tx_work_locked(wl);
604 if (ret < 0)
605 goto out;
606 } else {
607 spin_unlock_irqrestore(&wl->wl_lock, flags);
610 /* check for tx results */
611 ret = wlcore_hw_tx_delayed_compl(wl);
612 if (ret < 0)
613 goto out;
615 /* Make sure the deferred queues don't get too long */
616 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
617 skb_queue_len(&wl->deferred_rx_queue);
618 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
619 wl1271_flush_deferred_work(wl);
622 if (intr & WL1271_ACX_INTR_EVENT_A) {
623 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
624 ret = wl1271_event_handle(wl, 0);
625 if (ret < 0)
626 goto out;
629 if (intr & WL1271_ACX_INTR_EVENT_B) {
630 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
631 ret = wl1271_event_handle(wl, 1);
632 if (ret < 0)
633 goto out;
636 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
637 wl1271_debug(DEBUG_IRQ,
638 "WL1271_ACX_INTR_INIT_COMPLETE");
640 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
641 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
644 wl1271_ps_elp_sleep(wl);
646 out:
647 return ret;
650 static irqreturn_t wlcore_irq(int irq, void *cookie)
652 int ret;
653 unsigned long flags;
654 struct wl1271 *wl = cookie;
656 /* complete the ELP completion */
657 spin_lock_irqsave(&wl->wl_lock, flags);
658 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
659 if (wl->elp_compl) {
660 complete(wl->elp_compl);
661 wl->elp_compl = NULL;
664 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
665 /* don't enqueue a work right now. mark it as pending */
666 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
667 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
668 disable_irq_nosync(wl->irq);
669 pm_wakeup_event(wl->dev, 0);
670 spin_unlock_irqrestore(&wl->wl_lock, flags);
671 return IRQ_HANDLED;
673 spin_unlock_irqrestore(&wl->wl_lock, flags);
675 /* TX might be handled here, avoid redundant work */
676 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
677 cancel_work_sync(&wl->tx_work);
679 mutex_lock(&wl->mutex);
681 ret = wlcore_irq_locked(wl);
682 if (ret)
683 wl12xx_queue_recovery_work(wl);
685 spin_lock_irqsave(&wl->wl_lock, flags);
686 /* In case TX was not handled here, queue TX work */
687 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
688 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
689 wl1271_tx_total_queue_count(wl) > 0)
690 ieee80211_queue_work(wl->hw, &wl->tx_work);
691 spin_unlock_irqrestore(&wl->wl_lock, flags);
693 mutex_unlock(&wl->mutex);
695 return IRQ_HANDLED;
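/*
 * About the TX_PENDING handshake above: the threaded IRQ handler may
 * transmit directly from wlcore_irq_locked(), so tx_work is cancelled
 * before taking the mutex and only re-queued afterwards if frames are
 * still queued and the FW is not flow-controlled.  wl1271_op_tx() also
 * checks WL1271_FLAG_TX_PENDING so it does not schedule redundant TX
 * work while this handler is running.
 */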
698 struct vif_counter_data {
699 u8 counter;
701 struct ieee80211_vif *cur_vif;
702 bool cur_vif_running;
705 static void wl12xx_vif_count_iter(void *data, u8 *mac,
706 struct ieee80211_vif *vif)
708 struct vif_counter_data *counter = data;
710 counter->counter++;
711 if (counter->cur_vif == vif)
712 counter->cur_vif_running = true;
715 /* caller must not hold wl->mutex, as it might deadlock */
716 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
717 struct ieee80211_vif *cur_vif,
718 struct vif_counter_data *data)
720 memset(data, 0, sizeof(*data));
721 data->cur_vif = cur_vif;
723 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
724 wl12xx_vif_count_iter, data);
727 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
729 const struct firmware *fw;
730 const char *fw_name;
731 enum wl12xx_fw_type fw_type;
732 int ret;
734 if (plt) {
735 fw_type = WL12XX_FW_TYPE_PLT;
736 fw_name = wl->plt_fw_name;
737 } else {
739 * we can't call wl12xx_get_vif_count() here because
740 * wl->mutex is taken, so use the cached last_vif_count value
742 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
743 fw_type = WL12XX_FW_TYPE_MULTI;
744 fw_name = wl->mr_fw_name;
745 } else {
746 fw_type = WL12XX_FW_TYPE_NORMAL;
747 fw_name = wl->sr_fw_name;
751 if (wl->fw_type == fw_type)
752 return 0;
754 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
756 ret = request_firmware(&fw, fw_name, wl->dev);
758 if (ret < 0) {
759 wl1271_error("could not get firmware %s: %d", fw_name, ret);
760 return ret;
763 if (fw->size % 4) {
764 wl1271_error("firmware size is not multiple of 32 bits: %zu",
765 fw->size);
766 ret = -EILSEQ;
767 goto out;
770 vfree(wl->fw);
771 wl->fw_type = WL12XX_FW_TYPE_NONE;
772 wl->fw_len = fw->size;
773 wl->fw = vmalloc(wl->fw_len);
775 if (!wl->fw) {
776 wl1271_error("could not allocate memory for the firmware");
777 ret = -ENOMEM;
778 goto out;
781 memcpy(wl->fw, fw->data, wl->fw_len);
782 ret = 0;
783 wl->fw_type = fw_type;
784 out:
785 release_firmware(fw);
787 return ret;
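/*
 * Firmware selection above: the PLT image is used for production-line
 * tests, the multi-role (mr) image when more than one vif was active
 * the last time interfaces were counted, and the single-role (sr)
 * image otherwise.  The concrete file names behind plt_fw_name,
 * mr_fw_name and sr_fw_name are expected to be filled in by the
 * chip-specific (wl12xx/wl18xx) probe code.
 */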
790 void wl12xx_queue_recovery_work(struct wl1271 *wl)
792 /* Avoid a recursive recovery */
793 if (wl->state == WLCORE_STATE_ON) {
794 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
795 &wl->flags));
797 wl->state = WLCORE_STATE_RESTARTING;
798 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
799 wl1271_ps_elp_wakeup(wl);
800 wlcore_disable_interrupts_nosync(wl);
801 ieee80211_queue_work(wl->hw, &wl->recovery_work);
805 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
807 size_t len;
809 /* Make sure we have enough room */
810 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
812 /* Fill the FW log file, consumed by the sysfs fwlog entry */
813 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
814 wl->fwlog_size += len;
816 return len;
819 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
821 u32 end_of_log = 0;
823 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
824 return;
826 wl1271_info("Reading FW panic log");
/*
 * Make sure the chip is awake and the logger isn't active.
 * Do not send a stop fwlog command if the fw is hung or if
 * dbgpins are used (due to some fw bug).
 */
833 if (wl1271_ps_elp_wakeup(wl))
834 return;
835 if (!wl->watchdog_recovery &&
836 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
837 wl12xx_cmd_stop_fwlog(wl);
839 /* Traverse the memory blocks linked list */
840 do {
841 end_of_log = wlcore_event_fw_logger(wl);
842 if (end_of_log == 0) {
843 msleep(100);
844 end_of_log = wlcore_event_fw_logger(wl);
846 } while (end_of_log != 0);
849 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
850 u8 hlid, struct ieee80211_sta *sta)
852 struct wl1271_station *wl_sta;
853 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
855 wl_sta = (void *)sta->drv_priv;
856 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
859 * increment the initial seq number on recovery to account for
860 * transmitted packets that we haven't yet got in the FW status
862 if (wlvif->encryption_type == KEY_GEM)
863 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
865 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
866 wl_sta->total_freed_pkts += sqn_recovery_padding;
869 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
870 struct wl12xx_vif *wlvif,
871 u8 hlid, const u8 *addr)
873 struct ieee80211_sta *sta;
874 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
876 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
877 is_zero_ether_addr(addr)))
878 return;
880 rcu_read_lock();
881 sta = ieee80211_find_sta(vif, addr);
882 if (sta)
883 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
884 rcu_read_unlock();
887 static void wlcore_print_recovery(struct wl1271 *wl)
889 u32 pc = 0;
890 u32 hint_sts = 0;
891 int ret;
893 wl1271_info("Hardware recovery in progress. FW ver: %s",
894 wl->chip.fw_ver_str);
896 /* change partitions momentarily so we can read the FW pc */
897 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
898 if (ret < 0)
899 return;
901 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
902 if (ret < 0)
903 return;
905 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
906 if (ret < 0)
907 return;
909 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
910 pc, hint_sts, ++wl->recovery_count);
912 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
916 static void wl1271_recovery_work(struct work_struct *work)
918 struct wl1271 *wl =
919 container_of(work, struct wl1271, recovery_work);
920 struct wl12xx_vif *wlvif;
921 struct ieee80211_vif *vif;
923 mutex_lock(&wl->mutex);
925 if (wl->state == WLCORE_STATE_OFF || wl->plt)
926 goto out_unlock;
928 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
929 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
930 wl12xx_read_fwlog_panic(wl);
931 wlcore_print_recovery(wl);
934 BUG_ON(wl->conf.recovery.bug_on_recovery &&
935 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
937 if (wl->conf.recovery.no_recovery) {
938 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
939 goto out_unlock;
942 /* Prevent spurious TX during FW restart */
943 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
945 /* reboot the chipset */
946 while (!list_empty(&wl->wlvif_list)) {
947 wlvif = list_first_entry(&wl->wlvif_list,
948 struct wl12xx_vif, list);
949 vif = wl12xx_wlvif_to_vif(wlvif);
951 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
952 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
953 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
954 vif->bss_conf.bssid);
957 __wl1271_op_remove_interface(wl, vif, false);
960 wlcore_op_stop_locked(wl);
962 ieee80211_restart_hw(wl->hw);
/*
 * It's safe to enable TX now - the queues are stopped after a request
 * to restart the HW.
 */
968 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
970 out_unlock:
971 wl->watchdog_recovery = false;
972 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
973 mutex_unlock(&wl->mutex);
976 static int wlcore_fw_wakeup(struct wl1271 *wl)
978 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
981 static int wlcore_fw_sleep(struct wl1271 *wl)
983 int ret;
985 mutex_lock(&wl->mutex);
986 ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
987 if (ret < 0) {
988 wl12xx_queue_recovery_work(wl);
989 goto out;
991 set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
992 out:
993 mutex_unlock(&wl->mutex);
994 mdelay(WL1271_SUSPEND_SLEEP);
996 return 0;
999 static int wl1271_setup(struct wl1271 *wl)
1001 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1002 if (!wl->raw_fw_status)
1003 goto err;
1005 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1006 if (!wl->fw_status)
1007 goto err;
1009 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1010 if (!wl->tx_res_if)
1011 goto err;
1013 return 0;
1014 err:
1015 kfree(wl->fw_status);
1016 kfree(wl->raw_fw_status);
1017 return -ENOMEM;
1020 static int wl12xx_set_power_on(struct wl1271 *wl)
1022 int ret;
1024 msleep(WL1271_PRE_POWER_ON_SLEEP);
1025 ret = wl1271_power_on(wl);
1026 if (ret < 0)
1027 goto out;
1028 msleep(WL1271_POWER_ON_SLEEP);
1029 wl1271_io_reset(wl);
1030 wl1271_io_init(wl);
1032 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1033 if (ret < 0)
1034 goto fail;
1036 /* ELP module wake up */
1037 ret = wlcore_fw_wakeup(wl);
1038 if (ret < 0)
1039 goto fail;
1041 out:
1042 return ret;
1044 fail:
1045 wl1271_power_off(wl);
1046 return ret;
1049 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1051 int ret = 0;
1053 ret = wl12xx_set_power_on(wl);
1054 if (ret < 0)
1055 goto out;
1058 * For wl127x based devices we could use the default block
1059 * size (512 bytes), but due to a bug in the sdio driver, we
1060 * need to set it explicitly after the chip is powered on. To
1061 * simplify the code and since the performance impact is
1062 * negligible, we use the same block size for all different
1063 * chip types.
1065 * Check if the bus supports blocksize alignment and, if it
1066 * doesn't, make sure we don't have the quirk.
1068 if (!wl1271_set_block_size(wl))
1069 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1071 /* TODO: make sure the lower driver has set things up correctly */
1073 ret = wl1271_setup(wl);
1074 if (ret < 0)
1075 goto out;
1077 ret = wl12xx_fetch_firmware(wl, plt);
1078 if (ret < 0)
1079 goto out;
1081 out:
1082 return ret;
1085 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1087 int retries = WL1271_BOOT_RETRIES;
1088 struct wiphy *wiphy = wl->hw->wiphy;
1090 static const char* const PLT_MODE[] = {
1091 "PLT_OFF",
1092 "PLT_ON",
1093 "PLT_FEM_DETECT",
1094 "PLT_CHIP_AWAKE"
1097 int ret;
1099 mutex_lock(&wl->mutex);
1101 wl1271_notice("power up");
1103 if (wl->state != WLCORE_STATE_OFF) {
1104 wl1271_error("cannot go into PLT state because not "
1105 "in off state: %d", wl->state);
1106 ret = -EBUSY;
1107 goto out;
1110 /* Indicate to lower levels that we are now in PLT mode */
1111 wl->plt = true;
1112 wl->plt_mode = plt_mode;
1114 while (retries) {
1115 retries--;
1116 ret = wl12xx_chip_wakeup(wl, true);
1117 if (ret < 0)
1118 goto power_off;
1120 if (plt_mode != PLT_CHIP_AWAKE) {
1121 ret = wl->ops->plt_init(wl);
1122 if (ret < 0)
1123 goto power_off;
1126 wl->state = WLCORE_STATE_ON;
1127 wl1271_notice("firmware booted in PLT mode %s (%s)",
1128 PLT_MODE[plt_mode],
1129 wl->chip.fw_ver_str);
1131 /* update hw/fw version info in wiphy struct */
1132 wiphy->hw_version = wl->chip.id;
1133 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1134 sizeof(wiphy->fw_version));
1136 goto out;
1138 power_off:
1139 wl1271_power_off(wl);
1142 wl->plt = false;
1143 wl->plt_mode = PLT_OFF;
1145 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1146 WL1271_BOOT_RETRIES);
1147 out:
1148 mutex_unlock(&wl->mutex);
1150 return ret;
1153 int wl1271_plt_stop(struct wl1271 *wl)
1155 int ret = 0;
1157 wl1271_notice("power down");
1160 * Interrupts must be disabled before setting the state to OFF.
1161 * Otherwise, the interrupt handler might be called and exit without
1162 * reading the interrupt status.
1164 wlcore_disable_interrupts(wl);
1165 mutex_lock(&wl->mutex);
1166 if (!wl->plt) {
1167 mutex_unlock(&wl->mutex);
1170 * This will not necessarily enable interrupts as interrupts
1171 * may have been disabled when op_stop was called. It will,
1172 * however, balance the above call to disable_interrupts().
1174 wlcore_enable_interrupts(wl);
1176 wl1271_error("cannot power down because not in PLT "
1177 "state: %d", wl->state);
1178 ret = -EBUSY;
1179 goto out;
1182 mutex_unlock(&wl->mutex);
1184 wl1271_flush_deferred_work(wl);
1185 cancel_work_sync(&wl->netstack_work);
1186 cancel_work_sync(&wl->recovery_work);
1187 cancel_delayed_work_sync(&wl->elp_work);
1188 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1190 mutex_lock(&wl->mutex);
1191 wl1271_power_off(wl);
1192 wl->flags = 0;
1193 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1194 wl->state = WLCORE_STATE_OFF;
1195 wl->plt = false;
1196 wl->plt_mode = PLT_OFF;
1197 wl->rx_counter = 0;
1198 mutex_unlock(&wl->mutex);
1200 out:
1201 return ret;
1204 static void wl1271_op_tx(struct ieee80211_hw *hw,
1205 struct ieee80211_tx_control *control,
1206 struct sk_buff *skb)
1208 struct wl1271 *wl = hw->priv;
1209 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1210 struct ieee80211_vif *vif = info->control.vif;
1211 struct wl12xx_vif *wlvif = NULL;
1212 unsigned long flags;
1213 int q, mapping;
1214 u8 hlid;
1216 if (!vif) {
1217 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1218 ieee80211_free_txskb(hw, skb);
1219 return;
1222 wlvif = wl12xx_vif_to_data(vif);
1223 mapping = skb_get_queue_mapping(skb);
1224 q = wl1271_tx_get_queue(mapping);
1226 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1228 spin_lock_irqsave(&wl->wl_lock, flags);
1231 * drop the packet if the link is invalid or the queue is stopped
1232 * for any reason but watermark. Watermark is a "soft"-stop so we
1233 * allow these packets through.
1235 if (hlid == WL12XX_INVALID_LINK_ID ||
1236 (!test_bit(hlid, wlvif->links_map)) ||
1237 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1238 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1239 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1240 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1241 ieee80211_free_txskb(hw, skb);
1242 goto out;
1245 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1246 hlid, q, skb->len);
1247 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1249 wl->tx_queue_count[q]++;
1250 wlvif->tx_queue_count[q]++;
/*
 * The workqueue is slow to process the tx_queue and we need to stop
 * the queue here, otherwise the queue will get too long.
 */
1256 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1257 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1258 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1259 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1260 wlcore_stop_queue_locked(wl, wlvif, q,
1261 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1265 * The chip specific setup must run before the first TX packet -
1266 * before that, the tx_work will not be initialized!
1269 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1270 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1271 ieee80211_queue_work(wl->hw, &wl->tx_work);
1273 out:
1274 spin_unlock_irqrestore(&wl->wl_lock, flags);
1277 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1279 unsigned long flags;
1280 int q;
1282 /* no need to queue a new dummy packet if one is already pending */
1283 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1284 return 0;
1286 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1288 spin_lock_irqsave(&wl->wl_lock, flags);
1289 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1290 wl->tx_queue_count[q]++;
1291 spin_unlock_irqrestore(&wl->wl_lock, flags);
1293 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1294 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1295 return wlcore_tx_work_locked(wl);
1298 * If the FW TX is busy, TX work will be scheduled by the threaded
1299 * interrupt handler function
1301 return 0;
/*
 * The size of the dummy packet should be at least 1400 bytes. However, in
 * order to minimize the number of bus transactions, aligning it to 512-byte
 * boundaries could be beneficial, performance-wise.
 */
1309 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
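/* ALIGN(1400, 512) rounds up to the next 512-byte multiple, i.e. 1536 bytes. */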
1311 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1313 struct sk_buff *skb;
1314 struct ieee80211_hdr_3addr *hdr;
1315 unsigned int dummy_packet_size;
1317 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1318 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1320 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1321 if (!skb) {
1322 wl1271_warning("Failed to allocate a dummy packet skb");
1323 return NULL;
1326 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1328 hdr = skb_put_zero(skb, sizeof(*hdr));
1329 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1330 IEEE80211_STYPE_NULLFUNC |
1331 IEEE80211_FCTL_TODS);
1333 skb_put_zero(skb, dummy_packet_size);
1335 /* Dummy packets require the TID to be management */
1336 skb->priority = WL1271_TID_MGMT;
1338 /* Initialize all fields that might be used */
1339 skb_set_queue_mapping(skb, 0);
1340 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1342 return skb;
1346 static int
1347 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1349 int num_fields = 0, in_field = 0, fields_size = 0;
1350 int i, pattern_len = 0;
1352 if (!p->mask) {
1353 wl1271_warning("No mask in WoWLAN pattern");
1354 return -EINVAL;
1358 * The pattern is broken up into segments of bytes at different offsets
1359 * that need to be checked by the FW filter. Each segment is called
1360 * a field in the FW API. We verify that the total number of fields
1361 * required for this pattern won't exceed FW limits (8)
1362 * as well as the total fields buffer won't exceed the FW limit.
1363 * Note that if there's a pattern which crosses Ethernet/IP header
1364 * boundary a new field is required.
1366 for (i = 0; i < p->pattern_len; i++) {
1367 if (test_bit(i, (unsigned long *)p->mask)) {
1368 if (!in_field) {
1369 in_field = 1;
1370 pattern_len = 1;
1371 } else {
1372 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1373 num_fields++;
1374 fields_size += pattern_len +
1375 RX_FILTER_FIELD_OVERHEAD;
1376 pattern_len = 1;
1377 } else
1378 pattern_len++;
1380 } else {
1381 if (in_field) {
1382 in_field = 0;
1383 fields_size += pattern_len +
1384 RX_FILTER_FIELD_OVERHEAD;
1385 num_fields++;
1390 if (in_field) {
1391 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1392 num_fields++;
1395 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1396 wl1271_warning("RX Filter too complex. Too many segments");
1397 return -EINVAL;
1400 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1401 wl1271_warning("RX filter pattern is too big");
1402 return -E2BIG;
1405 return 0;
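/*
 * Example: a pattern whose mask selects bytes 10..17 crosses the
 * Ethernet/IP boundary (WL1271_RX_FILTER_ETH_HEADER_SIZE), so it is
 * split into two fields, bytes 10..13 in the Ethernet header and bytes
 * 14..17 in the IP header, each contributing its pattern length plus
 * RX_FILTER_FIELD_OVERHEAD to fields_size.
 */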
1408 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1410 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1413 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1415 int i;
1417 if (filter == NULL)
1418 return;
1420 for (i = 0; i < filter->num_fields; i++)
1421 kfree(filter->fields[i].pattern);
1423 kfree(filter);
1426 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1427 u16 offset, u8 flags,
1428 const u8 *pattern, u8 len)
1430 struct wl12xx_rx_filter_field *field;
1432 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1433 wl1271_warning("Max fields per RX filter. can't alloc another");
1434 return -EINVAL;
1437 field = &filter->fields[filter->num_fields];
1439 field->pattern = kzalloc(len, GFP_KERNEL);
1440 if (!field->pattern) {
1441 wl1271_warning("Failed to allocate RX filter pattern");
1442 return -ENOMEM;
1445 filter->num_fields++;
1447 field->offset = cpu_to_le16(offset);
1448 field->flags = flags;
1449 field->len = len;
1450 memcpy(field->pattern, pattern, len);
1452 return 0;
1455 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1457 int i, fields_size = 0;
1459 for (i = 0; i < filter->num_fields; i++)
1460 fields_size += filter->fields[i].len +
1461 sizeof(struct wl12xx_rx_filter_field) -
1462 sizeof(u8 *);
1464 return fields_size;
1467 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1468 u8 *buf)
1470 int i;
1471 struct wl12xx_rx_filter_field *field;
1473 for (i = 0; i < filter->num_fields; i++) {
1474 field = (struct wl12xx_rx_filter_field *)buf;
1476 field->offset = filter->fields[i].offset;
1477 field->flags = filter->fields[i].flags;
1478 field->len = filter->fields[i].len;
1480 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1481 buf += sizeof(struct wl12xx_rx_filter_field) -
1482 sizeof(u8 *) + field->len;
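/*
 * Buffer layout produced above: for each field the fixed part of
 * struct wl12xx_rx_filter_field is copied minus the trailing pattern
 * pointer (hence the "- sizeof(u8 *)"), immediately followed by
 * field->len pattern bytes.  wl1271_rx_filter_get_fields_size() uses
 * the same formula, so the flattened buffer is sized exactly.
 */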
1487 * Allocates an RX filter returned through f
1488 * which needs to be freed using rx_filter_free()
1490 static int
1491 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1492 struct wl12xx_rx_filter **f)
1494 int i, j, ret = 0;
1495 struct wl12xx_rx_filter *filter;
1496 u16 offset;
1497 u8 flags, len;
1499 filter = wl1271_rx_filter_alloc();
1500 if (!filter) {
1501 wl1271_warning("Failed to alloc rx filter");
1502 ret = -ENOMEM;
1503 goto err;
1506 i = 0;
1507 while (i < p->pattern_len) {
1508 if (!test_bit(i, (unsigned long *)p->mask)) {
1509 i++;
1510 continue;
1513 for (j = i; j < p->pattern_len; j++) {
1514 if (!test_bit(j, (unsigned long *)p->mask))
1515 break;
1517 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1518 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1519 break;
1522 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1523 offset = i;
1524 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1525 } else {
1526 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1527 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1530 len = j - i;
1532 ret = wl1271_rx_filter_alloc_field(filter,
1533 offset,
1534 flags,
1535 &p->pattern[i], len);
1536 if (ret)
1537 goto err;
1539 i = j;
1542 filter->action = FILTER_SIGNAL;
1544 *f = filter;
1545 return 0;
1547 err:
1548 wl1271_rx_filter_free(filter);
1549 *f = NULL;
1551 return ret;
1554 static int wl1271_configure_wowlan(struct wl1271 *wl,
1555 struct cfg80211_wowlan *wow)
1557 int i, ret;
1559 if (!wow || wow->any || !wow->n_patterns) {
1560 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1561 FILTER_SIGNAL);
1562 if (ret)
1563 goto out;
1565 ret = wl1271_rx_filter_clear_all(wl);
1566 if (ret)
1567 goto out;
1569 return 0;
1572 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1573 return -EINVAL;
1575 /* Validate all incoming patterns before clearing current FW state */
1576 for (i = 0; i < wow->n_patterns; i++) {
1577 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1578 if (ret) {
1579 wl1271_warning("Bad wowlan pattern %d", i);
1580 return ret;
1584 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1585 if (ret)
1586 goto out;
1588 ret = wl1271_rx_filter_clear_all(wl);
1589 if (ret)
1590 goto out;
1592 /* Translate WoWLAN patterns into filters */
1593 for (i = 0; i < wow->n_patterns; i++) {
1594 struct cfg80211_pkt_pattern *p;
1595 struct wl12xx_rx_filter *filter = NULL;
1597 p = &wow->patterns[i];
1599 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1600 if (ret) {
1601 wl1271_warning("Failed to create an RX filter from "
1602 "wowlan pattern %d", i);
1603 goto out;
1606 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1608 wl1271_rx_filter_free(filter);
1609 if (ret)
1610 goto out;
1613 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1615 out:
1616 return ret;
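/*
 * Net effect: with no usable patterns the default RX filter is disabled
 * and all per-pattern filters are cleared, so every frame reaches the
 * host as usual.  With patterns, each one becomes an RX filter whose
 * action is FILTER_SIGNAL while the default action is set to
 * FILTER_DROP, so during WoWLAN suspend only matching frames wake the
 * host.
 */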
1619 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1620 struct wl12xx_vif *wlvif,
1621 struct cfg80211_wowlan *wow)
1623 int ret = 0;
1625 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1626 goto out;
1628 ret = wl1271_configure_wowlan(wl, wow);
1629 if (ret < 0)
1630 goto out;
1632 if ((wl->conf.conn.suspend_wake_up_event ==
1633 wl->conf.conn.wake_up_event) &&
1634 (wl->conf.conn.suspend_listen_interval ==
1635 wl->conf.conn.listen_interval))
1636 goto out;
1638 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1639 wl->conf.conn.suspend_wake_up_event,
1640 wl->conf.conn.suspend_listen_interval);
1642 if (ret < 0)
1643 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1644 out:
1645 return ret;
1649 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1650 struct wl12xx_vif *wlvif,
1651 struct cfg80211_wowlan *wow)
1653 int ret = 0;
1655 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1656 goto out;
1658 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1659 if (ret < 0)
1660 goto out;
1662 ret = wl1271_configure_wowlan(wl, wow);
1663 if (ret < 0)
1664 goto out;
1666 out:
1667 return ret;
1671 static int wl1271_configure_suspend(struct wl1271 *wl,
1672 struct wl12xx_vif *wlvif,
1673 struct cfg80211_wowlan *wow)
1675 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1676 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1677 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1678 return wl1271_configure_suspend_ap(wl, wlvif, wow);
1679 return 0;
1682 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1684 int ret = 0;
1685 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1686 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1688 if ((!is_ap) && (!is_sta))
1689 return;
1691 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1692 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1693 return;
1695 wl1271_configure_wowlan(wl, NULL);
1697 if (is_sta) {
1698 if ((wl->conf.conn.suspend_wake_up_event ==
1699 wl->conf.conn.wake_up_event) &&
1700 (wl->conf.conn.suspend_listen_interval ==
1701 wl->conf.conn.listen_interval))
1702 return;
1704 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1705 wl->conf.conn.wake_up_event,
1706 wl->conf.conn.listen_interval);
1708 if (ret < 0)
1709 wl1271_error("resume: wake up conditions failed: %d",
1710 ret);
1712 } else if (is_ap) {
1713 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1717 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1718 struct cfg80211_wowlan *wow)
1720 struct wl1271 *wl = hw->priv;
1721 struct wl12xx_vif *wlvif;
1722 int ret;
1724 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1725 WARN_ON(!wow);
1727 /* we want to perform the recovery before suspending */
1728 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1729 wl1271_warning("postponing suspend to perform recovery");
1730 return -EBUSY;
1733 wl1271_tx_flush(wl);
1735 mutex_lock(&wl->mutex);
1737 ret = wl1271_ps_elp_wakeup(wl);
1738 if (ret < 0) {
1739 mutex_unlock(&wl->mutex);
1740 return ret;
1743 wl->wow_enabled = true;
1744 wl12xx_for_each_wlvif(wl, wlvif) {
1745 if (wlcore_is_p2p_mgmt(wlvif))
1746 continue;
1748 ret = wl1271_configure_suspend(wl, wlvif, wow);
1749 if (ret < 0) {
1750 mutex_unlock(&wl->mutex);
1751 wl1271_warning("couldn't prepare device to suspend");
1752 return ret;
1756 /* disable fast link flow control notifications from FW */
1757 ret = wlcore_hw_interrupt_notify(wl, false);
1758 if (ret < 0)
1759 goto out_sleep;
1761 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1762 ret = wlcore_hw_rx_ba_filter(wl,
1763 !!wl->conf.conn.suspend_rx_ba_activity);
1764 if (ret < 0)
1765 goto out_sleep;
1767 out_sleep:
1768 mutex_unlock(&wl->mutex);
1770 if (ret < 0) {
1771 wl1271_warning("couldn't prepare device to suspend");
1772 return ret;
1775 /* flush any remaining work */
1776 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1779 * disable and re-enable interrupts in order to flush
1780 * the threaded_irq
1782 wlcore_disable_interrupts(wl);
1785 * set suspended flag to avoid triggering a new threaded_irq
1786 * work. no need for spinlock as interrupts are disabled.
1788 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1790 wlcore_enable_interrupts(wl);
1791 flush_work(&wl->tx_work);
1792 flush_delayed_work(&wl->elp_work);
1795 * Cancel the watchdog even if above tx_flush failed. We will detect
1796 * it on resume anyway.
1798 cancel_delayed_work(&wl->tx_watchdog_work);
/*
 * Use an immediate call to allow the firmware to go into power save
 * during suspend.
 * Using a workqueue for this last write was only happening on resume,
 * leaving the firmware with power save disabled during suspend while
 * consuming full power during wowlan suspend.
 */
1807 wlcore_fw_sleep(wl);
1809 return 0;
1812 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1814 struct wl1271 *wl = hw->priv;
1815 struct wl12xx_vif *wlvif;
1816 unsigned long flags;
1817 bool run_irq_work = false, pending_recovery;
1818 int ret;
1820 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1821 wl->wow_enabled);
1822 WARN_ON(!wl->wow_enabled);
1825 * re-enable irq_work enqueuing, and call irq_work directly if
1826 * there is a pending work.
1828 spin_lock_irqsave(&wl->wl_lock, flags);
1829 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1830 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1831 run_irq_work = true;
1832 spin_unlock_irqrestore(&wl->wl_lock, flags);
1834 mutex_lock(&wl->mutex);
1836 /* test the recovery flag before calling any SDIO functions */
1837 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1838 &wl->flags);
1840 if (run_irq_work) {
1841 wl1271_debug(DEBUG_MAC80211,
1842 "run postponed irq_work directly");
1844 /* don't talk to the HW if recovery is pending */
1845 if (!pending_recovery) {
1846 ret = wlcore_irq_locked(wl);
1847 if (ret)
1848 wl12xx_queue_recovery_work(wl);
1851 wlcore_enable_interrupts(wl);
1854 if (pending_recovery) {
1855 wl1271_warning("queuing forgotten recovery on resume");
1856 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1857 goto out_sleep;
1860 ret = wl1271_ps_elp_wakeup(wl);
1861 if (ret < 0)
1862 goto out;
1864 wl12xx_for_each_wlvif(wl, wlvif) {
1865 if (wlcore_is_p2p_mgmt(wlvif))
1866 continue;
1868 wl1271_configure_resume(wl, wlvif);
1871 ret = wlcore_hw_interrupt_notify(wl, true);
1872 if (ret < 0)
1873 goto out_sleep;
1875 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1876 ret = wlcore_hw_rx_ba_filter(wl, false);
1877 if (ret < 0)
1878 goto out_sleep;
1880 out_sleep:
1881 wl1271_ps_elp_sleep(wl);
1883 out:
1884 wl->wow_enabled = false;
1887 * Set a flag to re-init the watchdog on the first Tx after resume.
1888 * That way we avoid possible conditions where Tx-complete interrupts
1889 * fail to arrive and we perform a spurious recovery.
1891 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1892 mutex_unlock(&wl->mutex);
1894 return 0;
1897 static int wl1271_op_start(struct ieee80211_hw *hw)
1899 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1902 * We have to delay the booting of the hardware because
1903 * we need to know the local MAC address before downloading and
1904 * initializing the firmware. The MAC address cannot be changed
1905 * after boot, and without the proper MAC address, the firmware
1906 * will not function properly.
1908 * The MAC address is first known when the corresponding interface
1909 * is added. That is where we will initialize the hardware.
1912 return 0;
1915 static void wlcore_op_stop_locked(struct wl1271 *wl)
1917 int i;
1919 if (wl->state == WLCORE_STATE_OFF) {
1920 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1921 &wl->flags))
1922 wlcore_enable_interrupts(wl);
1924 return;
1928 * this must be before the cancel_work calls below, so that the work
1929 * functions don't perform further work.
1931 wl->state = WLCORE_STATE_OFF;
1934 * Use the nosync variant to disable interrupts, so the mutex could be
1935 * held while doing so without deadlocking.
1937 wlcore_disable_interrupts_nosync(wl);
1939 mutex_unlock(&wl->mutex);
1941 wlcore_synchronize_interrupts(wl);
1942 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1943 cancel_work_sync(&wl->recovery_work);
1944 wl1271_flush_deferred_work(wl);
1945 cancel_delayed_work_sync(&wl->scan_complete_work);
1946 cancel_work_sync(&wl->netstack_work);
1947 cancel_work_sync(&wl->tx_work);
1948 cancel_delayed_work_sync(&wl->elp_work);
1949 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1951 /* let's notify MAC80211 about the remaining pending TX frames */
1952 mutex_lock(&wl->mutex);
1953 wl12xx_tx_reset(wl);
1955 wl1271_power_off(wl);
1957 * In case a recovery was scheduled, interrupts were disabled to avoid
1958 * an interrupt storm. Now that the power is down, it is safe to
1959 * re-enable interrupts to balance the disable depth
1961 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1962 wlcore_enable_interrupts(wl);
1964 wl->band = NL80211_BAND_2GHZ;
1966 wl->rx_counter = 0;
1967 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1968 wl->channel_type = NL80211_CHAN_NO_HT;
1969 wl->tx_blocks_available = 0;
1970 wl->tx_allocated_blocks = 0;
1971 wl->tx_results_count = 0;
1972 wl->tx_packets_count = 0;
1973 wl->time_offset = 0;
1974 wl->ap_fw_ps_map = 0;
1975 wl->ap_ps_map = 0;
1976 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1977 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1978 memset(wl->links_map, 0, sizeof(wl->links_map));
1979 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1980 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1981 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1982 wl->active_sta_count = 0;
1983 wl->active_link_count = 0;
1985 /* The system link is always allocated */
1986 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1987 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1988 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1991 * this is performed after the cancel_work calls and the associated
1992 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1993 * get executed before all these vars have been reset.
1995 wl->flags = 0;
1997 wl->tx_blocks_freed = 0;
1999 for (i = 0; i < NUM_TX_QUEUES; i++) {
2000 wl->tx_pkts_freed[i] = 0;
2001 wl->tx_allocated_pkts[i] = 0;
2004 wl1271_debugfs_reset(wl);
2006 kfree(wl->raw_fw_status);
2007 wl->raw_fw_status = NULL;
2008 kfree(wl->fw_status);
2009 wl->fw_status = NULL;
2010 kfree(wl->tx_res_if);
2011 wl->tx_res_if = NULL;
2012 kfree(wl->target_mem_map);
2013 wl->target_mem_map = NULL;
/*
 * FW channels must be re-calibrated after recovery, so save the
 * current Reg-Domain channel configuration and clear it.
 */
2019 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2020 sizeof(wl->reg_ch_conf_pending));
2021 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2024 static void wlcore_op_stop(struct ieee80211_hw *hw)
2026 struct wl1271 *wl = hw->priv;
2028 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2030 mutex_lock(&wl->mutex);
2032 wlcore_op_stop_locked(wl);
2034 mutex_unlock(&wl->mutex);
2037 static void wlcore_channel_switch_work(struct work_struct *work)
2039 struct delayed_work *dwork;
2040 struct wl1271 *wl;
2041 struct ieee80211_vif *vif;
2042 struct wl12xx_vif *wlvif;
2043 int ret;
2045 dwork = to_delayed_work(work);
2046 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2047 wl = wlvif->wl;
2049 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2051 mutex_lock(&wl->mutex);
2053 if (unlikely(wl->state != WLCORE_STATE_ON))
2054 goto out;
2056 /* check the channel switch is still ongoing */
2057 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2058 goto out;
2060 vif = wl12xx_wlvif_to_vif(wlvif);
2061 ieee80211_chswitch_done(vif, false);
2063 ret = wl1271_ps_elp_wakeup(wl);
2064 if (ret < 0)
2065 goto out;
2067 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2069 wl1271_ps_elp_sleep(wl);
2070 out:
2071 mutex_unlock(&wl->mutex);
2074 static void wlcore_connection_loss_work(struct work_struct *work)
2076 struct delayed_work *dwork;
2077 struct wl1271 *wl;
2078 struct ieee80211_vif *vif;
2079 struct wl12xx_vif *wlvif;
2081 dwork = to_delayed_work(work);
2082 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2083 wl = wlvif->wl;
2085 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2087 mutex_lock(&wl->mutex);
2089 if (unlikely(wl->state != WLCORE_STATE_ON))
2090 goto out;
2092 /* Call mac80211 connection loss */
2093 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2094 goto out;
2096 vif = wl12xx_wlvif_to_vif(wlvif);
2097 ieee80211_connection_loss(vif);
2098 out:
2099 mutex_unlock(&wl->mutex);
2102 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2104 struct delayed_work *dwork;
2105 struct wl1271 *wl;
2106 struct wl12xx_vif *wlvif;
2107 unsigned long time_spare;
2108 int ret;
2110 dwork = to_delayed_work(work);
2111 wlvif = container_of(dwork, struct wl12xx_vif,
2112 pending_auth_complete_work);
2113 wl = wlvif->wl;
2115 mutex_lock(&wl->mutex);
2117 if (unlikely(wl->state != WLCORE_STATE_ON))
2118 goto out;
2121 * Make sure a second really passed since the last auth reply. Maybe
2122 * a second auth reply arrived while we were stuck on the mutex.
2123 * Check for a little less than the timeout to protect from scheduler
2124 * irregularities.
2126 time_spare = jiffies +
2127 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2128 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2129 goto out;
2131 ret = wl1271_ps_elp_wakeup(wl);
2132 if (ret < 0)
2133 goto out;
2135 /* cancel the ROC if active */
2136 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2138 wl1271_ps_elp_sleep(wl);
2139 out:
2140 mutex_unlock(&wl->mutex);
2143 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2145 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2146 WL12XX_MAX_RATE_POLICIES);
2147 if (policy >= WL12XX_MAX_RATE_POLICIES)
2148 return -EBUSY;
2150 __set_bit(policy, wl->rate_policies_map);
2151 *idx = policy;
2152 return 0;
2155 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2157 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2158 return;
2160 __clear_bit(*idx, wl->rate_policies_map);
2161 *idx = WL12XX_MAX_RATE_POLICIES;
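/*
 * The rate-policy and KLV-template helpers here share the same small
 * bitmap-allocator idiom: find_first_zero_bit() hands out a free slot,
 * __set_bit()/__clear_bit() track ownership, and a freed index is set
 * to the out-of-range maximum so that a stale user trips the WARN_ON()
 * instead of silently clobbering another slot.
 */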
2164 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2166 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2167 WLCORE_MAX_KLV_TEMPLATES);
2168 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2169 return -EBUSY;
2171 __set_bit(policy, wl->klv_templates_map);
2172 *idx = policy;
2173 return 0;
2176 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2178 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2179 return;
2181 __clear_bit(*idx, wl->klv_templates_map);
2182 *idx = WLCORE_MAX_KLV_TEMPLATES;
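/*
 * Map the interface's bss_type (and p2p/mesh flavour) to the firmware
 * role type passed to the role_enable command.
 */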
2185 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2187 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2189 switch (wlvif->bss_type) {
2190 case BSS_TYPE_AP_BSS:
2191 if (wlvif->p2p)
2192 return WL1271_ROLE_P2P_GO;
2193 else if (ieee80211_vif_is_mesh(vif))
2194 return WL1271_ROLE_MESH_POINT;
2195 else
2196 return WL1271_ROLE_AP;
2198 case BSS_TYPE_STA_BSS:
2199 if (wlvif->p2p)
2200 return WL1271_ROLE_P2P_CL;
2201 else
2202 return WL1271_ROLE_STA;
2204 case BSS_TYPE_IBSS:
2205 return WL1271_ROLE_IBSS;
2207 default:
2208 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2210 return WL12XX_INVALID_ROLE_TYPE;
2213 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2215 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2216 int i;
2218 /* clear everything but the persistent data */
2219 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2221 switch (ieee80211_vif_type_p2p(vif)) {
2222 case NL80211_IFTYPE_P2P_CLIENT:
2223 wlvif->p2p = 1;
2224 /* fall-through */
2225 case NL80211_IFTYPE_STATION:
2226 case NL80211_IFTYPE_P2P_DEVICE:
2227 wlvif->bss_type = BSS_TYPE_STA_BSS;
2228 break;
2229 case NL80211_IFTYPE_ADHOC:
2230 wlvif->bss_type = BSS_TYPE_IBSS;
2231 break;
2232 case NL80211_IFTYPE_P2P_GO:
2233 wlvif->p2p = 1;
2234 /* fall-through */
2235 case NL80211_IFTYPE_AP:
2236 case NL80211_IFTYPE_MESH_POINT:
2237 wlvif->bss_type = BSS_TYPE_AP_BSS;
2238 break;
2239 default:
2240 wlvif->bss_type = MAX_BSS_TYPE;
2241 return -EOPNOTSUPP;
2244 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2245 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2246 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2248 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2249 wlvif->bss_type == BSS_TYPE_IBSS) {
2250 /* init sta/ibss data */
2251 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2252 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2253 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2254 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2255 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2256 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2257 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2258 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2259 } else {
2260 /* init ap data */
2261 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2262 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2263 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2264 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2265 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2266 wl12xx_allocate_rate_policy(wl,
2267 &wlvif->ap.ucast_rate_idx[i]);
2268 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2270 * TODO: check if basic_rate shouldn't be
2271 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2272 * instead (the same thing for STA above).
2274 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2275 /* TODO: this seems to be used only for STA, check it */
2276 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2279 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2280 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2281 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2284 * mac80211 configures some values globally, while we treat them
2285 * per-interface. thus, on init, we have to copy them from wl
2287 wlvif->band = wl->band;
2288 wlvif->channel = wl->channel;
2289 wlvif->power_level = wl->power_level;
2290 wlvif->channel_type = wl->channel_type;
2292 INIT_WORK(&wlvif->rx_streaming_enable_work,
2293 wl1271_rx_streaming_enable_work);
2294 INIT_WORK(&wlvif->rx_streaming_disable_work,
2295 wl1271_rx_streaming_disable_work);
2296 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2297 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2298 wlcore_channel_switch_work);
2299 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2300 wlcore_connection_loss_work);
2301 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2302 wlcore_pending_auth_complete_work);
2303 INIT_LIST_HEAD(&wlvif->list);
2305 timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2306 return 0;
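/*
 * Power up and boot the firmware, retrying up to WL1271_BOOT_RETRIES
 * times. The chip is powered off between attempts, and on the error
 * path the mutex is dropped briefly to let pending IRQ work drain.
 */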
2309 static int wl12xx_init_fw(struct wl1271 *wl)
2311 int retries = WL1271_BOOT_RETRIES;
2312 bool booted = false;
2313 struct wiphy *wiphy = wl->hw->wiphy;
2314 int ret;
2316 while (retries) {
2317 retries--;
2318 ret = wl12xx_chip_wakeup(wl, false);
2319 if (ret < 0)
2320 goto power_off;
2322 ret = wl->ops->boot(wl);
2323 if (ret < 0)
2324 goto power_off;
2326 ret = wl1271_hw_init(wl);
2327 if (ret < 0)
2328 goto irq_disable;
2330 booted = true;
2331 break;
2333 irq_disable:
2334 mutex_unlock(&wl->mutex);
2335 /* Unlocking the mutex in the middle of handling is
2336 inherently unsafe. In this case we deem it safe to do,
2337 because we need to let any possibly pending IRQ out of
2338 the system (and while we are WLCORE_STATE_OFF the IRQ
2339 work function will not do anything.) Also, any other
2340 possible concurrent operations will fail due to the
2341 current state, hence the wl1271 struct should be safe. */
2342 wlcore_disable_interrupts(wl);
2343 wl1271_flush_deferred_work(wl);
2344 cancel_work_sync(&wl->netstack_work);
2345 mutex_lock(&wl->mutex);
2346 power_off:
2347 wl1271_power_off(wl);
2350 if (!booted) {
2351 wl1271_error("firmware boot failed despite %d retries",
2352 WL1271_BOOT_RETRIES);
2353 goto out;
2356 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2358 /* update hw/fw version info in wiphy struct */
2359 wiphy->hw_version = wl->chip.id;
2360 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2361 sizeof(wiphy->fw_version));
2364 * Now we know if 11a is supported (info from the NVS), so disable
2365 * 11a channels if not supported
2367 if (!wl->enable_11a)
2368 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2370 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2371 wl->enable_11a ? "" : "not ");
2373 wl->state = WLCORE_STATE_ON;
2374 out:
2375 return ret;
2378 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2380 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2384 * Check whether a fw switch (i.e. moving from one loaded
2385 * fw to another) is needed. This function is also responsible
2386 * for updating wl->last_vif_count, so it must be called before
2387 * loading a non-plt fw, so that the correct (single-role or multi-role)
2388 * fw will be used.
2390 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2391 struct vif_counter_data vif_counter_data,
2392 bool add)
2394 enum wl12xx_fw_type current_fw = wl->fw_type;
2395 u8 vif_count = vif_counter_data.counter;
2397 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2398 return false;
2400 /* increase the vif count if this is a new vif */
2401 if (add && !vif_counter_data.cur_vif_running)
2402 vif_count++;
2404 wl->last_vif_count = vif_count;
2406 /* no need for fw change if the device is OFF */
2407 if (wl->state == WLCORE_STATE_OFF)
2408 return false;
2410 /* no need for fw change if a single fw is used */
2411 if (!wl->mr_fw_name)
2412 return false;
2414 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2415 return true;
2416 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2417 return true;
2419 return false;
2423 * Enter "forced psm". Make sure the sta is in psm against the ap,
2424 * so the fw switch is a bit more resilient to disconnection.
2426 static void wl12xx_force_active_psm(struct wl1271 *wl)
2428 struct wl12xx_vif *wlvif;
2430 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2431 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
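/*
 * Each interface owns a contiguous block of NUM_TX_QUEUES mac80211 hw
 * queues. The iterator below marks the blocks already claimed by
 * running interfaces so wlcore_allocate_hw_queue_base() can pick a
 * free one for a new vif.
 */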
2435 struct wlcore_hw_queue_iter_data {
2436 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2437 /* current vif */
2438 struct ieee80211_vif *vif;
2439 /* is the current vif among those iterated */
2440 bool cur_running;
2443 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2444 struct ieee80211_vif *vif)
2446 struct wlcore_hw_queue_iter_data *iter_data = data;
2448 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2449 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2450 return;
2452 if (iter_data->cur_running || vif == iter_data->vif) {
2453 iter_data->cur_running = true;
2454 return;
2457 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2460 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2461 struct wl12xx_vif *wlvif)
2463 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2464 struct wlcore_hw_queue_iter_data iter_data = {};
2465 int i, q_base;
2467 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2468 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2469 return 0;
2472 iter_data.vif = vif;
2474 /* mark all bits taken by active interfaces */
2475 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2476 IEEE80211_IFACE_ITER_RESUME_ALL,
2477 wlcore_hw_queue_iter, &iter_data);
2479 /* the current vif is already running in mac80211 (resume/recovery) */
2480 if (iter_data.cur_running) {
2481 wlvif->hw_queue_base = vif->hw_queue[0];
2482 wl1271_debug(DEBUG_MAC80211,
2483 "using pre-allocated hw queue base %d",
2484 wlvif->hw_queue_base);
2486 /* the interface might have changed type */
2487 goto adjust_cab_queue;
2490 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2491 WLCORE_NUM_MAC_ADDRESSES);
2492 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2493 return -EBUSY;
2495 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2496 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2497 wlvif->hw_queue_base);
2499 for (i = 0; i < NUM_TX_QUEUES; i++) {
2500 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2501 /* register hw queues in mac80211 */
2502 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2505 adjust_cab_queue:
2506 /* the last places are reserved for cab queues per interface */
2507 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2508 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2509 wlvif->hw_queue_base / NUM_TX_QUEUES;
2510 else
2511 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2513 return 0;
2516 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2517 struct ieee80211_vif *vif)
2519 struct wl1271 *wl = hw->priv;
2520 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2521 struct vif_counter_data vif_count;
2522 int ret = 0;
2523 u8 role_type;
2525 if (wl->plt) {
2526 wl1271_error("Adding Interface not allowed while in PLT mode");
2527 return -EBUSY;
2530 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2531 IEEE80211_VIF_SUPPORTS_UAPSD |
2532 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2534 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2535 ieee80211_vif_type_p2p(vif), vif->addr);
2537 wl12xx_get_vif_count(hw, vif, &vif_count);
2539 mutex_lock(&wl->mutex);
2540 ret = wl1271_ps_elp_wakeup(wl);
2541 if (ret < 0)
2542 goto out_unlock;
2545 * in some corner-case HW recovery scenarios it's possible to
2546 * get here before __wl1271_op_remove_interface is complete, so
2547 * opt out if that is the case.
2549 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2550 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2551 ret = -EBUSY;
2552 goto out;
2556 ret = wl12xx_init_vif_data(wl, vif);
2557 if (ret < 0)
2558 goto out;
2560 wlvif->wl = wl;
2561 role_type = wl12xx_get_role_type(wl, wlvif);
2562 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2563 ret = -EINVAL;
2564 goto out;
2567 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2568 if (ret < 0)
2569 goto out;
2571 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2572 wl12xx_force_active_psm(wl);
2573 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2574 mutex_unlock(&wl->mutex);
2575 wl1271_recovery_work(&wl->recovery_work);
2576 return 0;
2580 * TODO: after the nvs issue is solved, move this block
2581 * to start(), and make sure the driver is ON here.
2583 if (wl->state == WLCORE_STATE_OFF) {
2585 * we still need this in order to configure the fw
2586 * while uploading the nvs
2588 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2590 ret = wl12xx_init_fw(wl);
2591 if (ret < 0)
2592 goto out;
2595 if (!wlcore_is_p2p_mgmt(wlvif)) {
2596 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2597 role_type, &wlvif->role_id);
2598 if (ret < 0)
2599 goto out;
2601 ret = wl1271_init_vif_specific(wl, vif);
2602 if (ret < 0)
2603 goto out;
2605 } else {
2606 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2607 &wlvif->dev_role_id);
2608 if (ret < 0)
2609 goto out;
2611 /* needed mainly for configuring rate policies */
2612 ret = wl1271_sta_hw_init(wl, wlvif);
2613 if (ret < 0)
2614 goto out;
2617 list_add(&wlvif->list, &wl->wlvif_list);
2618 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2620 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2621 wl->ap_count++;
2622 else
2623 wl->sta_count++;
2624 out:
2625 wl1271_ps_elp_sleep(wl);
2626 out_unlock:
2627 mutex_unlock(&wl->mutex);
2629 return ret;
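/*
 * Tear down a vif: abort any scan it owns, disable its firmware roles,
 * free its rate policies, keys and hlids, and cancel its per-vif works
 * (the mutex is dropped around the cancel_*_sync calls).
 */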
2632 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2633 struct ieee80211_vif *vif,
2634 bool reset_tx_queues)
2636 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2637 int i, ret;
2638 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2640 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2642 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2643 return;
2645 /* because of hardware recovery, we may get here twice */
2646 if (wl->state == WLCORE_STATE_OFF)
2647 return;
2649 wl1271_info("down");
2651 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2652 wl->scan_wlvif == wlvif) {
2653 struct cfg80211_scan_info info = {
2654 .aborted = true,
2658 * Rearm the tx watchdog just before idling scan. This
2659 * prevents just-finished scans from triggering the watchdog
2661 wl12xx_rearm_tx_watchdog_locked(wl);
2663 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2664 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2665 wl->scan_wlvif = NULL;
2666 wl->scan.req = NULL;
2667 ieee80211_scan_completed(wl->hw, &info);
2670 if (wl->sched_vif == wlvif)
2671 wl->sched_vif = NULL;
2673 if (wl->roc_vif == vif) {
2674 wl->roc_vif = NULL;
2675 ieee80211_remain_on_channel_expired(wl->hw);
2678 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2679 /* disable active roles */
2680 ret = wl1271_ps_elp_wakeup(wl);
2681 if (ret < 0)
2682 goto deinit;
2684 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2685 wlvif->bss_type == BSS_TYPE_IBSS) {
2686 if (wl12xx_dev_role_started(wlvif))
2687 wl12xx_stop_dev(wl, wlvif);
2690 if (!wlcore_is_p2p_mgmt(wlvif)) {
2691 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2692 if (ret < 0)
2693 goto deinit;
2694 } else {
2695 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2696 if (ret < 0)
2697 goto deinit;
2700 wl1271_ps_elp_sleep(wl);
2702 deinit:
2703 wl12xx_tx_reset_wlvif(wl, wlvif);
2705 /* clear all hlids (except system_hlid) */
2706 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2708 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2709 wlvif->bss_type == BSS_TYPE_IBSS) {
2710 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2711 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2712 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2713 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2714 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2715 } else {
2716 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2717 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2718 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2719 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2720 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2721 wl12xx_free_rate_policy(wl,
2722 &wlvif->ap.ucast_rate_idx[i]);
2723 wl1271_free_ap_keys(wl, wlvif);
2726 dev_kfree_skb(wlvif->probereq);
2727 wlvif->probereq = NULL;
2728 if (wl->last_wlvif == wlvif)
2729 wl->last_wlvif = NULL;
2730 list_del(&wlvif->list);
2731 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2732 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2733 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2735 if (is_ap)
2736 wl->ap_count--;
2737 else
2738 wl->sta_count--;
2741 * Last AP went down but stations remain: configure sleep auth according to STA.
2742 * Don't do this on unintended recovery.
2744 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2745 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2746 goto unlock;
2748 if (wl->ap_count == 0 && is_ap) {
2749 /* mask ap events */
2750 wl->event_mask &= ~wl->ap_event_mask;
2751 wl1271_event_unmask(wl);
2754 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2755 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2756 /* Configure for power according to debugfs */
2757 if (sta_auth != WL1271_PSM_ILLEGAL)
2758 wl1271_acx_sleep_auth(wl, sta_auth);
2759 /* Configure for ELP power saving */
2760 else
2761 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2764 unlock:
2765 mutex_unlock(&wl->mutex);
2767 del_timer_sync(&wlvif->rx_streaming_timer);
2768 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2769 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2770 cancel_work_sync(&wlvif->rc_update_work);
2771 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2772 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2773 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2775 mutex_lock(&wl->mutex);
2778 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2779 struct ieee80211_vif *vif)
2781 struct wl1271 *wl = hw->priv;
2782 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2783 struct wl12xx_vif *iter;
2784 struct vif_counter_data vif_count;
2786 wl12xx_get_vif_count(hw, vif, &vif_count);
2787 mutex_lock(&wl->mutex);
2789 if (wl->state == WLCORE_STATE_OFF ||
2790 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2791 goto out;
2794 * wl->vif can be null here if someone shuts down the interface
2795 * just when hardware recovery has been started.
2797 wl12xx_for_each_wlvif(wl, iter) {
2798 if (iter != wlvif)
2799 continue;
2801 __wl1271_op_remove_interface(wl, vif, true);
2802 break;
2804 WARN_ON(iter != wlvif);
2805 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2806 wl12xx_force_active_psm(wl);
2807 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2808 wl12xx_queue_recovery_work(wl);
2810 out:
2811 mutex_unlock(&wl->mutex);
2814 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2815 struct ieee80211_vif *vif,
2816 enum nl80211_iftype new_type, bool p2p)
2818 struct wl1271 *wl = hw->priv;
2819 int ret;
2821 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2822 wl1271_op_remove_interface(hw, vif);
2824 vif->type = new_type;
2825 vif->p2p = p2p;
2826 ret = wl1271_op_add_interface(hw, vif);
2828 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2829 return ret;
2832 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2834 int ret;
2835 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2838 * One of the side effects of the JOIN command is that it clears
2839 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2840 * to a WPA/WPA2 access point will therefore kill the data-path.
2841 * Currently the only valid scenario for JOIN during association
2842 * is on roaming, in which case we will also be given new keys.
2843 * Keep the below message for now, unless it starts bothering
2844 * users who really like to roam a lot :)
2846 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2847 wl1271_info("JOIN while associated.");
2849 /* clear encryption type */
2850 wlvif->encryption_type = KEY_NONE;
2852 if (is_ibss)
2853 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2854 else {
2855 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2857 * TODO: this is an ugly workaround for wl12xx fw
2858 * bug - we are not able to tx/rx after the first
2859 * start_sta, so make dummy start+stop calls,
2860 * and then call start_sta again.
2861 * this should be fixed in the fw.
2863 wl12xx_cmd_role_start_sta(wl, wlvif);
2864 wl12xx_cmd_role_stop_sta(wl, wlvif);
2867 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2870 return ret;
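/*
 * Locate the SSID IE in the given frame and cache its contents in
 * wlvif->ssid / wlvif->ssid_len.
 */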
2873 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2874 int offset)
2876 u8 ssid_len;
2877 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2878 skb->len - offset);
2880 if (!ptr) {
2881 wl1271_error("No SSID in IEs!");
2882 return -ENOENT;
2885 ssid_len = ptr[1];
2886 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2887 wl1271_error("SSID is too long!");
2888 return -EINVAL;
2891 wlvif->ssid_len = ssid_len;
2892 memcpy(wlvif->ssid, ptr+2, ssid_len);
2893 return 0;
2896 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2898 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2899 struct sk_buff *skb;
2900 int ieoffset;
2902 /* we currently only support setting the ssid from the ap probe req */
2903 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2904 return -EINVAL;
2906 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2907 if (!skb)
2908 return -EINVAL;
2910 ieoffset = offsetof(struct ieee80211_mgmt,
2911 u.probe_req.variable);
2912 wl1271_ssid_set(wlvif, skb, ieoffset);
2913 dev_kfree_skb(skb);
2915 return 0;
2918 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2919 struct ieee80211_bss_conf *bss_conf,
2920 u32 sta_rate_set)
2922 int ieoffset;
2923 int ret;
2925 wlvif->aid = bss_conf->aid;
2926 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2927 wlvif->beacon_int = bss_conf->beacon_int;
2928 wlvif->wmm_enabled = bss_conf->qos;
2930 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2933 * with wl1271, we don't need to update the
2934 * beacon_int and dtim_period, because the firmware
2935 * updates them by itself when the first beacon is
2936 * received after a join.
2938 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2939 if (ret < 0)
2940 return ret;
2943 * Get a template for hardware connection maintenance
2945 dev_kfree_skb(wlvif->probereq);
2946 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2947 wlvif,
2948 NULL);
2949 ieoffset = offsetof(struct ieee80211_mgmt,
2950 u.probe_req.variable);
2951 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2953 /* enable the connection monitoring feature */
2954 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2955 if (ret < 0)
2956 return ret;
2959 * The join command disables the keep-alive mode, shuts down its process,
2960 * and also clears the template config, so we need to reset it all after
2961 * the join. The acx_aid starts the keep-alive process, and the order
2962 * of the commands below is relevant.
2964 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2965 if (ret < 0)
2966 return ret;
2968 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2969 if (ret < 0)
2970 return ret;
2972 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2973 if (ret < 0)
2974 return ret;
2976 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2977 wlvif->sta.klv_template_id,
2978 ACX_KEEP_ALIVE_TPL_VALID);
2979 if (ret < 0)
2980 return ret;
2983 * The default fw psm configuration is AUTO, while mac80211 default
2984 * setting is off (ACTIVE), so sync the fw with the correct value.
2986 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2987 if (ret < 0)
2988 return ret;
2990 if (sta_rate_set) {
2991 wlvif->rate_set =
2992 wl1271_tx_enabled_rates_get(wl,
2993 sta_rate_set,
2994 wlvif->band);
2995 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2996 if (ret < 0)
2997 return ret;
3000 return ret;
3003 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3005 int ret;
3006 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3008 /* make sure we are associated (sta) */
3009 if (sta &&
3010 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3011 return false;
3013 /* make sure we are joined (ibss) */
3014 if (!sta &&
3015 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3016 return false;
3018 if (sta) {
3019 /* use defaults when not associated */
3020 wlvif->aid = 0;
3022 /* free probe-request template */
3023 dev_kfree_skb(wlvif->probereq);
3024 wlvif->probereq = NULL;
3026 /* disable connection monitor features */
3027 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3028 if (ret < 0)
3029 return ret;
3031 /* Disable the keep-alive feature */
3032 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3033 if (ret < 0)
3034 return ret;
3036 /* disable beacon filtering */
3037 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3038 if (ret < 0)
3039 return ret;
3042 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3043 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3045 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3046 ieee80211_chswitch_done(vif, false);
3047 cancel_delayed_work(&wlvif->channel_switch_work);
3050 /* invalidate keep-alive template */
3051 wl1271_acx_keep_alive_config(wl, wlvif,
3052 wlvif->sta.klv_template_id,
3053 ACX_KEEP_ALIVE_TPL_INVALID);
3055 return 0;
3058 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3060 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3061 wlvif->rate_set = wlvif->basic_rate_set;
3064 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3065 bool idle)
3067 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3069 if (idle == cur_idle)
3070 return;
3072 if (idle) {
3073 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3074 } else {
3075 /* The current firmware only supports sched_scan in idle */
3076 if (wl->sched_vif == wlvif)
3077 wl->ops->sched_scan_stop(wl, wlvif);
3079 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3083 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3084 struct ieee80211_conf *conf, u32 changed)
3086 int ret;
3088 if (wlcore_is_p2p_mgmt(wlvif))
3089 return 0;
3091 if (conf->power_level != wlvif->power_level) {
3092 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3093 if (ret < 0)
3094 return ret;
3096 wlvif->power_level = conf->power_level;
3099 return 0;
3102 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3104 struct wl1271 *wl = hw->priv;
3105 struct wl12xx_vif *wlvif;
3106 struct ieee80211_conf *conf = &hw->conf;
3107 int ret = 0;
3109 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3110 " changed 0x%x",
3111 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3112 conf->power_level,
3113 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3114 changed);
3116 mutex_lock(&wl->mutex);
3118 if (changed & IEEE80211_CONF_CHANGE_POWER)
3119 wl->power_level = conf->power_level;
3121 if (unlikely(wl->state != WLCORE_STATE_ON))
3122 goto out;
3124 ret = wl1271_ps_elp_wakeup(wl);
3125 if (ret < 0)
3126 goto out;
3128 /* configure each interface */
3129 wl12xx_for_each_wlvif(wl, wlvif) {
3130 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3131 if (ret < 0)
3132 goto out_sleep;
3135 out_sleep:
3136 wl1271_ps_elp_sleep(wl);
3138 out:
3139 mutex_unlock(&wl->mutex);
3141 return ret;
3144 struct wl1271_filter_params {
3145 bool enabled;
3146 int mc_list_length;
3147 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3150 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3151 struct netdev_hw_addr_list *mc_list)
3153 struct wl1271_filter_params *fp;
3154 struct netdev_hw_addr *ha;
3156 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3157 if (!fp) {
3158 wl1271_error("Out of memory setting filters.");
3159 return 0;
3162 /* update multicast filtering parameters */
3163 fp->mc_list_length = 0;
3164 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3165 fp->enabled = false;
3166 } else {
3167 fp->enabled = true;
3168 netdev_hw_addr_list_for_each(ha, mc_list) {
3169 memcpy(fp->mc_list[fp->mc_list_length],
3170 ha->addr, ETH_ALEN);
3171 fp->mc_list_length++;
3175 return (u64)(unsigned long)fp;
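/*
 * The multicast list built in prepare_multicast() is handed back to
 * configure_filter() as the u64 cookie and applied per vif through the
 * group address table, unless FIF_ALLMULTI asks for the firmware
 * filter to be disabled entirely.
 */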
3178 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3179 FIF_FCSFAIL | \
3180 FIF_BCN_PRBRESP_PROMISC | \
3181 FIF_CONTROL | \
3182 FIF_OTHER_BSS)
3184 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3185 unsigned int changed,
3186 unsigned int *total, u64 multicast)
3188 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3189 struct wl1271 *wl = hw->priv;
3190 struct wl12xx_vif *wlvif;
3192 int ret;
3194 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3195 " total %x", changed, *total);
3197 mutex_lock(&wl->mutex);
3199 *total &= WL1271_SUPPORTED_FILTERS;
3200 changed &= WL1271_SUPPORTED_FILTERS;
3202 if (unlikely(wl->state != WLCORE_STATE_ON))
3203 goto out;
3205 ret = wl1271_ps_elp_wakeup(wl);
3206 if (ret < 0)
3207 goto out;
3209 wl12xx_for_each_wlvif(wl, wlvif) {
3210 if (wlcore_is_p2p_mgmt(wlvif))
3211 continue;
3213 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3214 if (*total & FIF_ALLMULTI)
3215 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3216 false,
3217 NULL, 0);
3218 else if (fp)
3219 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3220 fp->enabled,
3221 fp->mc_list,
3222 fp->mc_list_length);
3223 if (ret < 0)
3224 goto out_sleep;
3228 * If the interface is in AP mode and was created with allmulticast, disable
3229 * the firmware filters so that all multicast packets are passed.
3230 * This is mandatory for mDNS-based discovery protocols.
3232 if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3233 if (*total & FIF_ALLMULTI) {
3234 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3235 false,
3236 NULL, 0);
3237 if (ret < 0)
3238 goto out_sleep;
3244 * the fw doesn't provide an api to configure the filters. instead,
3245 * the filter configuration is based on the active roles / ROC
3246 * state.
3249 out_sleep:
3250 wl1271_ps_elp_sleep(wl);
3252 out:
3253 mutex_unlock(&wl->mutex);
3254 kfree(fp);
3257 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3258 u8 id, u8 key_type, u8 key_size,
3259 const u8 *key, u8 hlid, u32 tx_seq_32,
3260 u16 tx_seq_16)
3262 struct wl1271_ap_key *ap_key;
3263 int i;
3265 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3267 if (key_size > MAX_KEY_SIZE)
3268 return -EINVAL;
3271 * Find next free entry in ap_keys. Also check we are not replacing
3272 * an existing key.
3274 for (i = 0; i < MAX_NUM_KEYS; i++) {
3275 if (wlvif->ap.recorded_keys[i] == NULL)
3276 break;
3278 if (wlvif->ap.recorded_keys[i]->id == id) {
3279 wl1271_warning("trying to record key replacement");
3280 return -EINVAL;
3284 if (i == MAX_NUM_KEYS)
3285 return -EBUSY;
3287 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3288 if (!ap_key)
3289 return -ENOMEM;
3291 ap_key->id = id;
3292 ap_key->key_type = key_type;
3293 ap_key->key_size = key_size;
3294 memcpy(ap_key->key, key, key_size);
3295 ap_key->hlid = hlid;
3296 ap_key->tx_seq_32 = tx_seq_32;
3297 ap_key->tx_seq_16 = tx_seq_16;
3299 wlvif->ap.recorded_keys[i] = ap_key;
3300 return 0;
3303 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3305 int i;
3307 for (i = 0; i < MAX_NUM_KEYS; i++) {
3308 kfree(wlvif->ap.recorded_keys[i]);
3309 wlvif->ap.recorded_keys[i] = NULL;
3313 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3315 int i, ret = 0;
3316 struct wl1271_ap_key *key;
3317 bool wep_key_added = false;
3319 for (i = 0; i < MAX_NUM_KEYS; i++) {
3320 u8 hlid;
3321 if (wlvif->ap.recorded_keys[i] == NULL)
3322 break;
3324 key = wlvif->ap.recorded_keys[i];
3325 hlid = key->hlid;
3326 if (hlid == WL12XX_INVALID_LINK_ID)
3327 hlid = wlvif->ap.bcast_hlid;
3329 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3330 key->id, key->key_type,
3331 key->key_size, key->key,
3332 hlid, key->tx_seq_32,
3333 key->tx_seq_16);
3334 if (ret < 0)
3335 goto out;
3337 if (key->key_type == KEY_WEP)
3338 wep_key_added = true;
3341 if (wep_key_added) {
3342 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3343 wlvif->ap.bcast_hlid);
3344 if (ret < 0)
3345 goto out;
3348 out:
3349 wl1271_free_ap_keys(wl, wlvif);
3350 return ret;
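/*
 * Program a single key into the firmware. For AP vifs, keys set before
 * the AP role is started are only recorded and applied later by
 * wl1271_ap_init_hwenc(); for STA vifs, unicast key removal is
 * silently ignored because the next CMD_JOIN clears those keys anyway.
 */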
3353 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3354 u16 action, u8 id, u8 key_type,
3355 u8 key_size, const u8 *key, u32 tx_seq_32,
3356 u16 tx_seq_16, struct ieee80211_sta *sta)
3358 int ret;
3359 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3361 if (is_ap) {
3362 struct wl1271_station *wl_sta;
3363 u8 hlid;
3365 if (sta) {
3366 wl_sta = (struct wl1271_station *)sta->drv_priv;
3367 hlid = wl_sta->hlid;
3368 } else {
3369 hlid = wlvif->ap.bcast_hlid;
3372 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3374 * We do not support removing keys after AP shutdown.
3375 * Pretend we do to make mac80211 happy.
3377 if (action != KEY_ADD_OR_REPLACE)
3378 return 0;
3380 ret = wl1271_record_ap_key(wl, wlvif, id,
3381 key_type, key_size,
3382 key, hlid, tx_seq_32,
3383 tx_seq_16);
3384 } else {
3385 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3386 id, key_type, key_size,
3387 key, hlid, tx_seq_32,
3388 tx_seq_16);
3391 if (ret < 0)
3392 return ret;
3393 } else {
3394 const u8 *addr;
3395 static const u8 bcast_addr[ETH_ALEN] = {
3396 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3399 addr = sta ? sta->addr : bcast_addr;
3401 if (is_zero_ether_addr(addr)) {
3402 /* We don't support TX-only encryption */
3403 return -EOPNOTSUPP;
3406 /* The wl1271 does not allow removing unicast keys - they
3407 will be cleared automatically on the next CMD_JOIN. Ignore the
3408 request silently, as we don't want mac80211 to emit
3409 an error message. */
3410 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3411 return 0;
3413 /* don't remove key if hlid was already deleted */
3414 if (action == KEY_REMOVE &&
3415 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3416 return 0;
3418 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3419 id, key_type, key_size,
3420 key, addr, tx_seq_32,
3421 tx_seq_16);
3422 if (ret < 0)
3423 return ret;
3427 return 0;
3430 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3431 struct ieee80211_vif *vif,
3432 struct ieee80211_sta *sta,
3433 struct ieee80211_key_conf *key_conf)
3435 struct wl1271 *wl = hw->priv;
3436 int ret;
3437 bool might_change_spare =
3438 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3439 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3441 if (might_change_spare) {
3443 * stop the queues and flush to ensure the next packets are
3444 * in sync with FW spare block accounting
3446 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3447 wl1271_tx_flush(wl);
3450 mutex_lock(&wl->mutex);
3452 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3453 ret = -EAGAIN;
3454 goto out_wake_queues;
3457 ret = wl1271_ps_elp_wakeup(wl);
3458 if (ret < 0)
3459 goto out_wake_queues;
3461 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3463 wl1271_ps_elp_sleep(wl);
3465 out_wake_queues:
3466 if (might_change_spare)
3467 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3469 mutex_unlock(&wl->mutex);
3471 return ret;
3474 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3475 struct ieee80211_vif *vif,
3476 struct ieee80211_sta *sta,
3477 struct ieee80211_key_conf *key_conf)
3479 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3480 int ret;
3481 u32 tx_seq_32 = 0;
3482 u16 tx_seq_16 = 0;
3483 u8 key_type;
3484 u8 hlid;
3486 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3488 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3489 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3490 key_conf->cipher, key_conf->keyidx,
3491 key_conf->keylen, key_conf->flags);
3492 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3494 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3495 if (sta) {
3496 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3497 hlid = wl_sta->hlid;
3498 } else {
3499 hlid = wlvif->ap.bcast_hlid;
3501 else
3502 hlid = wlvif->sta.hlid;
3504 if (hlid != WL12XX_INVALID_LINK_ID) {
3505 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3506 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3507 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3510 switch (key_conf->cipher) {
3511 case WLAN_CIPHER_SUITE_WEP40:
3512 case WLAN_CIPHER_SUITE_WEP104:
3513 key_type = KEY_WEP;
3515 key_conf->hw_key_idx = key_conf->keyidx;
3516 break;
3517 case WLAN_CIPHER_SUITE_TKIP:
3518 key_type = KEY_TKIP;
3519 key_conf->hw_key_idx = key_conf->keyidx;
3520 break;
3521 case WLAN_CIPHER_SUITE_CCMP:
3522 key_type = KEY_AES;
3523 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3524 break;
3525 case WL1271_CIPHER_SUITE_GEM:
3526 key_type = KEY_GEM;
3527 break;
3528 default:
3529 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3531 return -EOPNOTSUPP;
3534 switch (cmd) {
3535 case SET_KEY:
3536 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3537 key_conf->keyidx, key_type,
3538 key_conf->keylen, key_conf->key,
3539 tx_seq_32, tx_seq_16, sta);
3540 if (ret < 0) {
3541 wl1271_error("Could not add or replace key");
3542 return ret;
3546 * reconfigure the arp response if the unicast (or common)
3547 * encryption key type has changed
3549 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3550 (sta || key_type == KEY_WEP) &&
3551 wlvif->encryption_type != key_type) {
3552 wlvif->encryption_type = key_type;
3553 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3554 if (ret < 0) {
3555 wl1271_warning("build arp rsp failed: %d", ret);
3556 return ret;
3559 break;
3561 case DISABLE_KEY:
3562 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3563 key_conf->keyidx, key_type,
3564 key_conf->keylen, key_conf->key,
3565 0, 0, sta);
3566 if (ret < 0) {
3567 wl1271_error("Could not remove key");
3568 return ret;
3570 break;
3572 default:
3573 wl1271_error("Unsupported key cmd 0x%x", cmd);
3574 return -EOPNOTSUPP;
3577 return ret;
3579 EXPORT_SYMBOL_GPL(wlcore_set_key);
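/*
 * mac80211 callback: remember the default key index and, for WEP,
 * push it to the firmware immediately.
 */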
3581 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3582 struct ieee80211_vif *vif,
3583 int key_idx)
3585 struct wl1271 *wl = hw->priv;
3586 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3587 int ret;
3589 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3590 key_idx);
3592 /* we don't handle unsetting of default key */
3593 if (key_idx == -1)
3594 return;
3596 mutex_lock(&wl->mutex);
3598 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3599 ret = -EAGAIN;
3600 goto out_unlock;
3603 ret = wl1271_ps_elp_wakeup(wl);
3604 if (ret < 0)
3605 goto out_unlock;
3607 wlvif->default_key = key_idx;
3609 /* the default WEP key needs to be configured at least once */
3610 if (wlvif->encryption_type == KEY_WEP) {
3611 ret = wl12xx_cmd_set_default_wep_key(wl,
3612 key_idx,
3613 wlvif->sta.hlid);
3614 if (ret < 0)
3615 goto out_sleep;
3618 out_sleep:
3619 wl1271_ps_elp_sleep(wl);
3621 out_unlock:
3622 mutex_unlock(&wl->mutex);
3625 void wlcore_regdomain_config(struct wl1271 *wl)
3627 int ret;
3629 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3630 return;
3632 mutex_lock(&wl->mutex);
3634 if (unlikely(wl->state != WLCORE_STATE_ON))
3635 goto out;
3637 ret = wl1271_ps_elp_wakeup(wl);
3638 if (ret < 0)
3639 goto out;
3641 ret = wlcore_cmd_regdomain_config_locked(wl);
3642 if (ret < 0) {
3643 wl12xx_queue_recovery_work(wl);
3644 goto out;
3647 wl1271_ps_elp_sleep(wl);
3648 out:
3649 mutex_unlock(&wl->mutex);
3652 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3653 struct ieee80211_vif *vif,
3654 struct ieee80211_scan_request *hw_req)
3656 struct cfg80211_scan_request *req = &hw_req->req;
3657 struct wl1271 *wl = hw->priv;
3658 int ret;
3659 u8 *ssid = NULL;
3660 size_t len = 0;
3662 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3664 if (req->n_ssids) {
3665 ssid = req->ssids[0].ssid;
3666 len = req->ssids[0].ssid_len;
3669 mutex_lock(&wl->mutex);
3671 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3673 * We cannot return -EBUSY here because cfg80211 will expect
3674 * a call to ieee80211_scan_completed if we do - in this case
3675 * there won't be any call.
3677 ret = -EAGAIN;
3678 goto out;
3681 ret = wl1271_ps_elp_wakeup(wl);
3682 if (ret < 0)
3683 goto out;
3685 /* fail if there is any role in ROC */
3686 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3687 /* don't allow scanning right now */
3688 ret = -EBUSY;
3689 goto out_sleep;
3692 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3693 out_sleep:
3694 wl1271_ps_elp_sleep(wl);
3695 out:
3696 mutex_unlock(&wl->mutex);
3698 return ret;
3701 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3702 struct ieee80211_vif *vif)
3704 struct wl1271 *wl = hw->priv;
3705 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3706 struct cfg80211_scan_info info = {
3707 .aborted = true,
3709 int ret;
3711 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3713 mutex_lock(&wl->mutex);
3715 if (unlikely(wl->state != WLCORE_STATE_ON))
3716 goto out;
3718 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3719 goto out;
3721 ret = wl1271_ps_elp_wakeup(wl);
3722 if (ret < 0)
3723 goto out;
3725 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3726 ret = wl->ops->scan_stop(wl, wlvif);
3727 if (ret < 0)
3728 goto out_sleep;
3732 * Rearm the tx watchdog just before idling scan. This
3733 * prevents just-finished scans from triggering the watchdog
3735 wl12xx_rearm_tx_watchdog_locked(wl);
3737 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3738 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3739 wl->scan_wlvif = NULL;
3740 wl->scan.req = NULL;
3741 ieee80211_scan_completed(wl->hw, &info);
3743 out_sleep:
3744 wl1271_ps_elp_sleep(wl);
3745 out:
3746 mutex_unlock(&wl->mutex);
3748 cancel_delayed_work_sync(&wl->scan_complete_work);
3751 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3752 struct ieee80211_vif *vif,
3753 struct cfg80211_sched_scan_request *req,
3754 struct ieee80211_scan_ies *ies)
3756 struct wl1271 *wl = hw->priv;
3757 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3758 int ret;
3760 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3762 mutex_lock(&wl->mutex);
3764 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3765 ret = -EAGAIN;
3766 goto out;
3769 ret = wl1271_ps_elp_wakeup(wl);
3770 if (ret < 0)
3771 goto out;
3773 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3774 if (ret < 0)
3775 goto out_sleep;
3777 wl->sched_vif = wlvif;
3779 out_sleep:
3780 wl1271_ps_elp_sleep(wl);
3781 out:
3782 mutex_unlock(&wl->mutex);
3783 return ret;
3786 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3787 struct ieee80211_vif *vif)
3789 struct wl1271 *wl = hw->priv;
3790 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3791 int ret;
3793 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3795 mutex_lock(&wl->mutex);
3797 if (unlikely(wl->state != WLCORE_STATE_ON))
3798 goto out;
3800 ret = wl1271_ps_elp_wakeup(wl);
3801 if (ret < 0)
3802 goto out;
3804 wl->ops->sched_scan_stop(wl, wlvif);
3806 wl1271_ps_elp_sleep(wl);
3807 out:
3808 mutex_unlock(&wl->mutex);
3810 return 0;
3813 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3815 struct wl1271 *wl = hw->priv;
3816 int ret = 0;
3818 mutex_lock(&wl->mutex);
3820 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3821 ret = -EAGAIN;
3822 goto out;
3825 ret = wl1271_ps_elp_wakeup(wl);
3826 if (ret < 0)
3827 goto out;
3829 ret = wl1271_acx_frag_threshold(wl, value);
3830 if (ret < 0)
3831 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3833 wl1271_ps_elp_sleep(wl);
3835 out:
3836 mutex_unlock(&wl->mutex);
3838 return ret;
3841 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3843 struct wl1271 *wl = hw->priv;
3844 struct wl12xx_vif *wlvif;
3845 int ret = 0;
3847 mutex_lock(&wl->mutex);
3849 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3850 ret = -EAGAIN;
3851 goto out;
3854 ret = wl1271_ps_elp_wakeup(wl);
3855 if (ret < 0)
3856 goto out;
3858 wl12xx_for_each_wlvif(wl, wlvif) {
3859 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3860 if (ret < 0)
3861 wl1271_warning("set rts threshold failed: %d", ret);
3863 wl1271_ps_elp_sleep(wl);
3865 out:
3866 mutex_unlock(&wl->mutex);
3868 return ret;
3871 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3873 int len;
3874 const u8 *next, *end = skb->data + skb->len;
3875 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3876 skb->len - ieoffset);
3877 if (!ie)
3878 return;
3879 len = ie[1] + 2;
3880 next = ie + len;
3881 memmove(ie, next, end - next);
3882 skb_trim(skb, skb->len - len);
3885 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3886 unsigned int oui, u8 oui_type,
3887 int ieoffset)
3889 int len;
3890 const u8 *next, *end = skb->data + skb->len;
3891 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3892 skb->data + ieoffset,
3893 skb->len - ieoffset);
3894 if (!ie)
3895 return;
3896 len = ie[1] + 2;
3897 next = ie + len;
3898 memmove(ie, next, end - next);
3899 skb_trim(skb, skb->len - len);
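/*
 * Upload the probe response template for an AP vif using the frame
 * generated by mac80211, and mark the vif so beacon changes won't
 * overwrite it.
 */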
3902 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3903 struct ieee80211_vif *vif)
3905 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3906 struct sk_buff *skb;
3907 int ret;
3909 skb = ieee80211_proberesp_get(wl->hw, vif);
3910 if (!skb)
3911 return -EOPNOTSUPP;
3913 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3914 CMD_TEMPL_AP_PROBE_RESPONSE,
3915 skb->data,
3916 skb->len, 0,
3917 rates);
3918 dev_kfree_skb(skb);
3920 if (ret < 0)
3921 goto out;
3923 wl1271_debug(DEBUG_AP, "probe response updated");
3924 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3926 out:
3927 return ret;
3930 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3931 struct ieee80211_vif *vif,
3932 u8 *probe_rsp_data,
3933 size_t probe_rsp_len,
3934 u32 rates)
3936 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3937 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3938 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3939 int ssid_ie_offset, ie_offset, templ_len;
3940 const u8 *ptr;
3942 /* no need to change probe response if the SSID is set correctly */
3943 if (wlvif->ssid_len > 0)
3944 return wl1271_cmd_template_set(wl, wlvif->role_id,
3945 CMD_TEMPL_AP_PROBE_RESPONSE,
3946 probe_rsp_data,
3947 probe_rsp_len, 0,
3948 rates);
3950 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3951 wl1271_error("probe_rsp template too big");
3952 return -EINVAL;
3955 /* start searching from IE offset */
3956 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3958 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3959 probe_rsp_len - ie_offset);
3960 if (!ptr) {
3961 wl1271_error("No SSID in beacon!");
3962 return -EINVAL;
3965 ssid_ie_offset = ptr - probe_rsp_data;
3966 ptr += (ptr[1] + 2);
3968 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3970 /* insert SSID from bss_conf */
3971 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3972 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3973 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3974 bss_conf->ssid, bss_conf->ssid_len);
3975 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3977 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3978 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3979 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3981 return wl1271_cmd_template_set(wl, wlvif->role_id,
3982 CMD_TEMPL_AP_PROBE_RESPONSE,
3983 probe_rsp_templ,
3984 templ_len, 0,
3985 rates);
3988 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3989 struct ieee80211_vif *vif,
3990 struct ieee80211_bss_conf *bss_conf,
3991 u32 changed)
3993 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3994 int ret = 0;
3996 if (changed & BSS_CHANGED_ERP_SLOT) {
3997 if (bss_conf->use_short_slot)
3998 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3999 else
4000 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4001 if (ret < 0) {
4002 wl1271_warning("Set slot time failed %d", ret);
4003 goto out;
4007 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4008 if (bss_conf->use_short_preamble)
4009 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4010 else
4011 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4014 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4015 if (bss_conf->use_cts_prot)
4016 ret = wl1271_acx_cts_protect(wl, wlvif,
4017 CTSPROTECT_ENABLE);
4018 else
4019 ret = wl1271_acx_cts_protect(wl, wlvif,
4020 CTSPROTECT_DISABLE);
4021 if (ret < 0) {
4022 wl1271_warning("Set ctsprotect failed %d", ret);
4023 goto out;
4027 out:
4028 return ret;
4031 static int wlcore_set_beacon_template(struct wl1271 *wl,
4032 struct ieee80211_vif *vif,
4033 bool is_ap)
4035 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4036 struct ieee80211_hdr *hdr;
4037 u32 min_rate;
4038 int ret;
4039 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4040 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4041 u16 tmpl_id;
4043 if (!beacon) {
4044 ret = -EINVAL;
4045 goto out;
4048 wl1271_debug(DEBUG_MASTER, "beacon updated");
4050 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4051 if (ret < 0) {
4052 dev_kfree_skb(beacon);
4053 goto out;
4055 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4056 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4057 CMD_TEMPL_BEACON;
4058 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4059 beacon->data,
4060 beacon->len, 0,
4061 min_rate);
4062 if (ret < 0) {
4063 dev_kfree_skb(beacon);
4064 goto out;
4067 wlvif->wmm_enabled =
4068 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4069 WLAN_OUI_TYPE_MICROSOFT_WMM,
4070 beacon->data + ieoffset,
4071 beacon->len - ieoffset);
4074 * In case a probe-resp template was already set explicitly
4075 * by userspace, don't use the beacon data.
4077 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4078 goto end_bcn;
4080 /* remove TIM ie from probe response */
4081 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4084 * remove the p2p ie from the probe response.
4085 * the fw responds to probe requests that don't include
4086 * the p2p ie. probe requests with a p2p ie will be passed up,
4087 * and will be answered by the supplicant (the spec
4088 * forbids including the p2p ie when responding to probe
4089 * requests that didn't include it).
4091 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4092 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4094 hdr = (struct ieee80211_hdr *) beacon->data;
4095 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4096 IEEE80211_STYPE_PROBE_RESP);
4097 if (is_ap)
4098 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4099 beacon->data,
4100 beacon->len,
4101 min_rate);
4102 else
4103 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4104 CMD_TEMPL_PROBE_RESPONSE,
4105 beacon->data,
4106 beacon->len, 0,
4107 min_rate);
4108 end_bcn:
4109 dev_kfree_skb(beacon);
4110 if (ret < 0)
4111 goto out;
4113 out:
4114 return ret;
4117 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4118 struct ieee80211_vif *vif,
4119 struct ieee80211_bss_conf *bss_conf,
4120 u32 changed)
4122 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4123 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4124 int ret = 0;
4126 if (changed & BSS_CHANGED_BEACON_INT) {
4127 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4128 bss_conf->beacon_int);
4130 wlvif->beacon_int = bss_conf->beacon_int;
4133 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4134 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4136 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4139 if (changed & BSS_CHANGED_BEACON) {
4140 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4141 if (ret < 0)
4142 goto out;
4144 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4145 &wlvif->flags)) {
4146 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4147 if (ret < 0)
4148 goto out;
4151 out:
4152 if (ret != 0)
4153 wl1271_error("beacon info change failed: %d", ret);
4154 return ret;
4157 /* AP mode changes */
4158 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4159 struct ieee80211_vif *vif,
4160 struct ieee80211_bss_conf *bss_conf,
4161 u32 changed)
4163 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4164 int ret = 0;
4166 if (changed & BSS_CHANGED_BASIC_RATES) {
4167 u32 rates = bss_conf->basic_rates;
4169 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4170 wlvif->band);
4171 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4172 wlvif->basic_rate_set);
4174 ret = wl1271_init_ap_rates(wl, wlvif);
4175 if (ret < 0) {
4176 wl1271_error("AP rate policy change failed %d", ret);
4177 goto out;
4180 ret = wl1271_ap_init_templates(wl, vif);
4181 if (ret < 0)
4182 goto out;
4184 /* No need to set probe resp template for mesh */
4185 if (!ieee80211_vif_is_mesh(vif)) {
4186 ret = wl1271_ap_set_probe_resp_tmpl(wl,
4187 wlvif->basic_rate,
4188 vif);
4189 if (ret < 0)
4190 goto out;
4193 ret = wlcore_set_beacon_template(wl, vif, true);
4194 if (ret < 0)
4195 goto out;
4198 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4199 if (ret < 0)
4200 goto out;
4202 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4203 if (bss_conf->enable_beacon) {
4204 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4205 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4206 if (ret < 0)
4207 goto out;
4209 ret = wl1271_ap_init_hwenc(wl, wlvif);
4210 if (ret < 0)
4211 goto out;
4213 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4214 wl1271_debug(DEBUG_AP, "started AP");
4216 } else {
4217 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4219 * The AP might be in ROC if we have just
4220 * sent an auth reply; handle it.
4222 if (test_bit(wlvif->role_id, wl->roc_map))
4223 wl12xx_croc(wl, wlvif->role_id);
4225 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4226 if (ret < 0)
4227 goto out;
4229 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4230 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4231 &wlvif->flags);
4232 wl1271_debug(DEBUG_AP, "stopped AP");
4237 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4238 if (ret < 0)
4239 goto out;
4241 /* Handle HT information change */
4242 if ((changed & BSS_CHANGED_HT) &&
4243 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4244 ret = wl1271_acx_set_ht_information(wl, wlvif,
4245 bss_conf->ht_operation_mode);
4246 if (ret < 0) {
4247 wl1271_warning("Set ht information failed %d", ret);
4248 goto out;
4252 out:
4253 return;
4256 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4257 struct ieee80211_bss_conf *bss_conf,
4258 u32 sta_rate_set)
4260 u32 rates;
4261 int ret;
4263 wl1271_debug(DEBUG_MAC80211,
4264 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4265 bss_conf->bssid, bss_conf->aid,
4266 bss_conf->beacon_int,
4267 bss_conf->basic_rates, sta_rate_set);
4269 wlvif->beacon_int = bss_conf->beacon_int;
4270 rates = bss_conf->basic_rates;
4271 wlvif->basic_rate_set =
4272 wl1271_tx_enabled_rates_get(wl, rates,
4273 wlvif->band);
4274 wlvif->basic_rate =
4275 wl1271_tx_min_rate_get(wl,
4276 wlvif->basic_rate_set);
4278 if (sta_rate_set)
4279 wlvif->rate_set =
4280 wl1271_tx_enabled_rates_get(wl,
4281 sta_rate_set,
4282 wlvif->band);
4284 /* we only support sched_scan while not connected */
4285 if (wl->sched_vif == wlvif)
4286 wl->ops->sched_scan_stop(wl, wlvif);
4288 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4289 if (ret < 0)
4290 return ret;
4292 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4293 if (ret < 0)
4294 return ret;
4296 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4297 if (ret < 0)
4298 return ret;
4300 wlcore_set_ssid(wl, wlvif);
4302 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4304 return 0;
4307 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4309 int ret;
4311 /* revert to minimum rates for the current band */
4312 wl1271_set_band_rate(wl, wlvif);
4313 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4315 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4316 if (ret < 0)
4317 return ret;
4319 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4320 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4321 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4322 if (ret < 0)
4323 return ret;
4326 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4327 return 0;
4329 /* STA/IBSS mode changes */
4330 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4331 struct ieee80211_vif *vif,
4332 struct ieee80211_bss_conf *bss_conf,
4333 u32 changed)
4335 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4336 bool do_join = false;
4337 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4338 bool ibss_joined = false;
4339 u32 sta_rate_set = 0;
4340 int ret;
4341 struct ieee80211_sta *sta;
4342 bool sta_exists = false;
4343 struct ieee80211_sta_ht_cap sta_ht_cap;
4345 if (is_ibss) {
4346 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4347 changed);
4348 if (ret < 0)
4349 goto out;
4352 if (changed & BSS_CHANGED_IBSS) {
4353 if (bss_conf->ibss_joined) {
4354 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4355 ibss_joined = true;
4356 } else {
4357 wlcore_unset_assoc(wl, wlvif);
4358 wl12xx_cmd_role_stop_sta(wl, wlvif);
4362 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4363 do_join = true;
4365 /* Need to update the SSID (for filtering etc) */
4366 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4367 do_join = true;
4369 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4370 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4371 bss_conf->enable_beacon ? "enabled" : "disabled");
4373 do_join = true;
4376 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4377 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4379 if (changed & BSS_CHANGED_CQM) {
4380 bool enable = false;
4381 if (bss_conf->cqm_rssi_thold)
4382 enable = true;
4383 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4384 bss_conf->cqm_rssi_thold,
4385 bss_conf->cqm_rssi_hyst);
4386 if (ret < 0)
4387 goto out;
4388 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4391 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4392 BSS_CHANGED_ASSOC)) {
4393 rcu_read_lock();
4394 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4395 if (sta) {
4396 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4398 /* save the supp_rates of the ap */
4399 sta_rate_set = sta->supp_rates[wlvif->band];
4400 if (sta->ht_cap.ht_supported)
4401 sta_rate_set |=
4402 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4403 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
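/*
 * Note on the two shifts above: rx_mask[0] carries the peer's MCS 0-7
 * (single spatial stream) and rx_mask[1] MCS 8-15 (two streams); shifting
 * them to HW_HT_RATES_OFFSET / HW_MIMO_RATES_OFFSET folds the HT rates
 * into the same rate bitmap as the legacy supp_rates saved just above.
 */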
4404 sta_ht_cap = sta->ht_cap;
4405 sta_exists = true;
4408 rcu_read_unlock();
4411 if (changed & BSS_CHANGED_BSSID) {
4412 if (!is_zero_ether_addr(bss_conf->bssid)) {
4413 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4414 sta_rate_set);
4415 if (ret < 0)
4416 goto out;
4418 /* Need to update the BSSID (for filtering etc) */
4419 do_join = true;
4420 } else {
4421 ret = wlcore_clear_bssid(wl, wlvif);
4422 if (ret < 0)
4423 goto out;
4427 if (changed & BSS_CHANGED_IBSS) {
4428 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4429 bss_conf->ibss_joined);
4431 if (bss_conf->ibss_joined) {
4432 u32 rates = bss_conf->basic_rates;
4433 wlvif->basic_rate_set =
4434 wl1271_tx_enabled_rates_get(wl, rates,
4435 wlvif->band);
4436 wlvif->basic_rate =
4437 wl1271_tx_min_rate_get(wl,
4438 wlvif->basic_rate_set);
4440 /* by default, use 11b + OFDM rates */
4441 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4442 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4443 if (ret < 0)
4444 goto out;
4448 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4449 /* enable beacon filtering */
4450 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4451 if (ret < 0)
4452 goto out;
4455 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4456 if (ret < 0)
4457 goto out;
4459 if (do_join) {
4460 ret = wlcore_join(wl, wlvif);
4461 if (ret < 0) {
4462 wl1271_warning("cmd join failed %d", ret);
4463 goto out;
4467 if (changed & BSS_CHANGED_ASSOC) {
4468 if (bss_conf->assoc) {
4469 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4470 sta_rate_set);
4471 if (ret < 0)
4472 goto out;
4474 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4475 wl12xx_set_authorized(wl, wlvif);
4476 } else {
4477 wlcore_unset_assoc(wl, wlvif);
4481 if (changed & BSS_CHANGED_PS) {
4482 if ((bss_conf->ps) &&
4483 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4484 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4485 int ps_mode;
4486 char *ps_mode_str;
4488 if (wl->conf.conn.forced_ps) {
4489 ps_mode = STATION_POWER_SAVE_MODE;
4490 ps_mode_str = "forced";
4491 } else {
4492 ps_mode = STATION_AUTO_PS_MODE;
4493 ps_mode_str = "auto";
4496 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4498 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4499 if (ret < 0)
4500 wl1271_warning("enter %s ps failed %d",
4501 ps_mode_str, ret);
4502 } else if (!bss_conf->ps &&
4503 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4504 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4506 ret = wl1271_ps_set_mode(wl, wlvif,
4507 STATION_ACTIVE_MODE);
4508 if (ret < 0)
4509 wl1271_warning("exit auto ps failed %d", ret);
4513 /* Handle new association with HT. Do this after join. */
4514 if (sta_exists) {
4515 bool enabled =
4516 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4518 ret = wlcore_hw_set_peer_cap(wl,
4519 &sta_ht_cap,
4520 enabled,
4521 wlvif->rate_set,
4522 wlvif->sta.hlid);
4523 if (ret < 0) {
4524 wl1271_warning("Set ht cap failed %d", ret);
4525 goto out;
4529 if (enabled) {
4530 ret = wl1271_acx_set_ht_information(wl, wlvif,
4531 bss_conf->ht_operation_mode);
4532 if (ret < 0) {
4533 wl1271_warning("Set ht information failed %d",
4534 ret);
4535 goto out;
4540 /* Handle arp filtering. Done after join. */
4541 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4542 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4543 __be32 addr = bss_conf->arp_addr_list[0];
4544 wlvif->sta.qos = bss_conf->qos;
4545 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4547 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4548 wlvif->ip_addr = addr;
4549 /*
4550 * The template should have been configured only upon
4551 * association. However, it seems that the correct IP
4552 * isn't being set (when sending), so we have to
4553 * reconfigure the template upon every IP change.
4554 */
4555 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4556 if (ret < 0) {
4557 wl1271_warning("build arp rsp failed: %d", ret);
4558 goto out;
4561 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4562 (ACX_ARP_FILTER_ARP_FILTERING |
4563 ACX_ARP_FILTER_AUTO_ARP),
4564 addr);
4565 } else {
4566 wlvif->ip_addr = 0;
4567 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4570 if (ret < 0)
4571 goto out;
4574 out:
4575 return;
4578 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4579 struct ieee80211_vif *vif,
4580 struct ieee80211_bss_conf *bss_conf,
4581 u32 changed)
4583 struct wl1271 *wl = hw->priv;
4584 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4585 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4586 int ret;
4588 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4589 wlvif->role_id, (int)changed);
4591 /*
4592 * make sure to cancel pending disconnections if our association
4593 * state changed
4594 */
4595 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4596 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4598 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4599 !bss_conf->enable_beacon)
4600 wl1271_tx_flush(wl);
4602 mutex_lock(&wl->mutex);
4604 if (unlikely(wl->state != WLCORE_STATE_ON))
4605 goto out;
4607 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4608 goto out;
4610 ret = wl1271_ps_elp_wakeup(wl);
4611 if (ret < 0)
4612 goto out;
4614 if ((changed & BSS_CHANGED_TXPOWER) &&
4615 bss_conf->txpower != wlvif->power_level) {
4617 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4618 if (ret < 0)
4619 goto out;
4621 wlvif->power_level = bss_conf->txpower;
4624 if (is_ap)
4625 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4626 else
4627 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4629 wl1271_ps_elp_sleep(wl);
4631 out:
4632 mutex_unlock(&wl->mutex);
4635 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4636 struct ieee80211_chanctx_conf *ctx)
4638 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4639 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4640 cfg80211_get_chandef_type(&ctx->def));
4641 return 0;
4644 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4645 struct ieee80211_chanctx_conf *ctx)
4647 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4648 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4649 cfg80211_get_chandef_type(&ctx->def));
4652 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4653 struct ieee80211_chanctx_conf *ctx,
4654 u32 changed)
4656 struct wl1271 *wl = hw->priv;
4657 struct wl12xx_vif *wlvif;
4658 int ret;
4659 int channel = ieee80211_frequency_to_channel(
4660 ctx->def.chan->center_freq);
4662 wl1271_debug(DEBUG_MAC80211,
4663 "mac80211 change chanctx %d (type %d) changed 0x%x",
4664 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4666 mutex_lock(&wl->mutex);
4668 ret = wl1271_ps_elp_wakeup(wl);
4669 if (ret < 0)
4670 goto out;
4672 wl12xx_for_each_wlvif(wl, wlvif) {
4673 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4675 rcu_read_lock();
4676 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4677 rcu_read_unlock();
4678 continue;
4680 rcu_read_unlock();
4682 /* start radar if needed */
4683 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4684 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4685 ctx->radar_enabled && !wlvif->radar_enabled &&
4686 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4687 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4688 wlcore_hw_set_cac(wl, wlvif, true);
4689 wlvif->radar_enabled = true;
4693 wl1271_ps_elp_sleep(wl);
4694 out:
4695 mutex_unlock(&wl->mutex);
4698 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4699 struct ieee80211_vif *vif,
4700 struct ieee80211_chanctx_conf *ctx)
4702 struct wl1271 *wl = hw->priv;
4703 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4704 int channel = ieee80211_frequency_to_channel(
4705 ctx->def.chan->center_freq);
4706 int ret = -EINVAL;
4708 wl1271_debug(DEBUG_MAC80211,
4709 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4710 wlvif->role_id, channel,
4711 cfg80211_get_chandef_type(&ctx->def),
4712 ctx->radar_enabled, ctx->def.chan->dfs_state);
4714 mutex_lock(&wl->mutex);
4716 if (unlikely(wl->state != WLCORE_STATE_ON))
4717 goto out;
4719 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4720 goto out;
4722 ret = wl1271_ps_elp_wakeup(wl);
4723 if (ret < 0)
4724 goto out;
4726 wlvif->band = ctx->def.chan->band;
4727 wlvif->channel = channel;
4728 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4730 /* update default rates according to the band */
4731 wl1271_set_band_rate(wl, wlvif);
4733 if (ctx->radar_enabled &&
4734 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4735 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4736 wlcore_hw_set_cac(wl, wlvif, true);
4737 wlvif->radar_enabled = true;
4740 wl1271_ps_elp_sleep(wl);
4741 out:
4742 mutex_unlock(&wl->mutex);
4744 return 0;
4747 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4748 struct ieee80211_vif *vif,
4749 struct ieee80211_chanctx_conf *ctx)
4751 struct wl1271 *wl = hw->priv;
4752 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4753 int ret;
4755 wl1271_debug(DEBUG_MAC80211,
4756 "mac80211 unassign chanctx (role %d) %d (type %d)",
4757 wlvif->role_id,
4758 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4759 cfg80211_get_chandef_type(&ctx->def));
4761 wl1271_tx_flush(wl);
4763 mutex_lock(&wl->mutex);
4765 if (unlikely(wl->state != WLCORE_STATE_ON))
4766 goto out;
4768 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4769 goto out;
4771 ret = wl1271_ps_elp_wakeup(wl);
4772 if (ret < 0)
4773 goto out;
4775 if (wlvif->radar_enabled) {
4776 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4777 wlcore_hw_set_cac(wl, wlvif, false);
4778 wlvif->radar_enabled = false;
4781 wl1271_ps_elp_sleep(wl);
4782 out:
4783 mutex_unlock(&wl->mutex);
4786 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4787 struct wl12xx_vif *wlvif,
4788 struct ieee80211_chanctx_conf *new_ctx)
4790 int channel = ieee80211_frequency_to_channel(
4791 new_ctx->def.chan->center_freq);
4793 wl1271_debug(DEBUG_MAC80211,
4794 "switch vif (role %d) %d -> %d chan_type: %d",
4795 wlvif->role_id, wlvif->channel, channel,
4796 cfg80211_get_chandef_type(&new_ctx->def));
4798 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4799 return 0;
4801 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4803 if (wlvif->radar_enabled) {
4804 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4805 wlcore_hw_set_cac(wl, wlvif, false);
4806 wlvif->radar_enabled = false;
4809 wlvif->band = new_ctx->def.chan->band;
4810 wlvif->channel = channel;
4811 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4813 /* start radar if needed */
4814 if (new_ctx->radar_enabled) {
4815 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4816 wlcore_hw_set_cac(wl, wlvif, true);
4817 wlvif->radar_enabled = true;
4820 return 0;
4823 static int
4824 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4825 struct ieee80211_vif_chanctx_switch *vifs,
4826 int n_vifs,
4827 enum ieee80211_chanctx_switch_mode mode)
4829 struct wl1271 *wl = hw->priv;
4830 int i, ret;
4832 wl1271_debug(DEBUG_MAC80211,
4833 "mac80211 switch chanctx n_vifs %d mode %d",
4834 n_vifs, mode);
4836 mutex_lock(&wl->mutex);
4838 ret = wl1271_ps_elp_wakeup(wl);
4839 if (ret < 0)
4840 goto out;
4842 for (i = 0; i < n_vifs; i++) {
4843 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4845 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4846 if (ret)
4847 goto out_sleep;
4849 out_sleep:
4850 wl1271_ps_elp_sleep(wl);
4851 out:
4852 mutex_unlock(&wl->mutex);
4854 return 0;
4857 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4858 struct ieee80211_vif *vif, u16 queue,
4859 const struct ieee80211_tx_queue_params *params)
4861 struct wl1271 *wl = hw->priv;
4862 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4863 u8 ps_scheme;
4864 int ret = 0;
4866 if (wlcore_is_p2p_mgmt(wlvif))
4867 return 0;
4869 mutex_lock(&wl->mutex);
4871 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4873 if (params->uapsd)
4874 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4875 else
4876 ps_scheme = CONF_PS_SCHEME_LEGACY;
4878 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4879 goto out;
4881 ret = wl1271_ps_elp_wakeup(wl);
4882 if (ret < 0)
4883 goto out;
4885 /*
4886 * mac80211 configures the txop in units of 32 usec,
4887 * but we need it in microseconds, hence the << 5 below.
4888 */
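/*
 * For example, a txop of 94 (i.e. 94 x 32 usec units) is passed down
 * as 94 << 5 = 3008 usec.
 */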
4889 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4890 params->cw_min, params->cw_max,
4891 params->aifs, params->txop << 5);
4892 if (ret < 0)
4893 goto out_sleep;
4895 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4896 CONF_CHANNEL_TYPE_EDCF,
4897 wl1271_tx_get_queue(queue),
4898 ps_scheme, CONF_ACK_POLICY_LEGACY,
4899 0, 0);
4901 out_sleep:
4902 wl1271_ps_elp_sleep(wl);
4904 out:
4905 mutex_unlock(&wl->mutex);
4907 return ret;
4910 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4911 struct ieee80211_vif *vif)
4914 struct wl1271 *wl = hw->priv;
4915 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4916 u64 mactime = ULLONG_MAX;
4917 int ret;
4919 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4921 mutex_lock(&wl->mutex);
4923 if (unlikely(wl->state != WLCORE_STATE_ON))
4924 goto out;
4926 ret = wl1271_ps_elp_wakeup(wl);
4927 if (ret < 0)
4928 goto out;
4930 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4931 if (ret < 0)
4932 goto out_sleep;
4934 out_sleep:
4935 wl1271_ps_elp_sleep(wl);
4937 out:
4938 mutex_unlock(&wl->mutex);
4939 return mactime;
4942 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4943 struct survey_info *survey)
4945 struct ieee80211_conf *conf = &hw->conf;
4947 if (idx != 0)
4948 return -ENOENT;
4950 survey->channel = conf->chandef.chan;
4951 survey->filled = 0;
4952 return 0;
4955 static int wl1271_allocate_sta(struct wl1271 *wl,
4956 struct wl12xx_vif *wlvif,
4957 struct ieee80211_sta *sta)
4959 struct wl1271_station *wl_sta;
4960 int ret;
4963 if (wl->active_sta_count >= wl->max_ap_stations) {
4964 wl1271_warning("could not allocate HLID - too many stations");
4965 return -EBUSY;
4968 wl_sta = (struct wl1271_station *)sta->drv_priv;
4969 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4970 if (ret < 0) {
4971 wl1271_warning("could not allocate HLID - too many links");
4972 return -EBUSY;
4975 /* use the previous security seq, if this is a recovery/resume */
4976 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4978 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4979 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4980 wl->active_sta_count++;
4981 return 0;
4984 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4986 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4987 return;
4989 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4990 __clear_bit(hlid, &wl->ap_ps_map);
4991 __clear_bit(hlid, &wl->ap_fw_ps_map);
4993 /*
4994 * save the last used PN in the private part of ieee80211_sta,
4995 * in case of recovery/suspend
4996 */
4997 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4999 wl12xx_free_link(wl, wlvif, &hlid);
5000 wl->active_sta_count--;
5002 /*
5003 * rearm the tx watchdog when the last STA is freed - give the FW a
5004 * chance to return STA-buffered packets before complaining.
5005 */
5006 if (wl->active_sta_count == 0)
5007 wl12xx_rearm_tx_watchdog_locked(wl);
5010 static int wl12xx_sta_add(struct wl1271 *wl,
5011 struct wl12xx_vif *wlvif,
5012 struct ieee80211_sta *sta)
5014 struct wl1271_station *wl_sta;
5015 int ret = 0;
5016 u8 hlid;
5018 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5020 ret = wl1271_allocate_sta(wl, wlvif, sta);
5021 if (ret < 0)
5022 return ret;
5024 wl_sta = (struct wl1271_station *)sta->drv_priv;
5025 hlid = wl_sta->hlid;
5027 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5028 if (ret < 0)
5029 wl1271_free_sta(wl, wlvif, hlid);
5031 return ret;
5034 static int wl12xx_sta_remove(struct wl1271 *wl,
5035 struct wl12xx_vif *wlvif,
5036 struct ieee80211_sta *sta)
5038 struct wl1271_station *wl_sta;
5039 int ret = 0, id;
5041 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5043 wl_sta = (struct wl1271_station *)sta->drv_priv;
5044 id = wl_sta->hlid;
5045 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5046 return -EINVAL;
5048 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5049 if (ret < 0)
5050 return ret;
5052 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5053 return ret;
5056 static void wlcore_roc_if_possible(struct wl1271 *wl,
5057 struct wl12xx_vif *wlvif)
5059 if (find_first_bit(wl->roc_map,
5060 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5061 return;
5063 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5064 return;
5066 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5069 /*
5070 * when wl_sta is NULL, we treat this call as if coming from a
5071 * pending auth reply.
5072 * wl->mutex must be taken and the FW must be awake when the call
5073 * takes place.
5074 */
5075 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5076 struct wl1271_station *wl_sta, bool in_conn)
5078 if (in_conn) {
5079 if (WARN_ON(wl_sta && wl_sta->in_connection))
5080 return;
5082 if (!wlvif->ap_pending_auth_reply &&
5083 !wlvif->inconn_count)
5084 wlcore_roc_if_possible(wl, wlvif);
5086 if (wl_sta) {
5087 wl_sta->in_connection = true;
5088 wlvif->inconn_count++;
5089 } else {
5090 wlvif->ap_pending_auth_reply = true;
5092 } else {
5093 if (wl_sta && !wl_sta->in_connection)
5094 return;
5096 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5097 return;
5099 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5100 return;
5102 if (wl_sta) {
5103 wl_sta->in_connection = false;
5104 wlvif->inconn_count--;
5105 } else {
5106 wlvif->ap_pending_auth_reply = false;
5109 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5110 test_bit(wlvif->role_id, wl->roc_map))
5111 wl12xx_croc(wl, wlvif->role_id);
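/*
 * In short, wlcore_update_inconn_sta() keeps a per-vif count of stations
 * that are mid-connection, plus a flag for a pending auth reply that has
 * no station entry yet. A ROC is requested when the first such entry
 * appears, and released once both the count and the pending-auth flag
 * have dropped, so the device stays on channel only while peers are
 * still connecting.
 */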
5115 static int wl12xx_update_sta_state(struct wl1271 *wl,
5116 struct wl12xx_vif *wlvif,
5117 struct ieee80211_sta *sta,
5118 enum ieee80211_sta_state old_state,
5119 enum ieee80211_sta_state new_state)
5121 struct wl1271_station *wl_sta;
5122 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5123 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5124 int ret;
5126 wl_sta = (struct wl1271_station *)sta->drv_priv;
5128 /* Add station (AP mode) */
5129 if (is_ap &&
5130 old_state == IEEE80211_STA_NOTEXIST &&
5131 new_state == IEEE80211_STA_NONE) {
5132 ret = wl12xx_sta_add(wl, wlvif, sta);
5133 if (ret)
5134 return ret;
5136 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5139 /* Remove station (AP mode) */
5140 if (is_ap &&
5141 old_state == IEEE80211_STA_NONE &&
5142 new_state == IEEE80211_STA_NOTEXIST) {
5143 /* must not fail */
5144 wl12xx_sta_remove(wl, wlvif, sta);
5146 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5149 /* Authorize station (AP mode) */
5150 if (is_ap &&
5151 new_state == IEEE80211_STA_AUTHORIZED) {
5152 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5153 if (ret < 0)
5154 return ret;
5156 /* reconfigure rates */
5157 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5158 if (ret < 0)
5159 return ret;
5161 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5162 wl_sta->hlid);
5163 if (ret)
5164 return ret;
5166 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5169 /* Authorize station */
5170 if (is_sta &&
5171 new_state == IEEE80211_STA_AUTHORIZED) {
5172 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5173 ret = wl12xx_set_authorized(wl, wlvif);
5174 if (ret)
5175 return ret;
5178 if (is_sta &&
5179 old_state == IEEE80211_STA_AUTHORIZED &&
5180 new_state == IEEE80211_STA_ASSOC) {
5181 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5182 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5185 /* save seq number on disassoc (suspend) */
5186 if (is_sta &&
5187 old_state == IEEE80211_STA_ASSOC &&
5188 new_state == IEEE80211_STA_AUTH) {
5189 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5190 wlvif->total_freed_pkts = 0;
5193 /* restore seq number on assoc (resume) */
5194 if (is_sta &&
5195 old_state == IEEE80211_STA_AUTH &&
5196 new_state == IEEE80211_STA_ASSOC) {
5197 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5200 /* clear ROCs on failure or authorization */
5201 if (is_sta &&
5202 (new_state == IEEE80211_STA_AUTHORIZED ||
5203 new_state == IEEE80211_STA_NOTEXIST)) {
5204 if (test_bit(wlvif->role_id, wl->roc_map))
5205 wl12xx_croc(wl, wlvif->role_id);
5208 if (is_sta &&
5209 old_state == IEEE80211_STA_NOTEXIST &&
5210 new_state == IEEE80211_STA_NONE) {
5211 if (find_first_bit(wl->roc_map,
5212 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5213 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5214 wl12xx_roc(wl, wlvif, wlvif->role_id,
5215 wlvif->band, wlvif->channel);
5218 return 0;
5221 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5222 struct ieee80211_vif *vif,
5223 struct ieee80211_sta *sta,
5224 enum ieee80211_sta_state old_state,
5225 enum ieee80211_sta_state new_state)
5227 struct wl1271 *wl = hw->priv;
5228 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5229 int ret;
5231 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5232 sta->aid, old_state, new_state);
5234 mutex_lock(&wl->mutex);
5236 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5237 ret = -EBUSY;
5238 goto out;
5241 ret = wl1271_ps_elp_wakeup(wl);
5242 if (ret < 0)
5243 goto out;
5245 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5247 wl1271_ps_elp_sleep(wl);
5248 out:
5249 mutex_unlock(&wl->mutex);
5250 if (new_state < old_state)
5251 return 0;
5252 return ret;
5255 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5256 struct ieee80211_vif *vif,
5257 struct ieee80211_ampdu_params *params)
5259 struct wl1271 *wl = hw->priv;
5260 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5261 int ret;
5262 u8 hlid, *ba_bitmap;
5263 struct ieee80211_sta *sta = params->sta;
5264 enum ieee80211_ampdu_mlme_action action = params->action;
5265 u16 tid = params->tid;
5266 u16 *ssn = &params->ssn;
5268 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5269 tid);
5271 /* sanity check - the fields in FW are only 8 bits wide */
5272 if (WARN_ON(tid > 0xFF))
5273 return -ENOTSUPP;
5275 mutex_lock(&wl->mutex);
5277 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5278 ret = -EAGAIN;
5279 goto out;
5282 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5283 hlid = wlvif->sta.hlid;
5284 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5285 struct wl1271_station *wl_sta;
5287 wl_sta = (struct wl1271_station *)sta->drv_priv;
5288 hlid = wl_sta->hlid;
5289 } else {
5290 ret = -EINVAL;
5291 goto out;
5294 ba_bitmap = &wl->links[hlid].ba_bitmap;
5296 ret = wl1271_ps_elp_wakeup(wl);
5297 if (ret < 0)
5298 goto out;
5300 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5301 tid, action);
5303 switch (action) {
5304 case IEEE80211_AMPDU_RX_START:
5305 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5306 ret = -ENOTSUPP;
5307 break;
5310 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5311 ret = -EBUSY;
5312 wl1271_error("exceeded max RX BA sessions");
5313 break;
5316 if (*ba_bitmap & BIT(tid)) {
5317 ret = -EINVAL;
5318 wl1271_error("cannot enable RX BA session on active "
5319 "tid: %d", tid);
5320 break;
5323 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5324 hlid,
5325 params->buf_size);
5327 if (!ret) {
5328 *ba_bitmap |= BIT(tid);
5329 wl->ba_rx_session_count++;
5331 break;
5333 case IEEE80211_AMPDU_RX_STOP:
5334 if (!(*ba_bitmap & BIT(tid))) {
5335 /*
5336 * this happens on reconfig - so only output a debug
5337 * message for now, and don't fail the function.
5338 */
5339 wl1271_debug(DEBUG_MAC80211,
5340 "no active RX BA session on tid: %d",
5341 tid);
5342 ret = 0;
5343 break;
5346 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5347 hlid, 0);
5348 if (!ret) {
5349 *ba_bitmap &= ~BIT(tid);
5350 wl->ba_rx_session_count--;
5352 break;
5354 /*
5355 * The BA initiator session is managed by the FW independently,
5356 * so all TX AMPDU actions are grouped here on purpose and rejected.
5357 */
5358 case IEEE80211_AMPDU_TX_START:
5359 case IEEE80211_AMPDU_TX_STOP_CONT:
5360 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5361 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5362 case IEEE80211_AMPDU_TX_OPERATIONAL:
5363 ret = -EINVAL;
5364 break;
5366 default:
5367 wl1271_error("Incorrect ampdu action id=%x\n", action);
5368 ret = -EINVAL;
5371 wl1271_ps_elp_sleep(wl);
5373 out:
5374 mutex_unlock(&wl->mutex);
5376 return ret;
5379 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5380 struct ieee80211_vif *vif,
5381 const struct cfg80211_bitrate_mask *mask)
5383 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5384 struct wl1271 *wl = hw->priv;
5385 int i, ret = 0;
5387 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5388 mask->control[NL80211_BAND_2GHZ].legacy,
5389 mask->control[NL80211_BAND_5GHZ].legacy);
5391 mutex_lock(&wl->mutex);
5393 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5394 wlvif->bitrate_masks[i] =
5395 wl1271_tx_enabled_rates_get(wl,
5396 mask->control[i].legacy,
5399 if (unlikely(wl->state != WLCORE_STATE_ON))
5400 goto out;
5402 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5403 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5405 ret = wl1271_ps_elp_wakeup(wl);
5406 if (ret < 0)
5407 goto out;
5409 wl1271_set_band_rate(wl, wlvif);
5410 wlvif->basic_rate =
5411 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5412 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5414 wl1271_ps_elp_sleep(wl);
5416 out:
5417 mutex_unlock(&wl->mutex);
5419 return ret;
5422 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5423 struct ieee80211_vif *vif,
5424 struct ieee80211_channel_switch *ch_switch)
5426 struct wl1271 *wl = hw->priv;
5427 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5428 int ret;
5430 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5432 wl1271_tx_flush(wl);
5434 mutex_lock(&wl->mutex);
5436 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5437 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5438 ieee80211_chswitch_done(vif, false);
5439 goto out;
5440 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5441 goto out;
5444 ret = wl1271_ps_elp_wakeup(wl);
5445 if (ret < 0)
5446 goto out;
5448 /* TODO: change mac80211 to pass vif as param */
5450 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5451 unsigned long delay_usec;
5453 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5454 if (ret)
5455 goto out_sleep;
5457 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5459 /* indicate failure 5 seconds after channel switch time */
5460 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5461 ch_switch->count;
5462 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5463 usecs_to_jiffies(delay_usec) +
5464 msecs_to_jiffies(5000));
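/*
 * For example, with a beacon interval of 100 TU (102400 usec) and a CSA
 * count of 10, the switch is expected after roughly 1.02 s, so the
 * failure-indication work fires about 6 seconds from now.
 */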
5467 out_sleep:
5468 wl1271_ps_elp_sleep(wl);
5470 out:
5471 mutex_unlock(&wl->mutex);
5474 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5475 struct wl12xx_vif *wlvif,
5476 u8 eid)
5478 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5479 struct sk_buff *beacon =
5480 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5482 if (!beacon)
5483 return NULL;
5485 return cfg80211_find_ie(eid,
5486 beacon->data + ieoffset,
5487 beacon->len - ieoffset);
5490 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5491 u8 *csa_count)
5493 const u8 *ie;
5494 const struct ieee80211_channel_sw_ie *ie_csa;
5496 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5497 if (!ie)
5498 return -EINVAL;
5500 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5501 *csa_count = ie_csa->count;
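/*
 * The &ie[2] cast above works because cfg80211_find_ie() returns a
 * pointer to the element header: ie[0] is the element ID, ie[1] its
 * length, and the channel switch payload (mode, new channel, count)
 * starts at ie[2].
 */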
5503 return 0;
5506 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5507 struct ieee80211_vif *vif,
5508 struct cfg80211_chan_def *chandef)
5510 struct wl1271 *wl = hw->priv;
5511 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5512 struct ieee80211_channel_switch ch_switch = {
5513 .block_tx = true,
5514 .chandef = *chandef,
5516 int ret;
5518 wl1271_debug(DEBUG_MAC80211,
5519 "mac80211 channel switch beacon (role %d)",
5520 wlvif->role_id);
5522 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5523 if (ret < 0) {
5524 wl1271_error("error getting beacon (for CSA counter)");
5525 return;
5528 mutex_lock(&wl->mutex);
5530 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5531 ret = -EBUSY;
5532 goto out;
5535 ret = wl1271_ps_elp_wakeup(wl);
5536 if (ret < 0)
5537 goto out;
5539 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5540 if (ret)
5541 goto out_sleep;
5543 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5545 out_sleep:
5546 wl1271_ps_elp_sleep(wl);
5547 out:
5548 mutex_unlock(&wl->mutex);
5551 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5552 u32 queues, bool drop)
5554 struct wl1271 *wl = hw->priv;
5556 wl1271_tx_flush(wl);
5559 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5560 struct ieee80211_vif *vif,
5561 struct ieee80211_channel *chan,
5562 int duration,
5563 enum ieee80211_roc_type type)
5565 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5566 struct wl1271 *wl = hw->priv;
5567 int channel, active_roc, ret = 0;
5569 channel = ieee80211_frequency_to_channel(chan->center_freq);
5571 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5572 channel, wlvif->role_id);
5574 mutex_lock(&wl->mutex);
5576 if (unlikely(wl->state != WLCORE_STATE_ON))
5577 goto out;
5579 /* return EBUSY if we can't ROC right now */
5580 active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5581 if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5582 wl1271_warning("active roc on role %d", active_roc);
5583 ret = -EBUSY;
5584 goto out;
5587 ret = wl1271_ps_elp_wakeup(wl);
5588 if (ret < 0)
5589 goto out;
5591 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5592 if (ret < 0)
5593 goto out_sleep;
5595 wl->roc_vif = vif;
5596 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5597 msecs_to_jiffies(duration));
5598 out_sleep:
5599 wl1271_ps_elp_sleep(wl);
5600 out:
5601 mutex_unlock(&wl->mutex);
5602 return ret;
5605 static int __wlcore_roc_completed(struct wl1271 *wl)
5607 struct wl12xx_vif *wlvif;
5608 int ret;
5610 /* already completed */
5611 if (unlikely(!wl->roc_vif))
5612 return 0;
5614 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5616 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5617 return -EBUSY;
5619 ret = wl12xx_stop_dev(wl, wlvif);
5620 if (ret < 0)
5621 return ret;
5623 wl->roc_vif = NULL;
5625 return 0;
5628 static int wlcore_roc_completed(struct wl1271 *wl)
5630 int ret;
5632 wl1271_debug(DEBUG_MAC80211, "roc complete");
5634 mutex_lock(&wl->mutex);
5636 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5637 ret = -EBUSY;
5638 goto out;
5641 ret = wl1271_ps_elp_wakeup(wl);
5642 if (ret < 0)
5643 goto out;
5645 ret = __wlcore_roc_completed(wl);
5647 wl1271_ps_elp_sleep(wl);
5648 out:
5649 mutex_unlock(&wl->mutex);
5651 return ret;
5654 static void wlcore_roc_complete_work(struct work_struct *work)
5656 struct delayed_work *dwork;
5657 struct wl1271 *wl;
5658 int ret;
5660 dwork = to_delayed_work(work);
5661 wl = container_of(dwork, struct wl1271, roc_complete_work);
5663 ret = wlcore_roc_completed(wl);
5664 if (!ret)
5665 ieee80211_remain_on_channel_expired(wl->hw);
5668 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5670 struct wl1271 *wl = hw->priv;
5672 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5674 /* TODO: per-vif */
5675 wl1271_tx_flush(wl);
5677 /*
5678 * we can't just flush_work here, because it might deadlock
5679 * (as we might get called from the same workqueue)
5680 */
5681 cancel_delayed_work_sync(&wl->roc_complete_work);
5682 wlcore_roc_completed(wl);
5684 return 0;
5687 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5688 struct ieee80211_vif *vif,
5689 struct ieee80211_sta *sta,
5690 u32 changed)
5692 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5694 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5696 if (!(changed & IEEE80211_RC_BW_CHANGED))
5697 return;
5699 /* this callback is atomic, so schedule a new work */
5700 wlvif->rc_update_bw = sta->bandwidth;
5701 memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5702 ieee80211_queue_work(hw, &wlvif->rc_update_work);
5705 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5706 struct ieee80211_vif *vif,
5707 struct ieee80211_sta *sta,
5708 struct station_info *sinfo)
5710 struct wl1271 *wl = hw->priv;
5711 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5712 s8 rssi_dbm;
5713 int ret;
5715 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5717 mutex_lock(&wl->mutex);
5719 if (unlikely(wl->state != WLCORE_STATE_ON))
5720 goto out;
5722 ret = wl1271_ps_elp_wakeup(wl);
5723 if (ret < 0)
5724 goto out_sleep;
5726 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5727 if (ret < 0)
5728 goto out_sleep;
5730 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5731 sinfo->signal = rssi_dbm;
5733 out_sleep:
5734 wl1271_ps_elp_sleep(wl);
5736 out:
5737 mutex_unlock(&wl->mutex);
5740 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5741 struct ieee80211_sta *sta)
5743 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5744 struct wl1271 *wl = hw->priv;
5745 u8 hlid = wl_sta->hlid;
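/*
 * mac80211 expects get_expected_throughput() to report Kbps, while the
 * firmware keeps the per-link rate in Mbps - hence the * 1000 below.
 */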
5747 /* return in units of Kbps */
5748 return (wl->links[hlid].fw_rate_mbps * 1000);
5751 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5753 struct wl1271 *wl = hw->priv;
5754 bool ret = false;
5756 mutex_lock(&wl->mutex);
5758 if (unlikely(wl->state != WLCORE_STATE_ON))
5759 goto out;
5761 /* packets are considered pending if in the TX queue or the FW */
5762 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5763 out:
5764 mutex_unlock(&wl->mutex);
5766 return ret;
5769 /* can't be const, mac80211 writes to this */
5770 static struct ieee80211_rate wl1271_rates[] = {
5771 { .bitrate = 10,
5772 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5773 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5774 { .bitrate = 20,
5775 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5776 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5777 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5778 { .bitrate = 55,
5779 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5780 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5781 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5782 { .bitrate = 110,
5783 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5784 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5785 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5786 { .bitrate = 60,
5787 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5788 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5789 { .bitrate = 90,
5790 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5791 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5792 { .bitrate = 120,
5793 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5794 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5795 { .bitrate = 180,
5796 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5797 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5798 { .bitrate = 240,
5799 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5800 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5801 { .bitrate = 360,
5802 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5803 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5804 { .bitrate = 480,
5805 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5806 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5807 { .bitrate = 540,
5808 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5809 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
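/*
 * Note: mac80211's ieee80211_rate.bitrate field is expressed in units of
 * 100 kbps, so .bitrate = 10 above means 1 Mbps and .bitrate = 540 means
 * 54 Mbps.
 */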
5812 /* can't be const, mac80211 writes to this */
5813 static struct ieee80211_channel wl1271_channels[] = {
5814 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5815 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5816 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5817 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5818 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5819 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5820 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5821 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5822 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5823 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5824 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5825 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5826 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5827 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5830 /* can't be const, mac80211 writes to this */
5831 static struct ieee80211_supported_band wl1271_band_2ghz = {
5832 .channels = wl1271_channels,
5833 .n_channels = ARRAY_SIZE(wl1271_channels),
5834 .bitrates = wl1271_rates,
5835 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5838 /* 5 GHz data rates for WL1273 */
5839 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5840 { .bitrate = 60,
5841 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5842 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5843 { .bitrate = 90,
5844 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5845 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5846 { .bitrate = 120,
5847 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5848 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5849 { .bitrate = 180,
5850 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5851 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5852 { .bitrate = 240,
5853 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5854 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5855 { .bitrate = 360,
5856 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5857 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5858 { .bitrate = 480,
5859 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5860 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5861 { .bitrate = 540,
5862 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5863 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5866 /* 5 GHz band channels for WL1273 */
5867 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5868 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5869 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5870 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5871 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5872 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5873 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5874 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5875 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5876 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5877 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5878 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5879 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5880 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5881 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5882 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5883 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5884 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5885 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5886 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5887 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5888 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5889 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5890 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5891 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5892 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5893 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5894 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5895 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5896 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5897 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5898 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5901 static struct ieee80211_supported_band wl1271_band_5ghz = {
5902 .channels = wl1271_channels_5ghz,
5903 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5904 .bitrates = wl1271_rates_5ghz,
5905 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5908 static const struct ieee80211_ops wl1271_ops = {
5909 .start = wl1271_op_start,
5910 .stop = wlcore_op_stop,
5911 .add_interface = wl1271_op_add_interface,
5912 .remove_interface = wl1271_op_remove_interface,
5913 .change_interface = wl12xx_op_change_interface,
5914 #ifdef CONFIG_PM
5915 .suspend = wl1271_op_suspend,
5916 .resume = wl1271_op_resume,
5917 #endif
5918 .config = wl1271_op_config,
5919 .prepare_multicast = wl1271_op_prepare_multicast,
5920 .configure_filter = wl1271_op_configure_filter,
5921 .tx = wl1271_op_tx,
5922 .set_key = wlcore_op_set_key,
5923 .hw_scan = wl1271_op_hw_scan,
5924 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5925 .sched_scan_start = wl1271_op_sched_scan_start,
5926 .sched_scan_stop = wl1271_op_sched_scan_stop,
5927 .bss_info_changed = wl1271_op_bss_info_changed,
5928 .set_frag_threshold = wl1271_op_set_frag_threshold,
5929 .set_rts_threshold = wl1271_op_set_rts_threshold,
5930 .conf_tx = wl1271_op_conf_tx,
5931 .get_tsf = wl1271_op_get_tsf,
5932 .get_survey = wl1271_op_get_survey,
5933 .sta_state = wl12xx_op_sta_state,
5934 .ampdu_action = wl1271_op_ampdu_action,
5935 .tx_frames_pending = wl1271_tx_frames_pending,
5936 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5937 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5938 .channel_switch = wl12xx_op_channel_switch,
5939 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
5940 .flush = wlcore_op_flush,
5941 .remain_on_channel = wlcore_op_remain_on_channel,
5942 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5943 .add_chanctx = wlcore_op_add_chanctx,
5944 .remove_chanctx = wlcore_op_remove_chanctx,
5945 .change_chanctx = wlcore_op_change_chanctx,
5946 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5947 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5948 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5949 .sta_rc_update = wlcore_op_sta_rc_update,
5950 .sta_statistics = wlcore_op_sta_statistics,
5951 .get_expected_throughput = wlcore_op_get_expected_throughput,
5952 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5956 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
5958 u8 idx;
5960 BUG_ON(band >= 2);
5962 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5963 wl1271_error("Illegal RX rate from HW: %d", rate);
5964 return 0;
5967 idx = wl->band_rate_to_idx[band][rate];
5968 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5969 wl1271_error("Unsupported RX rate from HW: %d", rate);
5970 return 0;
5973 return idx;
5976 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5978 int i;
5980 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5981 oui, nic);
5983 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5984 wl1271_warning("NIC part of the MAC address wraps around!");
5986 for (i = 0; i < wl->num_mac_addr; i++) {
5987 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5988 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5989 wl->addresses[i].addr[2] = (u8) oui;
5990 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5991 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5992 wl->addresses[i].addr[5] = (u8) nic;
5993 nic++;
5996 /* we may be one address short at the most */
5997 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5999 /*
6000 * turn on the LAA bit in the first address and use it as
6001 * the last address.
6002 */
6003 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6004 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6005 memcpy(&wl->addresses[idx], &wl->addresses[0],
6006 sizeof(wl->addresses[0]));
6007 /* LAA bit */
6008 wl->addresses[idx].addr[0] |= BIT(1);
6011 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6012 wl->hw->wiphy->addresses = wl->addresses;
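/*
 * Illustration (hypothetical values): with oui 0x080028, nic 0x000001 and
 * three supported addresses, the table becomes 08:00:28:00:00:01,
 * 08:00:28:00:00:02 and 08:00:28:00:00:03. If the chip reports one
 * address fewer, the last slot is a copy of the first with the locally
 * administered bit (bit 1 of the first octet) set, as done above.
 */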
6015 static int wl12xx_get_hw_info(struct wl1271 *wl)
6017 int ret;
6019 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6020 if (ret < 0)
6021 goto out;
6023 wl->fuse_oui_addr = 0;
6024 wl->fuse_nic_addr = 0;
6026 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6027 if (ret < 0)
6028 goto out;
6030 if (wl->ops->get_mac)
6031 ret = wl->ops->get_mac(wl);
6033 out:
6034 return ret;
6037 static int wl1271_register_hw(struct wl1271 *wl)
6039 int ret;
6040 u32 oui_addr = 0, nic_addr = 0;
6041 struct platform_device *pdev = wl->pdev;
6042 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6044 if (wl->mac80211_registered)
6045 return 0;
6047 if (wl->nvs_len >= 12) {
6048 /* NOTE: The wl->nvs->nvs element must be first, in
6049 * order to simplify the casting, we assume it is at
6050 * the beginning of the wl->nvs structure.
6051 */
6052 u8 *nvs_ptr = (u8 *)wl->nvs;
6054 oui_addr =
6055 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6056 nic_addr =
6057 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6060 /* if the MAC address is zeroed in the NVS derive from fuse */
6061 if (oui_addr == 0 && nic_addr == 0) {
6062 oui_addr = wl->fuse_oui_addr;
6063 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6064 nic_addr = wl->fuse_nic_addr + 1;
6067 if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6068 wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.\n");
6069 if (!strcmp(pdev_data->family->name, "wl18xx")) {
6070 wl1271_warning("This default nvs file can be removed from the file system\n");
6071 } else {
6072 wl1271_warning("Your device performance is not optimized.\n");
6073 wl1271_warning("Please use the calibrator tool to configure your device.\n");
6076 if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6077 wl1271_warning("Fuse mac address is zero. Using a random mac\n");
6078 /* Use TI oui and a random nic */
6079 oui_addr = WLCORE_TI_OUI_ADDRESS;
6080 nic_addr = get_random_int();
6081 } else {
6082 oui_addr = wl->fuse_oui_addr;
6083 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6084 nic_addr = wl->fuse_nic_addr + 1;
6088 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6090 ret = ieee80211_register_hw(wl->hw);
6091 if (ret < 0) {
6092 wl1271_error("unable to register mac80211 hw: %d", ret);
6093 goto out;
6096 wl->mac80211_registered = true;
6098 wl1271_debugfs_init(wl);
6100 wl1271_notice("loaded");
6102 out:
6103 return ret;
6106 static void wl1271_unregister_hw(struct wl1271 *wl)
6108 if (wl->plt)
6109 wl1271_plt_stop(wl);
6111 ieee80211_unregister_hw(wl->hw);
6112 wl->mac80211_registered = false;
6116 static int wl1271_init_ieee80211(struct wl1271 *wl)
6118 int i;
6119 static const u32 cipher_suites[] = {
6120 WLAN_CIPHER_SUITE_WEP40,
6121 WLAN_CIPHER_SUITE_WEP104,
6122 WLAN_CIPHER_SUITE_TKIP,
6123 WLAN_CIPHER_SUITE_CCMP,
6124 WL1271_CIPHER_SUITE_GEM,
6127 /* The tx descriptor buffer */
6128 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6130 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6131 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6133 /* unit us */
6134 /* FIXME: find a proper value */
6135 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6137 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6138 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6139 ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6140 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6141 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6142 ieee80211_hw_set(wl->hw, AP_LINK_PS);
6143 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6144 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6145 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6146 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6147 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6148 ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6149 ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6150 ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6152 wl->hw->wiphy->cipher_suites = cipher_suites;
6153 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6155 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6156 BIT(NL80211_IFTYPE_AP) |
6157 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6158 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6159 #ifdef CONFIG_MAC80211_MESH
6160 BIT(NL80211_IFTYPE_MESH_POINT) |
6161 #endif
6162 BIT(NL80211_IFTYPE_P2P_GO);
6164 wl->hw->wiphy->max_scan_ssids = 1;
6165 wl->hw->wiphy->max_sched_scan_ssids = 16;
6166 wl->hw->wiphy->max_match_sets = 16;
6167 /*
6168 * Maximum length of elements in scanning probe request templates
6169 * should be the maximum length possible for a template, without
6170 * the IEEE80211 header of the template
6171 */
6172 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6173 sizeof(struct ieee80211_header);
6175 wl->hw->wiphy->max_sched_scan_reqs = 1;
6176 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6177 sizeof(struct ieee80211_header);
6179 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6181 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6182 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6183 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6185 wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6187 /* make sure all our channels fit in the scanned_ch bitmask */
6188 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6189 ARRAY_SIZE(wl1271_channels_5ghz) >
6190 WL1271_MAX_CHANNELS);
6191 /*
6192 * clear channel flags from the previous usage
6193 * and restore max_power & max_antenna_gain values.
6194 */
6195 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6196 wl1271_band_2ghz.channels[i].flags = 0;
6197 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6198 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6201 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6202 wl1271_band_5ghz.channels[i].flags = 0;
6203 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6204 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6207 /*
6208 * We keep local copies of the band structs because we need to
6209 * modify them on a per-device basis.
6210 */
6211 memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6212 sizeof(wl1271_band_2ghz));
6213 memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6214 &wl->ht_cap[NL80211_BAND_2GHZ],
6215 sizeof(*wl->ht_cap));
6216 memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6217 sizeof(wl1271_band_5ghz));
6218 memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6219 &wl->ht_cap[NL80211_BAND_5GHZ],
6220 sizeof(*wl->ht_cap));
6222 wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6223 &wl->bands[NL80211_BAND_2GHZ];
6224 wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6225 &wl->bands[NL80211_BAND_5GHZ];
6227 /*
6228 * allow 4 queues per mac address we support +
6229 * 1 cab queue per mac + one global offchannel Tx queue
6230 */
6231 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6233 /* the last queue is the offchannel queue */
6234 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
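/*
 * With the usual wlcore values (NUM_TX_QUEUES = 4 data queues and
 * WLCORE_NUM_MAC_ADDRESSES = 3), this works out to (4 + 1) * 3 + 1 = 16
 * hw queues, the last of which is reserved for offchannel tx as set
 * just above.
 */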
6235 wl->hw->max_rates = 1;
6237 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6239 /* the FW answers probe-requests in AP-mode */
6240 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6241 wl->hw->wiphy->probe_resp_offload =
6242 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6243 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6244 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6246 /* allowed interface combinations */
6247 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6248 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6250 /* register vendor commands */
6251 wlcore_set_vendor_commands(wl->hw->wiphy);
6253 SET_IEEE80211_DEV(wl->hw, wl->dev);
6255 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6256 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6258 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6260 return 0;
6263 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6264 u32 mbox_size)
6266 struct ieee80211_hw *hw;
6267 struct wl1271 *wl;
6268 int i, j, ret;
6269 unsigned int order;
6271 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6272 if (!hw) {
6273 wl1271_error("could not alloc ieee80211_hw");
6274 ret = -ENOMEM;
6275 goto err_hw_alloc;
6278 wl = hw->priv;
6279 memset(wl, 0, sizeof(*wl));
6281 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6282 if (!wl->priv) {
6283 wl1271_error("could not alloc wl priv");
6284 ret = -ENOMEM;
6285 goto err_priv_alloc;
6288 INIT_LIST_HEAD(&wl->wlvif_list);
6290 wl->hw = hw;
6292 /*
6293 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6294 * we don't allocate any additional resource here, so that's fine.
6295 */
6296 for (i = 0; i < NUM_TX_QUEUES; i++)
6297 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6298 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6300 skb_queue_head_init(&wl->deferred_rx_queue);
6301 skb_queue_head_init(&wl->deferred_tx_queue);
6303 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6304 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6305 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6306 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6307 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6308 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6309 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6311 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6312 if (!wl->freezable_wq) {
6313 ret = -ENOMEM;
6314 goto err_hw;
6317 wl->channel = 0;
6318 wl->rx_counter = 0;
6319 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6320 wl->band = NL80211_BAND_2GHZ;
6321 wl->channel_type = NL80211_CHAN_NO_HT;
6322 wl->flags = 0;
6323 wl->sg_enabled = true;
6324 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6325 wl->recovery_count = 0;
6326 wl->hw_pg_ver = -1;
6327 wl->ap_ps_map = 0;
6328 wl->ap_fw_ps_map = 0;
6329 wl->quirks = 0;
6330 wl->system_hlid = WL12XX_SYSTEM_HLID;
6331 wl->active_sta_count = 0;
6332 wl->active_link_count = 0;
6333 wl->fwlog_size = 0;
6335 /* The system link is always allocated */
6336 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6338 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6339 for (i = 0; i < wl->num_tx_desc; i++)
6340 wl->tx_frames[i] = NULL;
6342 spin_lock_init(&wl->wl_lock);
6344 wl->state = WLCORE_STATE_OFF;
6345 wl->fw_type = WL12XX_FW_TYPE_NONE;
6346 mutex_init(&wl->mutex);
6347 mutex_init(&wl->flush_mutex);
6348 init_completion(&wl->nvs_loading_complete);
6350 order = get_order(aggr_buf_size);
6351 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6352 if (!wl->aggr_buf) {
6353 ret = -ENOMEM;
6354 goto err_wq;
6356 wl->aggr_buf_size = aggr_buf_size;
6358 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6359 if (!wl->dummy_packet) {
6360 ret = -ENOMEM;
6361 goto err_aggr;
6364 /* Allocate one page for the FW log */
6365 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6366 if (!wl->fwlog) {
6367 ret = -ENOMEM;
6368 goto err_dummy_packet;
6371 wl->mbox_size = mbox_size;
6372 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6373 if (!wl->mbox) {
6374 ret = -ENOMEM;
6375 goto err_fwlog;
6376 }
6378 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6379 if (!wl->buffer_32) {
6380 ret = -ENOMEM;
6381 goto err_mbox;
6382 }
6384 return hw;
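6385 /* Error path: undo the allocations above in reverse order. */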
6386 err_mbox:
6387 kfree(wl->mbox);
6389 err_fwlog:
6390 free_page((unsigned long)wl->fwlog);
6392 err_dummy_packet:
6393 dev_kfree_skb(wl->dummy_packet);
6395 err_aggr:
6396 free_pages((unsigned long)wl->aggr_buf, order);
6398 err_wq:
6399 destroy_workqueue(wl->freezable_wq);
6401 err_hw:
6402 wl1271_debugfs_exit(wl);
6403 kfree(wl->priv);
6405 err_priv_alloc:
6406 ieee80211_free_hw(hw);
6408 err_hw_alloc:
6410 return ERR_PTR(ret);
6411 }
6412 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6414 int wlcore_free_hw(struct wl1271 *wl)
6415 {
6416 /* Unblock any fwlog readers */
6417 mutex_lock(&wl->mutex);
6418 wl->fwlog_size = -1;
6419 mutex_unlock(&wl->mutex);
6421 wlcore_sysfs_free(wl);
6423 kfree(wl->buffer_32);
6424 kfree(wl->mbox);
6425 free_page((unsigned long)wl->fwlog);
6426 dev_kfree_skb(wl->dummy_packet);
6427 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6429 wl1271_debugfs_exit(wl);
6431 vfree(wl->fw);
6432 wl->fw = NULL;
6433 wl->fw_type = WL12XX_FW_TYPE_NONE;
6434 kfree(wl->nvs);
6435 wl->nvs = NULL;
6437 kfree(wl->raw_fw_status);
6438 kfree(wl->fw_status);
6439 kfree(wl->tx_res_if);
6440 destroy_workqueue(wl->freezable_wq);
6442 kfree(wl->priv);
6443 ieee80211_free_hw(wl->hw);
6445 return 0;
6446 }
6447 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6449 #ifdef CONFIG_PM
6450 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6451 .flags = WIPHY_WOWLAN_ANY,
6452 .n_patterns = WL1271_MAX_RX_FILTERS,
6453 .pattern_min_len = 1,
6454 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6455 };
6456 #endif
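6457 /* Hard IRQ handler used for edge-triggered interrupts: it does no work itself and only wakes the threaded handler (wlcore_irq). */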
6458 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6459 {
6460 return IRQ_WAKE_THREAD;
6461 }
6463 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6464 {
6465 struct wl1271 *wl = context;
6466 struct platform_device *pdev = wl->pdev;
6467 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6468 struct resource *res;
6470 int ret;
6471 irq_handler_t hardirq_fn = NULL;
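6472 /* Copy the NVS (calibration) data if the firmware loader found it; otherwise continue with wl->nvs == NULL. */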
6473 if (fw) {
6474 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6475 if (!wl->nvs) {
6476 wl1271_error("Could not allocate nvs data");
6477 goto out;
6478 }
6479 wl->nvs_len = fw->size;
6480 } else if (pdev_data->family->nvs_name) {
6481 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6482 pdev_data->family->nvs_name);
6483 wl->nvs = NULL;
6484 wl->nvs_len = 0;
6485 } else {
6486 wl->nvs = NULL;
6487 wl->nvs_len = 0;
6488 }
6490 ret = wl->ops->setup(wl);
6491 if (ret < 0)
6492 goto out_free_nvs;
6494 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6496 /* adjust some runtime configuration parameters */
6497 wlcore_adjust_conf(wl);
6499 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6500 if (!res) {
6501 wl1271_error("Could not get IRQ resource");
6502 goto out_free_nvs;
6503 }
6505 wl->irq = res->start;
6506 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6507 wl->if_ops = pdev_data->if_ops;
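6508 /* Edge-triggered IRQs get the dummy hard handler above so the thread is woken directly; level-triggered IRQs rely on IRQF_ONESHOT to keep the line masked until the threaded handler has run. */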
6509 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6510 hardirq_fn = wlcore_hardirq;
6511 else
6512 wl->irq_flags |= IRQF_ONESHOT;
6514 ret = wl12xx_set_power_on(wl);
6515 if (ret < 0)
6516 goto out_free_nvs;
6518 ret = wl12xx_get_hw_info(wl);
6519 if (ret < 0) {
6520 wl1271_error("couldn't get hw info");
6521 wl1271_power_off(wl);
6522 goto out_free_nvs;
6523 }
6525 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6526 wl->irq_flags, pdev->name, wl);
6527 if (ret < 0) {
6528 wl1271_error("interrupt configuration failed");
6529 wl1271_power_off(wl);
6530 goto out_free_nvs;
6531 }
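6532 /* If the IRQ can wake the system, enable device wakeup and advertise WoWLAN support when the platform keeps the chip powered in suspend. */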
6533 #ifdef CONFIG_PM
6534 ret = enable_irq_wake(wl->irq);
6535 if (!ret) {
6536 wl->irq_wake_enabled = true;
6537 device_init_wakeup(wl->dev, 1);
6538 if (pdev_data->pwr_in_suspend)
6539 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6540 }
6541 #endif
6542 disable_irq(wl->irq);
6543 wl1271_power_off(wl);
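6544 /* The chip stays powered off from here on (until the interface is started); identify_chip() below only needs the chip id already read by wl12xx_get_hw_info(). */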
6545 ret = wl->ops->identify_chip(wl);
6546 if (ret < 0)
6547 goto out_irq;
6549 ret = wl1271_init_ieee80211(wl);
6550 if (ret)
6551 goto out_irq;
6553 ret = wl1271_register_hw(wl);
6554 if (ret)
6555 goto out_irq;
6557 ret = wlcore_sysfs_init(wl);
6558 if (ret)
6559 goto out_unreg;
6561 wl->initialized = true;
6562 goto out;
6564 out_unreg:
6565 wl1271_unregister_hw(wl);
6567 out_irq:
6568 free_irq(wl->irq, wl);
6570 out_free_nvs:
6571 kfree(wl->nvs);
6573 out:
6574 release_firmware(fw);
6575 complete_all(&wl->nvs_loading_complete);
6576 }
6578 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6579 {
6580 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6581 const char *nvs_name;
6582 int ret = 0;
6584 if (!wl->ops || !wl->ptable || !pdev_data)
6585 return -EINVAL;
6587 wl->dev = &pdev->dev;
6588 wl->pdev = pdev;
6589 platform_set_drvdata(pdev, wl);
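6590 /* NVS loading is asynchronous: request_firmware_nowait() defers the rest of the probe to wlcore_nvs_cb(), and nvs_loading_complete lets wlcore_remove() wait for it. */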
6591 if (pdev_data->family && pdev_data->family->nvs_name) {
6592 nvs_name = pdev_data->family->nvs_name;
6593 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6594 nvs_name, &pdev->dev, GFP_KERNEL,
6595 wl, wlcore_nvs_cb);
6596 if (ret < 0) {
6597 wl1271_error("request_firmware_nowait failed for %s: %d",
6598 nvs_name, ret);
6599 complete_all(&wl->nvs_loading_complete);
6600 }
6601 } else {
6602 wlcore_nvs_cb(NULL, wl);
6603 }
6605 return ret;
6606 }
6607 EXPORT_SYMBOL_GPL(wlcore_probe);
6609 int wlcore_remove(struct platform_device *pdev)
6610 {
6611 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6612 struct wl1271 *wl = platform_get_drvdata(pdev);
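6613 /* If probing was deferred to wlcore_nvs_cb(), wait for it before tearing anything down; bail out early if initialization never completed. */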
6614 if (pdev_data->family && pdev_data->family->nvs_name)
6615 wait_for_completion(&wl->nvs_loading_complete);
6616 if (!wl->initialized)
6617 return 0;
6619 if (wl->irq_wake_enabled) {
6620 device_init_wakeup(wl->dev, 0);
6621 disable_irq_wake(wl->irq);
6622 }
6623 wl1271_unregister_hw(wl);
6624 free_irq(wl->irq, wl);
6625 wlcore_free_hw(wl);
6627 return 0;
6628 }
6629 EXPORT_SYMBOL_GPL(wlcore_remove);
6631 u32 wl12xx_debug_level = DEBUG_NONE;
6632 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6633 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6634 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6636 module_param_named(fwlog, fwlog_param, charp, 0);
6637 MODULE_PARM_DESC(fwlog,
6638 "FW logger options: continuous, dbgpins or disable");
6640 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6641 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6643 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6644 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6646 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6647 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6649 MODULE_LICENSE("GPL");
6650 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6651 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");