linux-3.6.7-moxart.git: drivers/net/wireless/ti/wlcore/main.c
2 /*
3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
38 #include "wlcore.h"
39 #include "debug.h"
40 #include "wl12xx_80211.h"
41 #include "io.h"
42 #include "event.h"
43 #include "tx.h"
44 #include "rx.h"
45 #include "ps.h"
46 #include "init.h"
47 #include "debugfs.h"
48 #include "cmd.h"
49 #include "boot.h"
50 #include "testmode.h"
51 #include "scan.h"
52 #include "hw_ops.h"
54 #define WL1271_BOOT_RETRIES 3
58 static char *fwlog_param;
59 static bool bug_on_recovery;
60 static bool no_recovery;
62 static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 struct ieee80211_vif *vif,
64 bool reset_tx_queues);
65 static void wlcore_op_stop_locked(struct wl1271 *wl);
66 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 struct wl12xx_vif *wlvif)
71 int ret;
73 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
74 return -EINVAL;
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
77 return 0;
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
80 return 0;
82 ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
83 if (ret < 0)
84 return ret;
86 wl12xx_croc(wl, wlvif->role_id);
88 wl1271_info("Association completed.");
89 return 0;
92 static int wl1271_reg_notify(struct wiphy *wiphy,
93 struct regulatory_request *request)
95 struct ieee80211_supported_band *band;
96 struct ieee80211_channel *ch;
97 int i;
99 band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 for (i = 0; i < band->n_channels; i++) {
101 ch = &band->channels[i];
102 if (ch->flags & IEEE80211_CHAN_DISABLED)
103 continue;
105 if (ch->flags & IEEE80211_CHAN_RADAR)
106 ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 IEEE80211_CHAN_PASSIVE_SCAN;
111 return 0;
114 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
115 bool enable)
117 int ret = 0;
119 /* we should hold wl->mutex */
120 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
121 if (ret < 0)
122 goto out;
124 if (enable)
125 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
126 else
127 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
128 out:
129 return ret;
133 * this function is called when the rx_streaming interval
134 * has been changed or rx_streaming should be disabled
136 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
138 int ret = 0;
139 int period = wl->conf.rx_streaming.interval;
141 /* don't reconfigure if rx_streaming is disabled */
142 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
143 goto out;
145 /* reconfigure/disable according to new streaming_period */
146 if (period &&
147 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
148 (wl->conf.rx_streaming.always ||
149 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
150 ret = wl1271_set_rx_streaming(wl, wlvif, true);
151 else {
152 ret = wl1271_set_rx_streaming(wl, wlvif, false);
153 /* don't cancel_work_sync since we might deadlock */
154 del_timer_sync(&wlvif->rx_streaming_timer);
156 out:
157 return ret;
160 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
162 int ret;
163 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
164 rx_streaming_enable_work);
165 struct wl1271 *wl = wlvif->wl;
167 mutex_lock(&wl->mutex);
169 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
170 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
171 (!wl->conf.rx_streaming.always &&
172 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
173 goto out;
175 if (!wl->conf.rx_streaming.interval)
176 goto out;
178 ret = wl1271_ps_elp_wakeup(wl);
179 if (ret < 0)
180 goto out;
182 ret = wl1271_set_rx_streaming(wl, wlvif, true);
183 if (ret < 0)
184 goto out_sleep;
186 /* stop it after some time of inactivity */
187 mod_timer(&wlvif->rx_streaming_timer,
188 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
190 out_sleep:
191 wl1271_ps_elp_sleep(wl);
192 out:
193 mutex_unlock(&wl->mutex);
196 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
198 int ret;
199 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
200 rx_streaming_disable_work);
201 struct wl1271 *wl = wlvif->wl;
203 mutex_lock(&wl->mutex);
205 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
206 goto out;
208 ret = wl1271_ps_elp_wakeup(wl);
209 if (ret < 0)
210 goto out;
212 ret = wl1271_set_rx_streaming(wl, wlvif, false);
213 if (ret)
214 goto out_sleep;
216 out_sleep:
217 wl1271_ps_elp_sleep(wl);
218 out:
219 mutex_unlock(&wl->mutex);
222 static void wl1271_rx_streaming_timer(unsigned long data)
224 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
225 struct wl1271 *wl = wlvif->wl;
226 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
229 /* wl->mutex must be taken */
230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
232 /* if the watchdog is not armed, don't do anything */
233 if (wl->tx_allocated_blocks == 0)
234 return;
236 cancel_delayed_work(&wl->tx_watchdog_work);
237 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
238 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
241 static void wl12xx_tx_watchdog_work(struct work_struct *work)
243 struct delayed_work *dwork;
244 struct wl1271 *wl;
246 dwork = container_of(work, struct delayed_work, work);
247 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
249 mutex_lock(&wl->mutex);
251 if (unlikely(wl->state == WL1271_STATE_OFF))
252 goto out;
254 /* Tx went out in the meantime - everything is ok */
255 if (unlikely(wl->tx_allocated_blocks == 0))
256 goto out;
259 * if a ROC is in progress, we might not have any Tx for a long
260 * time (e.g. pending Tx on the non-ROC channels)
262 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
263 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
264 wl->conf.tx.tx_watchdog_timeout);
265 wl12xx_rearm_tx_watchdog_locked(wl);
266 goto out;
270 * if a scan is in progress, we might not have any Tx for a long
271 * time
273 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
274 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
275 wl->conf.tx.tx_watchdog_timeout);
276 wl12xx_rearm_tx_watchdog_locked(wl);
277 goto out;
281 * AP might cache a frame for a long time for a sleeping station,
282 * so rearm the timer if there's an AP interface with stations. If
283 * Tx is genuinely stuck we will hopefully discover it when all
284 * stations are removed due to inactivity.
286 if (wl->active_sta_count) {
287 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
288 " %d stations",
289 wl->conf.tx.tx_watchdog_timeout,
290 wl->active_sta_count);
291 wl12xx_rearm_tx_watchdog_locked(wl);
292 goto out;
295 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
296 wl->conf.tx.tx_watchdog_timeout);
297 wl12xx_queue_recovery_work(wl);
299 out:
300 mutex_unlock(&wl->mutex);
303 static void wlcore_adjust_conf(struct wl1271 *wl)
305 /* Adjust settings according to optional module parameters */
306 if (fwlog_param) {
307 if (!strcmp(fwlog_param, "continuous")) {
308 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
309 } else if (!strcmp(fwlog_param, "ondemand")) {
310 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
311 } else if (!strcmp(fwlog_param, "dbgpins")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 } else if (!strcmp(fwlog_param, "disable")) {
315 wl->conf.fwlog.mem_blocks = 0;
316 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
317 } else {
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
323 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
324 struct wl12xx_vif *wlvif,
325 u8 hlid, u8 tx_pkts)
327 bool fw_ps, single_sta;
329 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
330 single_sta = (wl->active_sta_count == 1);
333 * Wake up from high-level PS if the STA is asleep with too few
334 * packets in FW or if the STA is awake.
336 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
337 wl12xx_ps_link_end(wl, wlvif, hlid);
340 * Start high-level PS if the STA is asleep with enough blocks in FW.
341 * Make an exception if this is the only connected station. In this
342 * case FW-memory congestion is not a problem.
344 else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
345 wl12xx_ps_link_start(wl, wlvif, hlid, true);
348 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
349 struct wl12xx_vif *wlvif,
350 struct wl_fw_status_2 *status)
352 struct wl1271_link *lnk;
353 u32 cur_fw_ps_map;
354 u8 hlid, cnt;
356 /* TODO: also use link_fast_bitmap here */
358 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
359 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
360 wl1271_debug(DEBUG_PSM,
361 "link ps prev 0x%x cur 0x%x changed 0x%x",
362 wl->ap_fw_ps_map, cur_fw_ps_map,
363 wl->ap_fw_ps_map ^ cur_fw_ps_map);
365 wl->ap_fw_ps_map = cur_fw_ps_map;
368 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
369 lnk = &wl->links[hlid];
370 cnt = status->counters.tx_lnk_free_pkts[hlid] -
371 lnk->prev_freed_pkts;
373 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
374 lnk->allocated_pkts -= cnt;
376 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
377 lnk->allocated_pkts);
381 static int wlcore_fw_status(struct wl1271 *wl,
382 struct wl_fw_status_1 *status_1,
383 struct wl_fw_status_2 *status_2)
385 struct wl12xx_vif *wlvif;
386 struct timespec ts;
387 u32 old_tx_blk_count = wl->tx_blocks_available;
388 int avail, freed_blocks;
389 int i;
390 size_t status_len;
391 int ret;
393 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
394 sizeof(*status_2) + wl->fw_status_priv_len;
396 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
397 status_len, false);
398 if (ret < 0)
399 return ret;
401 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
402 "drv_rx_counter = %d, tx_results_counter = %d)",
403 status_1->intr,
404 status_1->fw_rx_counter,
405 status_1->drv_rx_counter,
406 status_1->tx_results_counter);
408 for (i = 0; i < NUM_TX_QUEUES; i++) {
409 /* prevent wrap-around in freed-packets counter */
410 wl->tx_allocated_pkts[i] -=
411 (status_2->counters.tx_released_pkts[i] -
412 wl->tx_pkts_freed[i]) & 0xff;
414 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
417 /* prevent wrap-around in total blocks counter */
418 if (likely(wl->tx_blocks_freed <=
419 le32_to_cpu(status_2->total_released_blks)))
420 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
421 wl->tx_blocks_freed;
422 else
423 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
424 le32_to_cpu(status_2->total_released_blks);
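/*
 * Illustrative note (not part of the original sources): total_released_blks
 * is a free-running 32-bit firmware counter, so the else branch above
 * reconstructs the delta across a wrap. For example, with
 * tx_blocks_freed == 0xfffffff0 and a new counter value of 0x10,
 * freed_blocks = 0x100000000 - 0xfffffff0 + 0x10 = 0x20. The per-queue
 * tx_released_pkts counters are handled the same way by the "& 0xff" mask
 * above, since they are effectively 8-bit counters.
 */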
426 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
428 wl->tx_allocated_blocks -= freed_blocks;
431 * If the FW freed some blocks:
432 * If we still have allocated blocks - re-arm the timer, Tx is
433 * not stuck. Otherwise, cancel the timer (no Tx currently).
435 if (freed_blocks) {
436 if (wl->tx_allocated_blocks)
437 wl12xx_rearm_tx_watchdog_locked(wl);
438 else
439 cancel_delayed_work(&wl->tx_watchdog_work);
442 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
445 * The FW might change the total number of TX memblocks before
446 * we get a notification about blocks being released. Thus, the
447 * available blocks calculation might yield a temporary result
448 * which is lower than the actual available blocks. Keeping in
449 * mind that only blocks that were allocated can be moved from
450 * TX to RX, tx_blocks_available should never decrease here.
452 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
453 avail);
455 /* if more blocks are available now, tx work can be scheduled */
456 if (wl->tx_blocks_available > old_tx_blk_count)
457 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
459 /* for AP update num of allocated TX blocks per link and ps status */
460 wl12xx_for_each_wlvif_ap(wl, wlvif) {
461 wl12xx_irq_update_links_status(wl, wlvif, status_2);
464 /* update the host-chipset time offset */
465 getnstimeofday(&ts);
466 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
467 (s64)le32_to_cpu(status_2->fw_localtime);
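/*
 * Clarifying note (added, not in the original sources): the ">> 10" divides
 * the host time in nanoseconds by 1024, i.e. converts it to roughly
 * microseconds, presumably matching the unit of the firmware's fw_localtime
 * counter; time_offset is therefore the host-vs-firmware clock offset in
 * that same ~microsecond unit.
 */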
469 return 0;
472 static void wl1271_flush_deferred_work(struct wl1271 *wl)
474 struct sk_buff *skb;
476 /* Pass all received frames to the network stack */
477 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
478 ieee80211_rx_ni(wl->hw, skb);
480 /* Return sent skbs to the network stack */
481 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
482 ieee80211_tx_status_ni(wl->hw, skb);
485 static void wl1271_netstack_work(struct work_struct *work)
487 struct wl1271 *wl =
488 container_of(work, struct wl1271, netstack_work);
490 do {
491 wl1271_flush_deferred_work(wl);
492 } while (skb_queue_len(&wl->deferred_rx_queue));
495 #define WL1271_IRQ_MAX_LOOPS 256
497 static int wlcore_irq_locked(struct wl1271 *wl)
499 int ret = 0;
500 u32 intr;
501 int loopcount = WL1271_IRQ_MAX_LOOPS;
502 bool done = false;
503 unsigned int defer_count;
504 unsigned long flags;
507 * In case an edge-triggered interrupt must be used, we cannot iterate
508 * more than once without introducing race conditions with the hardirq.
510 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
511 loopcount = 1;
513 wl1271_debug(DEBUG_IRQ, "IRQ work");
515 if (unlikely(wl->state == WL1271_STATE_OFF))
516 goto out;
518 ret = wl1271_ps_elp_wakeup(wl);
519 if (ret < 0)
520 goto out;
522 while (!done && loopcount--) {
524 * In order to avoid a race with the hardirq, clear the flag
525 * before acknowledging the chip. Since the mutex is held,
526 * wl1271_ps_elp_wakeup cannot be called concurrently.
528 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
529 smp_mb__after_clear_bit();
531 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
532 if (ret < 0)
533 goto out;
535 wlcore_hw_tx_immediate_compl(wl);
537 intr = le32_to_cpu(wl->fw_status_1->intr);
538 intr &= WLCORE_ALL_INTR_MASK;
539 if (!intr) {
540 done = true;
541 continue;
544 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
545 wl1271_error("HW watchdog interrupt received! starting recovery.");
546 wl->watchdog_recovery = true;
547 ret = -EIO;
549 /* restarting the chip. ignore any other interrupt. */
550 goto out;
553 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
554 wl1271_error("SW watchdog interrupt received! "
555 "starting recovery.");
556 wl->watchdog_recovery = true;
557 ret = -EIO;
559 /* restarting the chip. ignore any other interrupt. */
560 goto out;
563 if (likely(intr & WL1271_ACX_INTR_DATA)) {
564 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
566 ret = wlcore_rx(wl, wl->fw_status_1);
567 if (ret < 0)
568 goto out;
570 /* Check if any tx blocks were freed */
571 spin_lock_irqsave(&wl->wl_lock, flags);
572 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
573 wl1271_tx_total_queue_count(wl) > 0) {
574 spin_unlock_irqrestore(&wl->wl_lock, flags);
576 * In order to avoid starvation of the TX path,
577 * call the work function directly.
579 ret = wlcore_tx_work_locked(wl);
580 if (ret < 0)
581 goto out;
582 } else {
583 spin_unlock_irqrestore(&wl->wl_lock, flags);
586 /* check for tx results */
587 ret = wlcore_hw_tx_delayed_compl(wl);
588 if (ret < 0)
589 goto out;
591 /* Make sure the deferred queues don't get too long */
592 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
593 skb_queue_len(&wl->deferred_rx_queue);
594 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
595 wl1271_flush_deferred_work(wl);
598 if (intr & WL1271_ACX_INTR_EVENT_A) {
599 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
600 ret = wl1271_event_handle(wl, 0);
601 if (ret < 0)
602 goto out;
605 if (intr & WL1271_ACX_INTR_EVENT_B) {
606 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
607 ret = wl1271_event_handle(wl, 1);
608 if (ret < 0)
609 goto out;
612 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
613 wl1271_debug(DEBUG_IRQ,
614 "WL1271_ACX_INTR_INIT_COMPLETE");
616 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
617 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
620 wl1271_ps_elp_sleep(wl);
622 out:
623 return ret;
626 static irqreturn_t wlcore_irq(int irq, void *cookie)
628 int ret;
629 unsigned long flags;
630 struct wl1271 *wl = cookie;
632 /* TX might be handled here, avoid redundant work */
633 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
634 cancel_work_sync(&wl->tx_work);
636 mutex_lock(&wl->mutex);
638 ret = wlcore_irq_locked(wl);
639 if (ret)
640 wl12xx_queue_recovery_work(wl);
642 spin_lock_irqsave(&wl->wl_lock, flags);
643 /* In case TX was not handled here, queue TX work */
644 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
645 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
646 wl1271_tx_total_queue_count(wl) > 0)
647 ieee80211_queue_work(wl->hw, &wl->tx_work);
648 spin_unlock_irqrestore(&wl->wl_lock, flags);
650 mutex_unlock(&wl->mutex);
652 return IRQ_HANDLED;
655 struct vif_counter_data {
656 u8 counter;
658 struct ieee80211_vif *cur_vif;
659 bool cur_vif_running;
662 static void wl12xx_vif_count_iter(void *data, u8 *mac,
663 struct ieee80211_vif *vif)
665 struct vif_counter_data *counter = data;
667 counter->counter++;
668 if (counter->cur_vif == vif)
669 counter->cur_vif_running = true;
672 /* caller must not hold wl->mutex, as it might deadlock */
673 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
674 struct ieee80211_vif *cur_vif,
675 struct vif_counter_data *data)
677 memset(data, 0, sizeof(*data));
678 data->cur_vif = cur_vif;
680 ieee80211_iterate_active_interfaces(hw,
681 wl12xx_vif_count_iter, data);
684 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
686 const struct firmware *fw;
687 const char *fw_name;
688 enum wl12xx_fw_type fw_type;
689 int ret;
691 if (plt) {
692 fw_type = WL12XX_FW_TYPE_PLT;
693 fw_name = wl->plt_fw_name;
694 } else {
696 * we can't call wl12xx_get_vif_count() here because
697 * wl->mutex is taken, so use the cached last_vif_count value
699 if (wl->last_vif_count > 1) {
700 fw_type = WL12XX_FW_TYPE_MULTI;
701 fw_name = wl->mr_fw_name;
702 } else {
703 fw_type = WL12XX_FW_TYPE_NORMAL;
704 fw_name = wl->sr_fw_name;
708 if (wl->fw_type == fw_type)
709 return 0;
711 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
713 ret = request_firmware(&fw, fw_name, wl->dev);
715 if (ret < 0) {
716 wl1271_error("could not get firmware %s: %d", fw_name, ret);
717 return ret;
720 if (fw->size % 4) {
721 wl1271_error("firmware size is not multiple of 32 bits: %zu",
722 fw->size);
723 ret = -EILSEQ;
724 goto out;
727 vfree(wl->fw);
728 wl->fw_type = WL12XX_FW_TYPE_NONE;
729 wl->fw_len = fw->size;
730 wl->fw = vmalloc(wl->fw_len);
732 if (!wl->fw) {
733 wl1271_error("could not allocate memory for the firmware");
734 ret = -ENOMEM;
735 goto out;
738 memcpy(wl->fw, fw->data, wl->fw_len);
739 ret = 0;
740 wl->fw_type = fw_type;
741 out:
742 release_firmware(fw);
744 return ret;
747 static void wl1271_fetch_nvs(struct wl1271 *wl)
749 const struct firmware *fw;
750 int ret;
752 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
754 if (ret < 0) {
755 wl1271_debug(DEBUG_BOOT, "could not get nvs file %s: %d",
756 WL12XX_NVS_NAME, ret);
757 return;
760 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
762 if (!wl->nvs) {
763 wl1271_error("could not allocate memory for the nvs file");
764 goto out;
767 wl->nvs_len = fw->size;
769 out:
770 release_firmware(fw);
773 void wl12xx_queue_recovery_work(struct wl1271 *wl)
775 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
777 /* Avoid a recursive recovery */
778 if (!test_and_set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
779 wlcore_disable_interrupts_nosync(wl);
780 ieee80211_queue_work(wl->hw, &wl->recovery_work);
784 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
786 size_t len = 0;
788 /* The FW log is a length-value list, find where the log ends */
789 while (len < maxlen) {
790 if (memblock[len] == 0)
791 break;
792 if (len + memblock[len] + 1 > maxlen)
793 break;
794 len += memblock[len] + 1;
797 /* Make sure we have enough room */
798 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
800 /* Fill the FW log file, consumed by the sysfs fwlog entry */
801 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
802 wl->fwlog_size += len;
804 return len;
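/*
 * Worked example (added for illustration, not in the original sources): each
 * log entry is stored as a length byte followed by that many payload bytes.
 * Given the buffer { 3, 'a', 'b', 'c', 2, 'x', 'y', 0, ... }, the loop above
 * advances len by 3 + 1 and then by 2 + 1, stops at the 0 length byte, and
 * returns len == 7, so exactly the two complete entries are copied into
 * wl->fwlog.
 */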
807 #define WLCORE_FW_LOG_END 0x2000000
809 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
811 u32 addr;
812 u32 offset;
813 u32 end_of_log;
814 u8 *block;
815 int ret;
817 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
818 (wl->conf.fwlog.mem_blocks == 0))
819 return;
821 wl1271_info("Reading FW panic log");
823 block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
824 if (!block)
825 return;
828 * Make sure the chip is awake and the logger isn't active.
829 * Do not send a stop fwlog command if the fw is hung.
831 if (wl1271_ps_elp_wakeup(wl))
832 goto out;
833 if (!wl->watchdog_recovery)
834 wl12xx_cmd_stop_fwlog(wl);
836 /* Read the first memory block address */
837 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
838 if (ret < 0)
839 goto out;
841 addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
842 if (!addr)
843 goto out;
845 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
846 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
847 end_of_log = WLCORE_FW_LOG_END;
848 } else {
849 offset = sizeof(addr);
850 end_of_log = addr;
853 /* Traverse the memory blocks linked list */
854 do {
855 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
856 ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
857 false);
858 if (ret < 0)
859 goto out;
862 * Memory blocks are linked to one another. The first 4 bytes
863 * of each memory block hold the hardware address of the next
864 * one. In on-demand mode the last memory block points back to the
865 * first one; in continuous mode its next-pointer equals 0x2000000.
867 addr = le32_to_cpup((__le32 *)block);
868 if (!wl12xx_copy_fwlog(wl, block + offset,
869 WL12XX_HW_BLOCK_SIZE - offset))
870 break;
871 } while (addr && (addr != end_of_log));
873 wake_up_interruptible(&wl->fwlog_waitq);
875 out:
876 kfree(block);
879 static void wlcore_print_recovery(struct wl1271 *wl)
881 u32 pc = 0;
882 u32 hint_sts = 0;
883 int ret;
885 wl1271_info("Hardware recovery in progress. FW ver: %s",
886 wl->chip.fw_ver_str);
888 /* change partitions momentarily so we can read the FW pc */
889 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
890 if (ret < 0)
891 return;
893 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
894 if (ret < 0)
895 return;
897 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
898 if (ret < 0)
899 return;
901 wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc, hint_sts);
903 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
907 static void wl1271_recovery_work(struct work_struct *work)
909 struct wl1271 *wl =
910 container_of(work, struct wl1271, recovery_work);
911 struct wl12xx_vif *wlvif;
912 struct ieee80211_vif *vif;
914 mutex_lock(&wl->mutex);
916 if (wl->state != WL1271_STATE_ON || wl->plt)
917 goto out_unlock;
919 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
920 wl12xx_read_fwlog_panic(wl);
921 wlcore_print_recovery(wl);
924 BUG_ON(bug_on_recovery &&
925 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
927 if (no_recovery) {
928 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
929 goto out_unlock;
933 * Advance security sequence number to overcome potential progress
934 * in the firmware during recovery. This doesn't hurt if the network is
935 * not encrypted.
937 wl12xx_for_each_wlvif(wl, wlvif) {
938 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
939 test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
940 wlvif->tx_security_seq +=
941 WL1271_TX_SQN_POST_RECOVERY_PADDING;
944 /* Prevent spurious TX during FW restart */
945 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
947 if (wl->sched_scanning) {
948 ieee80211_sched_scan_stopped(wl->hw);
949 wl->sched_scanning = false;
952 /* reboot the chipset */
953 while (!list_empty(&wl->wlvif_list)) {
954 wlvif = list_first_entry(&wl->wlvif_list,
955 struct wl12xx_vif, list);
956 vif = wl12xx_wlvif_to_vif(wlvif);
957 __wl1271_op_remove_interface(wl, vif, false);
960 wlcore_op_stop_locked(wl);
962 ieee80211_restart_hw(wl->hw);
965 * It's safe to enable TX now - the queues are stopped after a request
966 * to restart the HW.
968 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
970 out_unlock:
971 wl->watchdog_recovery = false;
972 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
973 mutex_unlock(&wl->mutex);
976 static int wlcore_fw_wakeup(struct wl1271 *wl)
978 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
981 static int wl1271_setup(struct wl1271 *wl)
983 wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
984 sizeof(*wl->fw_status_2) +
985 wl->fw_status_priv_len, GFP_KERNEL);
986 if (!wl->fw_status_1)
987 return -ENOMEM;
989 wl->fw_status_2 = (struct wl_fw_status_2 *)
990 (((u8 *) wl->fw_status_1) +
991 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
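/*
 * Layout note (added for clarity): both status structures live in the single
 * allocation made above, roughly
 *
 *   [ wl_fw_status_1 + per-rx-descriptor data | wl_fw_status_2 | priv ]
 *
 * so fw_status_2 is just a pointer into the same buffer, right after the
 * variable-length first part, and only fw_status_1 is ever passed to kfree()
 * in this file.
 */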
993 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
994 if (!wl->tx_res_if) {
995 kfree(wl->fw_status_1);
996 return -ENOMEM;
999 return 0;
1002 static int wl12xx_set_power_on(struct wl1271 *wl)
1004 int ret;
1006 msleep(WL1271_PRE_POWER_ON_SLEEP);
1007 ret = wl1271_power_on(wl);
1008 if (ret < 0)
1009 goto out;
1010 msleep(WL1271_POWER_ON_SLEEP);
1011 wl1271_io_reset(wl);
1012 wl1271_io_init(wl);
1014 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1015 if (ret < 0)
1016 goto fail;
1018 /* ELP module wake up */
1019 ret = wlcore_fw_wakeup(wl);
1020 if (ret < 0)
1021 goto fail;
1023 out:
1024 return ret;
1026 fail:
1027 wl1271_power_off(wl);
1028 return ret;
1031 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1033 int ret = 0;
1035 ret = wl12xx_set_power_on(wl);
1036 if (ret < 0)
1037 goto out;
1040 * For wl127x based devices we could use the default block
1041 * size (512 bytes), but due to a bug in the sdio driver, we
1042 * need to set it explicitly after the chip is powered on. To
1043 * simplify the code and since the performance impact is
1044 * negligible, we use the same block size for all different
1045 * chip types.
1047 * Check if the bus supports blocksize alignment and, if it
1048 * doesn't, make sure we don't have the quirk.
1050 if (!wl1271_set_block_size(wl))
1051 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1053 /* TODO: make sure the lower driver has set things up correctly */
1055 ret = wl1271_setup(wl);
1056 if (ret < 0)
1057 goto out;
1059 ret = wl12xx_fetch_firmware(wl, plt);
1060 if (ret < 0)
1061 goto out;
1063 out:
1064 return ret;
1067 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1069 int retries = WL1271_BOOT_RETRIES;
1070 struct wiphy *wiphy = wl->hw->wiphy;
1072 static const char* const PLT_MODE[] = {
1073 "PLT_OFF",
1074 "PLT_ON",
1075 "PLT_FEM_DETECT"
1078 int ret;
1080 mutex_lock(&wl->mutex);
1082 wl1271_notice("power up");
1084 if (wl->state != WL1271_STATE_OFF) {
1085 wl1271_error("cannot go into PLT state because not "
1086 "in off state: %d", wl->state);
1087 ret = -EBUSY;
1088 goto out;
1091 /* Indicate to lower levels that we are now in PLT mode */
1092 wl->plt = true;
1093 wl->plt_mode = plt_mode;
1095 while (retries) {
1096 retries--;
1097 ret = wl12xx_chip_wakeup(wl, true);
1098 if (ret < 0)
1099 goto power_off;
1101 ret = wl->ops->plt_init(wl);
1102 if (ret < 0)
1103 goto power_off;
1105 wl->state = WL1271_STATE_ON;
1106 wl1271_notice("firmware booted in PLT mode %s (%s)",
1107 PLT_MODE[plt_mode],
1108 wl->chip.fw_ver_str);
1110 /* update hw/fw version info in wiphy struct */
1111 wiphy->hw_version = wl->chip.id;
1112 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1113 sizeof(wiphy->fw_version));
1115 goto out;
1117 power_off:
1118 wl1271_power_off(wl);
1121 wl->plt = false;
1122 wl->plt_mode = PLT_OFF;
1124 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1125 WL1271_BOOT_RETRIES);
1126 out:
1127 mutex_unlock(&wl->mutex);
1129 return ret;
1132 int wl1271_plt_stop(struct wl1271 *wl)
1134 int ret = 0;
1136 wl1271_notice("power down");
1139 * Interrupts must be disabled before setting the state to OFF.
1140 * Otherwise, the interrupt handler might be called and exit without
1141 * reading the interrupt status.
1143 wlcore_disable_interrupts(wl);
1144 mutex_lock(&wl->mutex);
1145 if (!wl->plt) {
1146 mutex_unlock(&wl->mutex);
1149 * This will not necessarily enable interrupts as interrupts
1150 * may have been disabled when op_stop was called. It will,
1151 * however, balance the above call to disable_interrupts().
1153 wlcore_enable_interrupts(wl);
1155 wl1271_error("cannot power down because not in PLT "
1156 "state: %d", wl->state);
1157 ret = -EBUSY;
1158 goto out;
1161 mutex_unlock(&wl->mutex);
1163 wl1271_flush_deferred_work(wl);
1164 cancel_work_sync(&wl->netstack_work);
1165 cancel_work_sync(&wl->recovery_work);
1166 cancel_delayed_work_sync(&wl->elp_work);
1167 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1168 cancel_delayed_work_sync(&wl->connection_loss_work);
1170 mutex_lock(&wl->mutex);
1171 wl1271_power_off(wl);
1172 wl->flags = 0;
1173 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1174 wl->state = WL1271_STATE_OFF;
1175 wl->plt = false;
1176 wl->plt_mode = PLT_OFF;
1177 wl->rx_counter = 0;
1178 mutex_unlock(&wl->mutex);
1180 out:
1181 return ret;
1184 static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1186 struct wl1271 *wl = hw->priv;
1187 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1188 struct ieee80211_vif *vif = info->control.vif;
1189 struct wl12xx_vif *wlvif = NULL;
1190 unsigned long flags;
1191 int q, mapping;
1192 u8 hlid;
1194 if (vif)
1195 wlvif = wl12xx_vif_to_data(vif);
1197 mapping = skb_get_queue_mapping(skb);
1198 q = wl1271_tx_get_queue(mapping);
1200 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
1202 spin_lock_irqsave(&wl->wl_lock, flags);
1205 * drop the packet if the link is invalid or the queue is stopped
1206 * for any reason but watermark. Watermark is a "soft"-stop so we
1207 * allow these packets through.
1209 if (hlid == WL12XX_INVALID_LINK_ID ||
1210 (wlvif && !test_bit(hlid, wlvif->links_map)) ||
1211 (wlcore_is_queue_stopped(wl, q) &&
1212 !wlcore_is_queue_stopped_by_reason(wl, q,
1213 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1214 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1215 ieee80211_free_txskb(hw, skb);
1216 goto out;
1219 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1220 hlid, q, skb->len);
1221 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1223 wl->tx_queue_count[q]++;
1226 * The workqueue is slow to process the tx_queue and we need to stop
1227 * the queue here, otherwise the queue will get too long.
1229 if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1230 !wlcore_is_queue_stopped_by_reason(wl, q,
1231 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1232 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1233 wlcore_stop_queue_locked(wl, q,
1234 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1238 * The chip-specific setup must run before the first TX packet -
1239 * before that, the tx_work will not be initialized!
1242 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1243 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1244 ieee80211_queue_work(wl->hw, &wl->tx_work);
1246 out:
1247 spin_unlock_irqrestore(&wl->wl_lock, flags);
1250 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1252 unsigned long flags;
1253 int q;
1255 /* no need to queue a new dummy packet if one is already pending */
1256 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1257 return 0;
1259 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1261 spin_lock_irqsave(&wl->wl_lock, flags);
1262 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1263 wl->tx_queue_count[q]++;
1264 spin_unlock_irqrestore(&wl->wl_lock, flags);
1266 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1267 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1268 return wlcore_tx_work_locked(wl);
1271 * If the FW TX is busy, TX work will be scheduled by the threaded
1272 * interrupt handler function
1274 return 0;
1278 * The size of the dummy packet should be at least 1400 bytes. However, in
1279 * order to minimize the number of bus transactions, aligning it to 512-byte
1280 * boundaries could be beneficial, performance-wise.
1282 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
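/*
 * Note (added for clarity): ALIGN(1400, 512) rounds 1400 up to the next
 * multiple of 512, i.e. 3 * 512 = 1536 bytes, which satisfies both
 * constraints described above.
 */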
1284 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1286 struct sk_buff *skb;
1287 struct ieee80211_hdr_3addr *hdr;
1288 unsigned int dummy_packet_size;
1290 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1291 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1293 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1294 if (!skb) {
1295 wl1271_warning("Failed to allocate a dummy packet skb");
1296 return NULL;
1299 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1301 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1302 memset(hdr, 0, sizeof(*hdr));
1303 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1304 IEEE80211_STYPE_NULLFUNC |
1305 IEEE80211_FCTL_TODS);
1307 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1309 /* Dummy packets require the TID to be management */
1310 skb->priority = WL1271_TID_MGMT;
1312 /* Initialize all fields that might be used */
1313 skb_set_queue_mapping(skb, 0);
1314 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1316 return skb;
1320 #ifdef CONFIG_PM
1321 static int
1322 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
1324 int num_fields = 0, in_field = 0, fields_size = 0;
1325 int i, pattern_len = 0;
1327 if (!p->mask) {
1328 wl1271_warning("No mask in WoWLAN pattern");
1329 return -EINVAL;
1333 * The pattern is broken up into segments of bytes at different offsets
1334 * that need to be checked by the FW filter. Each segment is called
1335 * a field in the FW API. We verify that the total number of fields
1336 * required for this pattern won't exceed FW limits (8),
1337 * and that the total fields buffer won't exceed the FW limit.
1338 * Note that if there's a pattern which crosses Ethernet/IP header
1339 * boundary a new field is required.
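/*
 * Worked example (added for illustration; assumes WL1271_RX_FILTER_ETH_HEADER_SIZE
 * is the usual 14-byte Ethernet header size): a mask selecting pattern bytes
 * 0-5 and 16-19 yields two fields, one in the Ethernet header and one in the
 * IP header. A single contiguous run covering bytes 12-17 is split at offset
 * 14 and therefore also costs two fields, each adding its own length plus
 * RX_FILTER_FIELD_OVERHEAD to fields_size.
 */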
1341 for (i = 0; i < p->pattern_len; i++) {
1342 if (test_bit(i, (unsigned long *)p->mask)) {
1343 if (!in_field) {
1344 in_field = 1;
1345 pattern_len = 1;
1346 } else {
1347 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1348 num_fields++;
1349 fields_size += pattern_len +
1350 RX_FILTER_FIELD_OVERHEAD;
1351 pattern_len = 1;
1352 } else
1353 pattern_len++;
1355 } else {
1356 if (in_field) {
1357 in_field = 0;
1358 fields_size += pattern_len +
1359 RX_FILTER_FIELD_OVERHEAD;
1360 num_fields++;
1365 if (in_field) {
1366 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1367 num_fields++;
1370 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1371 wl1271_warning("RX Filter too complex. Too many segments");
1372 return -EINVAL;
1375 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1376 wl1271_warning("RX filter pattern is too big");
1377 return -E2BIG;
1380 return 0;
1383 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1385 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1388 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1390 int i;
1392 if (filter == NULL)
1393 return;
1395 for (i = 0; i < filter->num_fields; i++)
1396 kfree(filter->fields[i].pattern);
1398 kfree(filter);
1401 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1402 u16 offset, u8 flags,
1403 u8 *pattern, u8 len)
1405 struct wl12xx_rx_filter_field *field;
1407 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1408 wl1271_warning("Max fields per RX filter. can't alloc another");
1409 return -EINVAL;
1412 field = &filter->fields[filter->num_fields];
1414 field->pattern = kzalloc(len, GFP_KERNEL);
1415 if (!field->pattern) {
1416 wl1271_warning("Failed to allocate RX filter pattern");
1417 return -ENOMEM;
1420 filter->num_fields++;
1422 field->offset = cpu_to_le16(offset);
1423 field->flags = flags;
1424 field->len = len;
1425 memcpy(field->pattern, pattern, len);
1427 return 0;
1430 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1432 int i, fields_size = 0;
1434 for (i = 0; i < filter->num_fields; i++)
1435 fields_size += filter->fields[i].len +
1436 sizeof(struct wl12xx_rx_filter_field) -
1437 sizeof(u8 *);
1439 return fields_size;
1442 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1443 u8 *buf)
1445 int i;
1446 struct wl12xx_rx_filter_field *field;
1448 for (i = 0; i < filter->num_fields; i++) {
1449 field = (struct wl12xx_rx_filter_field *)buf;
1451 field->offset = filter->fields[i].offset;
1452 field->flags = filter->fields[i].flags;
1453 field->len = filter->fields[i].len;
1455 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1456 buf += sizeof(struct wl12xx_rx_filter_field) -
1457 sizeof(u8 *) + field->len;
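/*
 * Serialization note (added for clarity): in the flattened buffer each field
 * is written as its fixed header (offset, flags, len) immediately followed by
 * the raw pattern bytes in place of the kernel-side pattern pointer:
 *
 *   [ offset | flags | len | pattern[len] ] [ offset | flags | len | ... ]
 *
 * which is why wl1271_rx_filter_get_fields_size() above counts
 * sizeof(struct wl12xx_rx_filter_field) - sizeof(u8 *) plus the pattern
 * length for every field.
 */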
1462 * Allocates an RX filter returned through f
1463 * which needs to be freed using rx_filter_free()
1465 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1466 struct cfg80211_wowlan_trig_pkt_pattern *p,
1467 struct wl12xx_rx_filter **f)
1469 int i, j, ret = 0;
1470 struct wl12xx_rx_filter *filter;
1471 u16 offset;
1472 u8 flags, len;
1474 filter = wl1271_rx_filter_alloc();
1475 if (!filter) {
1476 wl1271_warning("Failed to alloc rx filter");
1477 ret = -ENOMEM;
1478 goto err;
1481 i = 0;
1482 while (i < p->pattern_len) {
1483 if (!test_bit(i, (unsigned long *)p->mask)) {
1484 i++;
1485 continue;
1488 for (j = i; j < p->pattern_len; j++) {
1489 if (!test_bit(j, (unsigned long *)p->mask))
1490 break;
1492 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1493 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1494 break;
1497 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1498 offset = i;
1499 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1500 } else {
1501 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1502 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1505 len = j - i;
1507 ret = wl1271_rx_filter_alloc_field(filter,
1508 offset,
1509 flags,
1510 &p->pattern[i], len);
1511 if (ret)
1512 goto err;
1514 i = j;
1517 filter->action = FILTER_SIGNAL;
1519 *f = filter;
1520 return 0;
1522 err:
1523 wl1271_rx_filter_free(filter);
1524 *f = NULL;
1526 return ret;
1529 static int wl1271_configure_wowlan(struct wl1271 *wl,
1530 struct cfg80211_wowlan *wow)
1532 int i, ret;
1534 if (!wow || wow->any || !wow->n_patterns) {
1535 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1536 FILTER_SIGNAL);
1537 if (ret)
1538 goto out;
1540 ret = wl1271_rx_filter_clear_all(wl);
1541 if (ret)
1542 goto out;
1544 return 0;
1547 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1548 return -EINVAL;
1550 /* Validate all incoming patterns before clearing current FW state */
1551 for (i = 0; i < wow->n_patterns; i++) {
1552 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1553 if (ret) {
1554 wl1271_warning("Bad wowlan pattern %d", i);
1555 return ret;
1559 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1560 if (ret)
1561 goto out;
1563 ret = wl1271_rx_filter_clear_all(wl);
1564 if (ret)
1565 goto out;
1567 /* Translate WoWLAN patterns into filters */
1568 for (i = 0; i < wow->n_patterns; i++) {
1569 struct cfg80211_wowlan_trig_pkt_pattern *p;
1570 struct wl12xx_rx_filter *filter = NULL;
1572 p = &wow->patterns[i];
1574 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1575 if (ret) {
1576 wl1271_warning("Failed to create an RX filter from "
1577 "wowlan pattern %d", i);
1578 goto out;
1581 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1583 wl1271_rx_filter_free(filter);
1584 if (ret)
1585 goto out;
1588 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1590 out:
1591 return ret;
1594 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1595 struct wl12xx_vif *wlvif,
1596 struct cfg80211_wowlan *wow)
1598 int ret = 0;
1600 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1601 goto out;
1603 if ((wl->conf.conn.suspend_wake_up_event ==
1604 wl->conf.conn.wake_up_event) &&
1605 (wl->conf.conn.suspend_listen_interval ==
1606 wl->conf.conn.listen_interval))
1607 goto out;
1609 ret = wl1271_ps_elp_wakeup(wl);
1610 if (ret < 0)
1611 goto out;
1613 ret = wl1271_configure_wowlan(wl, wow);
1614 if (ret < 0)
1615 goto out_sleep;
1617 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1618 wl->conf.conn.suspend_wake_up_event,
1619 wl->conf.conn.suspend_listen_interval);
1621 if (ret < 0)
1622 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1624 out_sleep:
1625 wl1271_ps_elp_sleep(wl);
1626 out:
1627 return ret;
1631 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1632 struct wl12xx_vif *wlvif)
1634 int ret = 0;
1636 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1637 goto out;
1639 ret = wl1271_ps_elp_wakeup(wl);
1640 if (ret < 0)
1641 goto out;
1643 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1645 wl1271_ps_elp_sleep(wl);
1646 out:
1647 return ret;
1651 static int wl1271_configure_suspend(struct wl1271 *wl,
1652 struct wl12xx_vif *wlvif,
1653 struct cfg80211_wowlan *wow)
1655 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1656 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1657 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1658 return wl1271_configure_suspend_ap(wl, wlvif);
1659 return 0;
1662 static void wl1271_configure_resume(struct wl1271 *wl,
1663 struct wl12xx_vif *wlvif)
1665 int ret = 0;
1666 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1667 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1669 if ((!is_ap) && (!is_sta))
1670 return;
1672 if (is_sta &&
1673 ((wl->conf.conn.suspend_wake_up_event ==
1674 wl->conf.conn.wake_up_event) &&
1675 (wl->conf.conn.suspend_listen_interval ==
1676 wl->conf.conn.listen_interval)))
1677 return;
1679 ret = wl1271_ps_elp_wakeup(wl);
1680 if (ret < 0)
1681 return;
1683 if (is_sta) {
1684 wl1271_configure_wowlan(wl, NULL);
1686 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1687 wl->conf.conn.wake_up_event,
1688 wl->conf.conn.listen_interval);
1690 if (ret < 0)
1691 wl1271_error("resume: wake up conditions failed: %d",
1692 ret);
1694 } else if (is_ap) {
1695 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1698 wl1271_ps_elp_sleep(wl);
1701 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1702 struct cfg80211_wowlan *wow)
1704 struct wl1271 *wl = hw->priv;
1705 struct wl12xx_vif *wlvif;
1706 int ret;
1708 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1709 WARN_ON(!wow);
1711 /* we want to perform the recovery before suspending */
1712 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1713 wl1271_warning("postponing suspend to perform recovery");
1714 return -EBUSY;
1717 wl1271_tx_flush(wl);
1719 mutex_lock(&wl->mutex);
1720 wl->wow_enabled = true;
1721 wl12xx_for_each_wlvif(wl, wlvif) {
1722 ret = wl1271_configure_suspend(wl, wlvif, wow);
1723 if (ret < 0) {
1724 mutex_unlock(&wl->mutex);
1725 wl1271_warning("couldn't prepare device to suspend");
1726 return ret;
1729 mutex_unlock(&wl->mutex);
1730 /* flush any remaining work */
1731 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1734 * disable and re-enable interrupts in order to flush
1735 * the threaded_irq
1737 wlcore_disable_interrupts(wl);
1740 * set suspended flag to avoid triggering a new threaded_irq
1741 * work. no need for spinlock as interrupts are disabled.
1743 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1745 wlcore_enable_interrupts(wl);
1746 flush_work(&wl->tx_work);
1747 flush_delayed_work(&wl->elp_work);
1749 return 0;
1752 static int wl1271_op_resume(struct ieee80211_hw *hw)
1754 struct wl1271 *wl = hw->priv;
1755 struct wl12xx_vif *wlvif;
1756 unsigned long flags;
1757 bool run_irq_work = false, pending_recovery;
1758 int ret;
1760 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1761 wl->wow_enabled);
1762 WARN_ON(!wl->wow_enabled);
1765 * re-enable irq_work enqueuing, and call irq_work directly if
1766 * there is a pending work.
1768 spin_lock_irqsave(&wl->wl_lock, flags);
1769 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1770 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1771 run_irq_work = true;
1772 spin_unlock_irqrestore(&wl->wl_lock, flags);
1774 mutex_lock(&wl->mutex);
1776 /* test the recovery flag before calling any SDIO functions */
1777 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1778 &wl->flags);
1780 if (run_irq_work) {
1781 wl1271_debug(DEBUG_MAC80211,
1782 "run postponed irq_work directly");
1784 /* don't talk to the HW if recovery is pending */
1785 if (!pending_recovery) {
1786 ret = wlcore_irq_locked(wl);
1787 if (ret)
1788 wl12xx_queue_recovery_work(wl);
1791 wlcore_enable_interrupts(wl);
1794 if (pending_recovery) {
1795 wl1271_warning("queuing forgotten recovery on resume");
1796 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1797 goto out;
1800 wl12xx_for_each_wlvif(wl, wlvif) {
1801 wl1271_configure_resume(wl, wlvif);
1804 out:
1805 wl->wow_enabled = false;
1806 mutex_unlock(&wl->mutex);
1808 return 0;
1810 #endif
1812 static int wl1271_op_start(struct ieee80211_hw *hw)
1814 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1817 * We have to delay the booting of the hardware because
1818 * we need to know the local MAC address before downloading and
1819 * initializing the firmware. The MAC address cannot be changed
1820 * after boot, and without the proper MAC address, the firmware
1821 * will not function properly.
1823 * The MAC address is first known when the corresponding interface
1824 * is added. That is where we will initialize the hardware.
1827 return 0;
1830 static void wlcore_op_stop_locked(struct wl1271 *wl)
1832 int i;
1834 if (wl->state == WL1271_STATE_OFF) {
1835 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1836 &wl->flags))
1837 wlcore_enable_interrupts(wl);
1839 return;
1843 * this must be before the cancel_work calls below, so that the work
1844 * functions don't perform further work.
1846 wl->state = WL1271_STATE_OFF;
1849 * Use the nosync variant to disable interrupts, so the mutex could be
1850 * held while doing so without deadlocking.
1852 wlcore_disable_interrupts_nosync(wl);
1854 mutex_unlock(&wl->mutex);
1856 wlcore_synchronize_interrupts(wl);
1857 wl1271_flush_deferred_work(wl);
1858 cancel_delayed_work_sync(&wl->scan_complete_work);
1859 cancel_work_sync(&wl->netstack_work);
1860 cancel_work_sync(&wl->tx_work);
1861 cancel_delayed_work_sync(&wl->elp_work);
1862 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1863 cancel_delayed_work_sync(&wl->connection_loss_work);
1865 /* let's notify MAC80211 about the remaining pending TX frames */
1866 wl12xx_tx_reset(wl);
1867 mutex_lock(&wl->mutex);
1869 wl1271_power_off(wl);
1871 * In case a recovery was scheduled, interrupts were disabled to avoid
1872 * an interrupt storm. Now that the power is down, it is safe to
1873 * re-enable interrupts to balance the disable depth
1875 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1876 wlcore_enable_interrupts(wl);
1878 wl->band = IEEE80211_BAND_2GHZ;
1880 wl->rx_counter = 0;
1881 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1882 wl->channel_type = NL80211_CHAN_NO_HT;
1883 wl->tx_blocks_available = 0;
1884 wl->tx_allocated_blocks = 0;
1885 wl->tx_results_count = 0;
1886 wl->tx_packets_count = 0;
1887 wl->time_offset = 0;
1888 wl->ap_fw_ps_map = 0;
1889 wl->ap_ps_map = 0;
1890 wl->sched_scanning = false;
1891 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1892 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1893 memset(wl->links_map, 0, sizeof(wl->links_map));
1894 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1895 wl->active_sta_count = 0;
1897 /* The system link is always allocated */
1898 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1901 * this is performed after the cancel_work calls and the associated
1902 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1903 * get executed before all these vars have been reset.
1905 wl->flags = 0;
1907 wl->tx_blocks_freed = 0;
1909 for (i = 0; i < NUM_TX_QUEUES; i++) {
1910 wl->tx_pkts_freed[i] = 0;
1911 wl->tx_allocated_pkts[i] = 0;
1914 wl1271_debugfs_reset(wl);
1916 kfree(wl->fw_status_1);
1917 wl->fw_status_1 = NULL;
1918 wl->fw_status_2 = NULL;
1919 kfree(wl->tx_res_if);
1920 wl->tx_res_if = NULL;
1921 kfree(wl->target_mem_map);
1922 wl->target_mem_map = NULL;
1925 static void wlcore_op_stop(struct ieee80211_hw *hw)
1927 struct wl1271 *wl = hw->priv;
1929 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1931 mutex_lock(&wl->mutex);
1933 wlcore_op_stop_locked(wl);
1935 mutex_unlock(&wl->mutex);
1938 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
1940 u8 policy = find_first_zero_bit(wl->rate_policies_map,
1941 WL12XX_MAX_RATE_POLICIES);
1942 if (policy >= WL12XX_MAX_RATE_POLICIES)
1943 return -EBUSY;
1945 __set_bit(policy, wl->rate_policies_map);
1946 *idx = policy;
1947 return 0;
1950 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
1952 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
1953 return;
1955 __clear_bit(*idx, wl->rate_policies_map);
1956 *idx = WL12XX_MAX_RATE_POLICIES;
1959 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1961 switch (wlvif->bss_type) {
1962 case BSS_TYPE_AP_BSS:
1963 if (wlvif->p2p)
1964 return WL1271_ROLE_P2P_GO;
1965 else
1966 return WL1271_ROLE_AP;
1968 case BSS_TYPE_STA_BSS:
1969 if (wlvif->p2p)
1970 return WL1271_ROLE_P2P_CL;
1971 else
1972 return WL1271_ROLE_STA;
1974 case BSS_TYPE_IBSS:
1975 return WL1271_ROLE_IBSS;
1977 default:
1978 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
1980 return WL12XX_INVALID_ROLE_TYPE;
1983 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1985 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
1986 int i;
1988 /* clear everything but the persistent data */
1989 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
1991 switch (ieee80211_vif_type_p2p(vif)) {
1992 case NL80211_IFTYPE_P2P_CLIENT:
1993 wlvif->p2p = 1;
1994 /* fall-through */
1995 case NL80211_IFTYPE_STATION:
1996 wlvif->bss_type = BSS_TYPE_STA_BSS;
1997 break;
1998 case NL80211_IFTYPE_ADHOC:
1999 wlvif->bss_type = BSS_TYPE_IBSS;
2000 break;
2001 case NL80211_IFTYPE_P2P_GO:
2002 wlvif->p2p = 1;
2003 /* fall-through */
2004 case NL80211_IFTYPE_AP:
2005 wlvif->bss_type = BSS_TYPE_AP_BSS;
2006 break;
2007 default:
2008 wlvif->bss_type = MAX_BSS_TYPE;
2009 return -EOPNOTSUPP;
2012 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2013 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2014 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2016 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2017 wlvif->bss_type == BSS_TYPE_IBSS) {
2018 /* init sta/ibss data */
2019 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2020 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2021 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2022 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2023 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2024 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2025 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2026 } else {
2027 /* init ap data */
2028 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2029 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2030 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2031 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2032 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2033 wl12xx_allocate_rate_policy(wl,
2034 &wlvif->ap.ucast_rate_idx[i]);
2035 wlvif->basic_rate_set = CONF_TX_AP_ENABLED_RATES;
2037 * TODO: check if basic_rate shouldn't be
2038 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2039 * instead (the same thing for STA above).
2041 wlvif->basic_rate = CONF_TX_AP_ENABLED_RATES;
2042 /* TODO: this seems to be used only for STA, check it */
2043 wlvif->rate_set = CONF_TX_AP_ENABLED_RATES;
2046 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2047 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2048 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2051 * mac80211 configures some values globally, while we treat them
2052 * per-interface. Thus, on init, we have to copy them from wl
2054 wlvif->band = wl->band;
2055 wlvif->channel = wl->channel;
2056 wlvif->power_level = wl->power_level;
2057 wlvif->channel_type = wl->channel_type;
2059 INIT_WORK(&wlvif->rx_streaming_enable_work,
2060 wl1271_rx_streaming_enable_work);
2061 INIT_WORK(&wlvif->rx_streaming_disable_work,
2062 wl1271_rx_streaming_disable_work);
2063 INIT_LIST_HEAD(&wlvif->list);
2065 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2066 (unsigned long) wlvif);
2067 return 0;
2070 static bool wl12xx_init_fw(struct wl1271 *wl)
2072 int retries = WL1271_BOOT_RETRIES;
2073 bool booted = false;
2074 struct wiphy *wiphy = wl->hw->wiphy;
2075 int ret;
2077 while (retries) {
2078 retries--;
2079 ret = wl12xx_chip_wakeup(wl, false);
2080 if (ret < 0)
2081 goto power_off;
2083 ret = wl->ops->boot(wl);
2084 if (ret < 0)
2085 goto power_off;
2087 ret = wl1271_hw_init(wl);
2088 if (ret < 0)
2089 goto irq_disable;
2091 booted = true;
2092 break;
2094 irq_disable:
2095 mutex_unlock(&wl->mutex);
2096 /* Unlocking the mutex in the middle of handling is
2097 inherently unsafe. In this case we deem it safe to do,
2098 because we need to let any possibly pending IRQ out of
2099 the system (and while we are WL1271_STATE_OFF the IRQ
2100 work function will not do anything.) Also, any other
2101 possible concurrent operations will fail due to the
2102 current state, hence the wl1271 struct should be safe. */
2103 wlcore_disable_interrupts(wl);
2104 wl1271_flush_deferred_work(wl);
2105 cancel_work_sync(&wl->netstack_work);
2106 mutex_lock(&wl->mutex);
2107 power_off:
2108 wl1271_power_off(wl);
2111 if (!booted) {
2112 wl1271_error("firmware boot failed despite %d retries",
2113 WL1271_BOOT_RETRIES);
2114 goto out;
2117 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2119 /* update hw/fw version info in wiphy struct */
2120 wiphy->hw_version = wl->chip.id;
2121 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2122 sizeof(wiphy->fw_version));
2125 * Now we know if 11a is supported (info from the NVS), so disable
2126 * 11a channels if not supported
2128 if (!wl->enable_11a)
2129 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2131 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2132 wl->enable_11a ? "" : "not ");
2134 wl->state = WL1271_STATE_ON;
2135 out:
2136 return booted;
2139 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2141 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2145 * Check whether a fw switch (i.e. moving from one loaded
2146 * fw to another) is needed. This function is also responsible
2147 * for updating wl->last_vif_count, so it must be called before
2148 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2149 * will be used).
2151 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2152 struct vif_counter_data vif_counter_data,
2153 bool add)
2155 enum wl12xx_fw_type current_fw = wl->fw_type;
2156 u8 vif_count = vif_counter_data.counter;
2158 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2159 return false;
2161 /* increase the vif count if this is a new vif */
2162 if (add && !vif_counter_data.cur_vif_running)
2163 vif_count++;
2165 wl->last_vif_count = vif_count;
2167 /* no need for fw change if the device is OFF */
2168 if (wl->state == WL1271_STATE_OFF)
2169 return false;
2171 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2172 return true;
2173 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2174 return true;
2176 return false;
2180 * Enter "forced psm". Make sure the sta is in psm against the ap,
2181 * to make the fw switch a bit more disconnection-persistent.
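* (i.e. put every associated STA vif into full power-save, so that the AP
* buffers its frames while the fw is being swapped underneath it)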
2183 static void wl12xx_force_active_psm(struct wl1271 *wl)
2185 struct wl12xx_vif *wlvif;
2187 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2188 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2192 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2193 struct ieee80211_vif *vif)
2195 struct wl1271 *wl = hw->priv;
2196 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2197 struct vif_counter_data vif_count;
2198 int ret = 0;
2199 u8 role_type;
2200 bool booted = false;
2202 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2203 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2205 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2206 ieee80211_vif_type_p2p(vif), vif->addr);
2208 wl12xx_get_vif_count(hw, vif, &vif_count);
2210 mutex_lock(&wl->mutex);
2211 ret = wl1271_ps_elp_wakeup(wl);
2212 if (ret < 0)
2213 goto out_unlock;
2216 * in some very rare corner-case HW recovery scenarios it's possible to
2217 * get here before __wl1271_op_remove_interface is complete, so
2218 * opt out if that is the case.
2220 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2221 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2222 ret = -EBUSY;
2223 goto out;
2227 ret = wl12xx_init_vif_data(wl, vif);
2228 if (ret < 0)
2229 goto out;
2231 wlvif->wl = wl;
2232 role_type = wl12xx_get_role_type(wl, wlvif);
2233 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2234 ret = -EINVAL;
2235 goto out;
2238 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2239 wl12xx_force_active_psm(wl);
2240 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2241 mutex_unlock(&wl->mutex);
2242 wl1271_recovery_work(&wl->recovery_work);
2243 return 0;
2247 * TODO: after the nvs issue is solved, move this block
2248 * to start(), and make sure the driver is ON here.
2250 if (wl->state == WL1271_STATE_OFF) {
2252 * we still need this in order to configure the fw
2253 * while uploading the nvs
2255 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2257 booted = wl12xx_init_fw(wl);
2258 if (!booted) {
2259 ret = -EINVAL;
2260 goto out;
2264 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2265 wlvif->bss_type == BSS_TYPE_IBSS) {
2267 * The device role is a special role used for
2268 * rx and tx frames prior to association (as
2269 * the STA role can get packets only from
2270 * its associated bssid)
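* Roughly: authentication/association frames are exchanged over the
* device role (dev_hlid), and data moves to the STA role/hlid once
* the STA role is started on join.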
2272 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2273 WL1271_ROLE_DEVICE,
2274 &wlvif->dev_role_id);
2275 if (ret < 0)
2276 goto out;
2279 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2280 role_type, &wlvif->role_id);
2281 if (ret < 0)
2282 goto out;
2284 ret = wl1271_init_vif_specific(wl, vif);
2285 if (ret < 0)
2286 goto out;
2288 list_add(&wlvif->list, &wl->wlvif_list);
2289 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2291 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2292 wl->ap_count++;
2293 else
2294 wl->sta_count++;
2295 out:
2296 wl1271_ps_elp_sleep(wl);
2297 out_unlock:
2298 mutex_unlock(&wl->mutex);
2300 return ret;
2303 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2304 struct ieee80211_vif *vif,
2305 bool reset_tx_queues)
2307 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2308 int i, ret;
2309 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2311 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2313 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2314 return;
2316 /* because of hardware recovery, we may get here twice */
2317 if (wl->state != WL1271_STATE_ON)
2318 return;
2320 wl1271_info("down");
2322 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2323 wl->scan_vif == vif) {
2325 * Rearm the tx watchdog just before idling scan. This
2326 * prevents just-finished scans from triggering the watchdog
2328 wl12xx_rearm_tx_watchdog_locked(wl);
2330 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2331 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2332 wl->scan_vif = NULL;
2333 wl->scan.req = NULL;
2334 ieee80211_scan_completed(wl->hw, true);
2337 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2338 /* disable active roles */
2339 ret = wl1271_ps_elp_wakeup(wl);
2340 if (ret < 0)
2341 goto deinit;
2343 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2344 wlvif->bss_type == BSS_TYPE_IBSS) {
2345 if (wl12xx_dev_role_started(wlvif))
2346 wl12xx_stop_dev(wl, wlvif);
2348 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2349 if (ret < 0)
2350 goto deinit;
2353 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2354 if (ret < 0)
2355 goto deinit;
2357 wl1271_ps_elp_sleep(wl);
2359 deinit:
2360 /* clear all hlids (except system_hlid) */
2361 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2363 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2364 wlvif->bss_type == BSS_TYPE_IBSS) {
2365 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2366 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2367 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2368 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2369 } else {
2370 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2371 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2372 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2373 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2374 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2375 wl12xx_free_rate_policy(wl,
2376 &wlvif->ap.ucast_rate_idx[i]);
2377 wl1271_free_ap_keys(wl, wlvif);
2380 dev_kfree_skb(wlvif->probereq);
2381 wlvif->probereq = NULL;
2382 wl12xx_tx_reset_wlvif(wl, wlvif);
2383 if (wl->last_wlvif == wlvif)
2384 wl->last_wlvif = NULL;
2385 list_del(&wlvif->list);
2386 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2387 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2388 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2390 if (is_ap)
2391 wl->ap_count--;
2392 else
2393 wl->sta_count--;
2396 * Last AP went down, but stations remain. Configure sleep auth according to STA.
2397 * Don't do this on unintended recovery.
2399 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2400 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2401 goto unlock;
2403 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2404 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2405 /* Configure for power according to debugfs */
2406 if (sta_auth != WL1271_PSM_ILLEGAL)
2407 wl1271_acx_sleep_auth(wl, sta_auth);
2408 /* Configure for power always on */
2409 else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
2410 wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
2411 /* Configure for ELP power saving */
2412 else
2413 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2416 unlock:
2417 mutex_unlock(&wl->mutex);
2419 del_timer_sync(&wlvif->rx_streaming_timer);
2420 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2421 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2423 mutex_lock(&wl->mutex);
2426 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2427 struct ieee80211_vif *vif)
2429 struct wl1271 *wl = hw->priv;
2430 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2431 struct wl12xx_vif *iter;
2432 struct vif_counter_data vif_count;
2433 bool cancel_recovery = true;
2435 wl12xx_get_vif_count(hw, vif, &vif_count);
2436 mutex_lock(&wl->mutex);
2438 if (wl->state == WL1271_STATE_OFF ||
2439 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2440 goto out;
2443 * wl->vif can be null here if someone shuts down the interface
2444 * just when hardware recovery has been started.
2446 wl12xx_for_each_wlvif(wl, iter) {
2447 if (iter != wlvif)
2448 continue;
2450 __wl1271_op_remove_interface(wl, vif, true);
2451 break;
2453 WARN_ON(iter != wlvif);
2454 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2455 wl12xx_force_active_psm(wl);
2456 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2457 wl12xx_queue_recovery_work(wl);
2458 cancel_recovery = false;
2460 out:
2461 mutex_unlock(&wl->mutex);
2462 if (cancel_recovery)
2463 cancel_work_sync(&wl->recovery_work);
2466 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2467 struct ieee80211_vif *vif,
2468 enum nl80211_iftype new_type, bool p2p)
2470 struct wl1271 *wl = hw->priv;
2471 int ret;
2473 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2474 wl1271_op_remove_interface(hw, vif);
2476 vif->type = new_type;
2477 vif->p2p = p2p;
2478 ret = wl1271_op_add_interface(hw, vif);
2480 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2481 return ret;
2484 static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2485 bool set_assoc)
2487 int ret;
2488 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2491 * One of the side effects of the JOIN command is that it clears
2492 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2493 * to a WPA/WPA2 access point will therefore kill the data-path.
2494 * Currently the only valid scenario for JOIN during association
2495 * is on roaming, in which case we will also be given new keys.
2496 * Keep the below message for now, unless it starts bothering
2497 * users who really like to roam a lot :)
2499 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2500 wl1271_info("JOIN while associated.");
2502 /* clear encryption type */
2503 wlvif->encryption_type = KEY_NONE;
2505 if (set_assoc)
2506 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2508 if (is_ibss)
2509 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2510 else
2511 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2512 if (ret < 0)
2513 goto out;
2515 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2516 goto out;
2519 * The join command disables the keep-alive mode, shuts down its process,
2520 * and also clear the template config, so we need to reset it all after
2521 * the join. The acx_aid starts the keep-alive process, and the order
2522 * of the commands below is relevant.
2524 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2525 if (ret < 0)
2526 goto out;
2528 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2529 if (ret < 0)
2530 goto out;
2532 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2533 if (ret < 0)
2534 goto out;
2536 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2537 CMD_TEMPL_KLV_IDX_NULL_DATA,
2538 ACX_KEEP_ALIVE_TPL_VALID);
2539 if (ret < 0)
2540 goto out;
2542 out:
2543 return ret;
2546 static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2548 int ret;
2550 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2551 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2553 wl12xx_cmd_stop_channel_switch(wl);
2554 ieee80211_chswitch_done(vif, false);
2557 /* to stop listening to a channel, we disconnect */
2558 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
2559 if (ret < 0)
2560 goto out;
2562 /* reset TX security counters on a clean disconnect */
2563 wlvif->tx_security_last_seq_lsb = 0;
2564 wlvif->tx_security_seq = 0;
2566 out:
2567 return ret;
2570 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2572 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2573 wlvif->rate_set = wlvif->basic_rate_set;
2576 static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2577 bool idle)
2579 int ret;
2580 bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2582 if (idle == cur_idle)
2583 return 0;
2585 if (idle) {
2586 /* no need to croc if we weren't busy (e.g. during boot) */
2587 if (wl12xx_dev_role_started(wlvif)) {
2588 ret = wl12xx_stop_dev(wl, wlvif);
2589 if (ret < 0)
2590 goto out;
2592 wlvif->rate_set =
2593 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2594 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2595 if (ret < 0)
2596 goto out;
2597 ret = wl1271_acx_keep_alive_config(
2598 wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
2599 ACX_KEEP_ALIVE_TPL_INVALID);
2600 if (ret < 0)
2601 goto out;
2602 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2603 } else {
2604 /* The current firmware only supports sched_scan in idle */
2605 if (wl->sched_scanning) {
2606 wl1271_scan_sched_scan_stop(wl, wlvif);
2607 ieee80211_sched_scan_stopped(wl->hw);
2610 ret = wl12xx_start_dev(wl, wlvif);
2611 if (ret < 0)
2612 goto out;
2613 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
2616 out:
2617 return ret;
2620 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2621 struct ieee80211_conf *conf, u32 changed)
2623 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2624 int channel, ret;
2626 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2628 /* if the channel changes while joined, join again */
2629 if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
2630 ((wlvif->band != conf->channel->band) ||
2631 (wlvif->channel != channel) ||
2632 (wlvif->channel_type != conf->channel_type))) {
2633 /* send all pending packets */
2634 ret = wlcore_tx_work_locked(wl);
2635 if (ret < 0)
2636 return ret;
2638 wlvif->band = conf->channel->band;
2639 wlvif->channel = channel;
2640 wlvif->channel_type = conf->channel_type;
2642 if (is_ap) {
2643 wl1271_set_band_rate(wl, wlvif);
2644 ret = wl1271_init_ap_rates(wl, wlvif);
2645 if (ret < 0)
2646 wl1271_error("AP rate policy change failed %d",
2647 ret);
2648 } else {
2650 * FIXME: the mac80211 should really provide a fixed
2651 * rate to use here. for now, just use the smallest
2652 * possible rate for the band as a fixed rate for
2653 * association frames and other control messages.
2655 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2656 wl1271_set_band_rate(wl, wlvif);
2658 wlvif->basic_rate =
2659 wl1271_tx_min_rate_get(wl,
2660 wlvif->basic_rate_set);
2661 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2662 if (ret < 0)
2663 wl1271_warning("rate policy for channel "
2664 "failed %d", ret);
2667 * change the ROC channel. do it only if we are
2668 * not idle. otherwise, CROC will be called
2669 * anyway.
2671 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED,
2672 &wlvif->flags) &&
2673 wl12xx_dev_role_started(wlvif) &&
2674 !(conf->flags & IEEE80211_CONF_IDLE)) {
2675 ret = wl12xx_stop_dev(wl, wlvif);
2676 if (ret < 0)
2677 return ret;
2679 ret = wl12xx_start_dev(wl, wlvif);
2680 if (ret < 0)
2681 return ret;
2686 if ((changed & IEEE80211_CONF_CHANGE_PS) && !is_ap) {
2688 if ((conf->flags & IEEE80211_CONF_PS) &&
2689 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
2690 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
2692 int ps_mode;
2693 char *ps_mode_str;
2695 if (wl->conf.conn.forced_ps) {
2696 ps_mode = STATION_POWER_SAVE_MODE;
2697 ps_mode_str = "forced";
2698 } else {
2699 ps_mode = STATION_AUTO_PS_MODE;
2700 ps_mode_str = "auto";
2703 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
2705 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
2707 if (ret < 0)
2708 wl1271_warning("enter %s ps failed %d",
2709 ps_mode_str, ret);
2711 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
2712 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
2714 wl1271_debug(DEBUG_PSM, "auto ps disabled");
2716 ret = wl1271_ps_set_mode(wl, wlvif,
2717 STATION_ACTIVE_MODE);
2718 if (ret < 0)
2719 wl1271_warning("exit auto ps failed %d", ret);
2723 if (conf->power_level != wlvif->power_level) {
2724 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2725 if (ret < 0)
2726 return ret;
2728 wlvif->power_level = conf->power_level;
2731 return 0;
2734 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2736 struct wl1271 *wl = hw->priv;
2737 struct wl12xx_vif *wlvif;
2738 struct ieee80211_conf *conf = &hw->conf;
2739 int channel, ret = 0;
2741 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2743 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
2744 " changed 0x%x",
2745 channel,
2746 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2747 conf->power_level,
2748 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2749 changed);
2752 * mac80211 will go to idle nearly immediately after transmitting some
2753 * frames, such as the deauth. To make sure those frames reach the air,
2754 * wait here until the TX queue is fully flushed.
2756 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
2757 ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
2758 (conf->flags & IEEE80211_CONF_IDLE)))
2759 wl1271_tx_flush(wl);
2761 mutex_lock(&wl->mutex);
2763 /* we support configuring the channel and band even while off */
2764 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2765 wl->band = conf->channel->band;
2766 wl->channel = channel;
2767 wl->channel_type = conf->channel_type;
2770 if (changed & IEEE80211_CONF_CHANGE_POWER)
2771 wl->power_level = conf->power_level;
2773 if (unlikely(wl->state == WL1271_STATE_OFF))
2774 goto out;
2776 ret = wl1271_ps_elp_wakeup(wl);
2777 if (ret < 0)
2778 goto out;
2780 /* configure each interface */
2781 wl12xx_for_each_wlvif(wl, wlvif) {
2782 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2783 if (ret < 0)
2784 goto out_sleep;
2787 out_sleep:
2788 wl1271_ps_elp_sleep(wl);
2790 out:
2791 mutex_unlock(&wl->mutex);
2793 return ret;
2796 struct wl1271_filter_params {
2797 bool enabled;
2798 int mc_list_length;
2799 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
2802 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2803 struct netdev_hw_addr_list *mc_list)
2805 struct wl1271_filter_params *fp;
2806 struct netdev_hw_addr *ha;
2807 struct wl1271 *wl = hw->priv;
2809 if (unlikely(wl->state == WL1271_STATE_OFF))
2810 return 0;
2812 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2813 if (!fp) {
2814 wl1271_error("Out of memory setting filters.");
2815 return 0;
2818 /* update multicast filtering parameters */
2819 fp->mc_list_length = 0;
2820 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2821 fp->enabled = false;
2822 } else {
2823 fp->enabled = true;
2824 netdev_hw_addr_list_for_each(ha, mc_list) {
2825 memcpy(fp->mc_list[fp->mc_list_length],
2826 ha->addr, ETH_ALEN);
2827 fp->mc_list_length++;
2831 return (u64)(unsigned long)fp;
2834 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2835 FIF_ALLMULTI | \
2836 FIF_FCSFAIL | \
2837 FIF_BCN_PRBRESP_PROMISC | \
2838 FIF_CONTROL | \
2839 FIF_OTHER_BSS)
2841 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2842 unsigned int changed,
2843 unsigned int *total, u64 multicast)
2845 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2846 struct wl1271 *wl = hw->priv;
2847 struct wl12xx_vif *wlvif;
2849 int ret;
2851 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2852 " total %x", changed, *total);
2854 mutex_lock(&wl->mutex);
2856 *total &= WL1271_SUPPORTED_FILTERS;
2857 changed &= WL1271_SUPPORTED_FILTERS;
2859 if (unlikely(wl->state == WL1271_STATE_OFF))
2860 goto out;
2862 ret = wl1271_ps_elp_wakeup(wl);
2863 if (ret < 0)
2864 goto out;
2866 wl12xx_for_each_wlvif(wl, wlvif) {
2867 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
2868 if (*total & FIF_ALLMULTI)
2869 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2870 false,
2871 NULL, 0);
2872 else if (fp)
2873 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2874 fp->enabled,
2875 fp->mc_list,
2876 fp->mc_list_length);
2877 if (ret < 0)
2878 goto out_sleep;
2883 * the fw doesn't provide an api to configure the filters. instead,
2884 * the filter configuration is based on the active roles / ROC
2885 * state.
2888 out_sleep:
2889 wl1271_ps_elp_sleep(wl);
2891 out:
2892 mutex_unlock(&wl->mutex);
2893 kfree(fp);
2896 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2897 u8 id, u8 key_type, u8 key_size,
2898 const u8 *key, u8 hlid, u32 tx_seq_32,
2899 u16 tx_seq_16)
2901 struct wl1271_ap_key *ap_key;
2902 int i;
2904 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
2906 if (key_size > MAX_KEY_SIZE)
2907 return -EINVAL;
2910 * Find next free entry in ap_keys. Also check we are not replacing
2911 * an existing key.
2913 for (i = 0; i < MAX_NUM_KEYS; i++) {
2914 if (wlvif->ap.recorded_keys[i] == NULL)
2915 break;
2917 if (wlvif->ap.recorded_keys[i]->id == id) {
2918 wl1271_warning("trying to record key replacement");
2919 return -EINVAL;
2923 if (i == MAX_NUM_KEYS)
2924 return -EBUSY;
2926 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
2927 if (!ap_key)
2928 return -ENOMEM;
2930 ap_key->id = id;
2931 ap_key->key_type = key_type;
2932 ap_key->key_size = key_size;
2933 memcpy(ap_key->key, key, key_size);
2934 ap_key->hlid = hlid;
2935 ap_key->tx_seq_32 = tx_seq_32;
2936 ap_key->tx_seq_16 = tx_seq_16;
2938 wlvif->ap.recorded_keys[i] = ap_key;
2939 return 0;
2942 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2944 int i;
2946 for (i = 0; i < MAX_NUM_KEYS; i++) {
2947 kfree(wlvif->ap.recorded_keys[i]);
2948 wlvif->ap.recorded_keys[i] = NULL;
2952 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2954 int i, ret = 0;
2955 struct wl1271_ap_key *key;
2956 bool wep_key_added = false;
2958 for (i = 0; i < MAX_NUM_KEYS; i++) {
2959 u8 hlid;
2960 if (wlvif->ap.recorded_keys[i] == NULL)
2961 break;
2963 key = wlvif->ap.recorded_keys[i];
2964 hlid = key->hlid;
2965 if (hlid == WL12XX_INVALID_LINK_ID)
2966 hlid = wlvif->ap.bcast_hlid;
2968 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
2969 key->id, key->key_type,
2970 key->key_size, key->key,
2971 hlid, key->tx_seq_32,
2972 key->tx_seq_16);
2973 if (ret < 0)
2974 goto out;
2976 if (key->key_type == KEY_WEP)
2977 wep_key_added = true;
2980 if (wep_key_added) {
2981 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
2982 wlvif->ap.bcast_hlid);
2983 if (ret < 0)
2984 goto out;
2987 out:
2988 wl1271_free_ap_keys(wl, wlvif);
2989 return ret;
2992 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2993 u16 action, u8 id, u8 key_type,
2994 u8 key_size, const u8 *key, u32 tx_seq_32,
2995 u16 tx_seq_16, struct ieee80211_sta *sta)
2997 int ret;
2998 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3000 if (is_ap) {
3001 struct wl1271_station *wl_sta;
3002 u8 hlid;
3004 if (sta) {
3005 wl_sta = (struct wl1271_station *)sta->drv_priv;
3006 hlid = wl_sta->hlid;
3007 } else {
3008 hlid = wlvif->ap.bcast_hlid;
3011 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3013 * We do not support removing keys after AP shutdown.
3014 * Pretend we do to make mac80211 happy.
3016 if (action != KEY_ADD_OR_REPLACE)
3017 return 0;
3019 ret = wl1271_record_ap_key(wl, wlvif, id,
3020 key_type, key_size,
3021 key, hlid, tx_seq_32,
3022 tx_seq_16);
3023 } else {
3024 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3025 id, key_type, key_size,
3026 key, hlid, tx_seq_32,
3027 tx_seq_16);
3030 if (ret < 0)
3031 return ret;
3032 } else {
3033 const u8 *addr;
3034 static const u8 bcast_addr[ETH_ALEN] = {
3035 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3038 addr = sta ? sta->addr : bcast_addr;
3040 if (is_zero_ether_addr(addr)) {
3041 /* We don't support TX-only encryption */
3042 return -EOPNOTSUPP;
3045 /* The wl1271 does not allow removing unicast keys - they
3046 will be cleared automatically on the next CMD_JOIN. Ignore the
3047 request silently, as we don't want mac80211 to emit
3048 an error message. */
3049 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3050 return 0;
3052 /* don't remove key if hlid was already deleted */
3053 if (action == KEY_REMOVE &&
3054 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3055 return 0;
3057 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3058 id, key_type, key_size,
3059 key, addr, tx_seq_32,
3060 tx_seq_16);
3061 if (ret < 0)
3062 return ret;
3064 /* the default WEP key needs to be configured at least once */
3065 if (key_type == KEY_WEP) {
3066 ret = wl12xx_cmd_set_default_wep_key(wl,
3067 wlvif->default_key,
3068 wlvif->sta.hlid);
3069 if (ret < 0)
3070 return ret;
3074 return 0;
3077 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3078 struct ieee80211_vif *vif,
3079 struct ieee80211_sta *sta,
3080 struct ieee80211_key_conf *key_conf)
3082 struct wl1271 *wl = hw->priv;
3084 return wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3087 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3088 struct ieee80211_vif *vif,
3089 struct ieee80211_sta *sta,
3090 struct ieee80211_key_conf *key_conf)
3092 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3093 int ret;
3094 u32 tx_seq_32 = 0;
3095 u16 tx_seq_16 = 0;
3096 u8 key_type;
3098 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3100 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3101 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3102 key_conf->cipher, key_conf->keyidx,
3103 key_conf->keylen, key_conf->flags);
3104 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3106 mutex_lock(&wl->mutex);
3108 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3109 ret = -EAGAIN;
3110 goto out_unlock;
3113 ret = wl1271_ps_elp_wakeup(wl);
3114 if (ret < 0)
3115 goto out_unlock;
3117 switch (key_conf->cipher) {
3118 case WLAN_CIPHER_SUITE_WEP40:
3119 case WLAN_CIPHER_SUITE_WEP104:
3120 key_type = KEY_WEP;
3122 key_conf->hw_key_idx = key_conf->keyidx;
3123 break;
3124 case WLAN_CIPHER_SUITE_TKIP:
3125 key_type = KEY_TKIP;
3127 key_conf->hw_key_idx = key_conf->keyidx;
3128 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3129 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
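/*
* The 48-bit TX PN is split for the fw; assuming the usual HI32/LO16
* macros, e.g. tx_security_seq = 0x123456789abc yields
* tx_seq_32 = 0x12345678 and tx_seq_16 = 0x9abc. CCMP and GEM below
* use the same split.
*/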
3130 break;
3131 case WLAN_CIPHER_SUITE_CCMP:
3132 key_type = KEY_AES;
3134 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3135 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3136 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3137 break;
3138 case WL1271_CIPHER_SUITE_GEM:
3139 key_type = KEY_GEM;
3140 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3141 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3142 break;
3143 default:
3144 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3146 ret = -EOPNOTSUPP;
3147 goto out_sleep;
3150 switch (cmd) {
3151 case SET_KEY:
3152 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3153 key_conf->keyidx, key_type,
3154 key_conf->keylen, key_conf->key,
3155 tx_seq_32, tx_seq_16, sta);
3156 if (ret < 0) {
3157 wl1271_error("Could not add or replace key");
3158 goto out_sleep;
3162 * reconfiguring arp response if the unicast (or common)
3163 * encryption key type was changed
3165 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3166 (sta || key_type == KEY_WEP) &&
3167 wlvif->encryption_type != key_type) {
3168 wlvif->encryption_type = key_type;
3169 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3170 if (ret < 0) {
3171 wl1271_warning("build arp rsp failed: %d", ret);
3172 goto out_sleep;
3175 break;
3177 case DISABLE_KEY:
3178 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3179 key_conf->keyidx, key_type,
3180 key_conf->keylen, key_conf->key,
3181 0, 0, sta);
3182 if (ret < 0) {
3183 wl1271_error("Could not remove key");
3184 goto out_sleep;
3186 break;
3188 default:
3189 wl1271_error("Unsupported key cmd 0x%x", cmd);
3190 ret = -EOPNOTSUPP;
3191 break;
3194 out_sleep:
3195 wl1271_ps_elp_sleep(wl);
3197 out_unlock:
3198 mutex_unlock(&wl->mutex);
3200 return ret;
3202 EXPORT_SYMBOL_GPL(wlcore_set_key);
3204 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3205 struct ieee80211_vif *vif,
3206 struct cfg80211_scan_request *req)
3208 struct wl1271 *wl = hw->priv;
3209 int ret;
3210 u8 *ssid = NULL;
3211 size_t len = 0;
3213 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3215 if (req->n_ssids) {
3216 ssid = req->ssids[0].ssid;
3217 len = req->ssids[0].ssid_len;
3220 mutex_lock(&wl->mutex);
3222 if (wl->state == WL1271_STATE_OFF) {
3224 * We cannot return -EBUSY here because cfg80211 will expect
3225 * a call to ieee80211_scan_completed if we do - in this case
3226 * there won't be any call.
3228 ret = -EAGAIN;
3229 goto out;
3232 ret = wl1271_ps_elp_wakeup(wl);
3233 if (ret < 0)
3234 goto out;
3236 /* fail if there is any role in ROC */
3237 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3238 /* don't allow scanning right now */
3239 ret = -EBUSY;
3240 goto out_sleep;
3243 ret = wl1271_scan(hw->priv, vif, ssid, len, req);
3244 out_sleep:
3245 wl1271_ps_elp_sleep(wl);
3246 out:
3247 mutex_unlock(&wl->mutex);
3249 return ret;
3252 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3253 struct ieee80211_vif *vif)
3255 struct wl1271 *wl = hw->priv;
3256 int ret;
3258 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3260 mutex_lock(&wl->mutex);
3262 if (wl->state == WL1271_STATE_OFF)
3263 goto out;
3265 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3266 goto out;
3268 ret = wl1271_ps_elp_wakeup(wl);
3269 if (ret < 0)
3270 goto out;
3272 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3273 ret = wl1271_scan_stop(wl);
3274 if (ret < 0)
3275 goto out_sleep;
3279 * Rearm the tx watchdog just before idling scan. This
3280 * prevents just-finished scans from triggering the watchdog
3282 wl12xx_rearm_tx_watchdog_locked(wl);
3284 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3285 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3286 wl->scan_vif = NULL;
3287 wl->scan.req = NULL;
3288 ieee80211_scan_completed(wl->hw, true);
3290 out_sleep:
3291 wl1271_ps_elp_sleep(wl);
3292 out:
3293 mutex_unlock(&wl->mutex);
3295 cancel_delayed_work_sync(&wl->scan_complete_work);
3298 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3299 struct ieee80211_vif *vif,
3300 struct cfg80211_sched_scan_request *req,
3301 struct ieee80211_sched_scan_ies *ies)
3303 struct wl1271 *wl = hw->priv;
3304 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3305 int ret;
3307 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3309 mutex_lock(&wl->mutex);
3311 if (wl->state == WL1271_STATE_OFF) {
3312 ret = -EAGAIN;
3313 goto out;
3316 ret = wl1271_ps_elp_wakeup(wl);
3317 if (ret < 0)
3318 goto out;
3320 ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
3321 if (ret < 0)
3322 goto out_sleep;
3324 ret = wl1271_scan_sched_scan_start(wl, wlvif);
3325 if (ret < 0)
3326 goto out_sleep;
3328 wl->sched_scanning = true;
3330 out_sleep:
3331 wl1271_ps_elp_sleep(wl);
3332 out:
3333 mutex_unlock(&wl->mutex);
3334 return ret;
3337 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3338 struct ieee80211_vif *vif)
3340 struct wl1271 *wl = hw->priv;
3341 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3342 int ret;
3344 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3346 mutex_lock(&wl->mutex);
3348 if (wl->state == WL1271_STATE_OFF)
3349 goto out;
3351 ret = wl1271_ps_elp_wakeup(wl);
3352 if (ret < 0)
3353 goto out;
3355 wl1271_scan_sched_scan_stop(wl, wlvif);
3357 wl1271_ps_elp_sleep(wl);
3358 out:
3359 mutex_unlock(&wl->mutex);
3362 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3364 struct wl1271 *wl = hw->priv;
3365 int ret = 0;
3367 mutex_lock(&wl->mutex);
3369 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3370 ret = -EAGAIN;
3371 goto out;
3374 ret = wl1271_ps_elp_wakeup(wl);
3375 if (ret < 0)
3376 goto out;
3378 ret = wl1271_acx_frag_threshold(wl, value);
3379 if (ret < 0)
3380 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3382 wl1271_ps_elp_sleep(wl);
3384 out:
3385 mutex_unlock(&wl->mutex);
3387 return ret;
3390 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3392 struct wl1271 *wl = hw->priv;
3393 struct wl12xx_vif *wlvif;
3394 int ret = 0;
3396 mutex_lock(&wl->mutex);
3398 if (unlikely(wl->state == WL1271_STATE_OFF)) {
3399 ret = -EAGAIN;
3400 goto out;
3403 ret = wl1271_ps_elp_wakeup(wl);
3404 if (ret < 0)
3405 goto out;
3407 wl12xx_for_each_wlvif(wl, wlvif) {
3408 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3409 if (ret < 0)
3410 wl1271_warning("set rts threshold failed: %d", ret);
3412 wl1271_ps_elp_sleep(wl);
3414 out:
3415 mutex_unlock(&wl->mutex);
3417 return ret;
3420 static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
3421 int offset)
3423 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3424 u8 ssid_len;
3425 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
3426 skb->len - offset);
3428 if (!ptr) {
3429 wl1271_error("No SSID in IEs!");
3430 return -ENOENT;
3433 ssid_len = ptr[1];
3434 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
3435 wl1271_error("SSID is too long!");
3436 return -EINVAL;
3439 wlvif->ssid_len = ssid_len;
3440 memcpy(wlvif->ssid, ptr+2, ssid_len);
3441 return 0;
3444 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3446 int len;
3447 const u8 *next, *end = skb->data + skb->len;
3448 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3449 skb->len - ieoffset);
3450 if (!ie)
3451 return;
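/* an IE is a 2-byte header (EID, length) followed by ie[1] payload bytes */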
3452 len = ie[1] + 2;
3453 next = ie + len;
3454 memmove(ie, next, end - next);
3455 skb_trim(skb, skb->len - len);
3458 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3459 unsigned int oui, u8 oui_type,
3460 int ieoffset)
3462 int len;
3463 const u8 *next, *end = skb->data + skb->len;
3464 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3465 skb->data + ieoffset,
3466 skb->len - ieoffset);
3467 if (!ie)
3468 return;
3469 len = ie[1] + 2;
3470 next = ie + len;
3471 memmove(ie, next, end - next);
3472 skb_trim(skb, skb->len - len);
3475 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3476 struct ieee80211_vif *vif)
3478 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3479 struct sk_buff *skb;
3480 int ret;
3482 skb = ieee80211_proberesp_get(wl->hw, vif);
3483 if (!skb)
3484 return -EOPNOTSUPP;
3486 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3487 CMD_TEMPL_AP_PROBE_RESPONSE,
3488 skb->data,
3489 skb->len, 0,
3490 rates);
3491 dev_kfree_skb(skb);
3493 if (ret < 0)
3494 goto out;
3496 wl1271_debug(DEBUG_AP, "probe response updated");
3497 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3499 out:
3500 return ret;
3503 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3504 struct ieee80211_vif *vif,
3505 u8 *probe_rsp_data,
3506 size_t probe_rsp_len,
3507 u32 rates)
3509 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3510 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3511 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3512 int ssid_ie_offset, ie_offset, templ_len;
3513 const u8 *ptr;
3515 /* no need to change probe response if the SSID is set correctly */
3516 if (wlvif->ssid_len > 0)
3517 return wl1271_cmd_template_set(wl, wlvif->role_id,
3518 CMD_TEMPL_AP_PROBE_RESPONSE,
3519 probe_rsp_data,
3520 probe_rsp_len, 0,
3521 rates);
3523 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3524 wl1271_error("probe_rsp template too big");
3525 return -EINVAL;
3528 /* start searching from IE offset */
3529 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3531 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3532 probe_rsp_len - ie_offset);
3533 if (!ptr) {
3534 wl1271_error("No SSID in beacon!");
3535 return -EINVAL;
3538 ssid_ie_offset = ptr - probe_rsp_data;
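/* skip the original SSID IE (2-byte header + payload); the remainder of
the frame is copied verbatim after the new SSID below */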
3539 ptr += (ptr[1] + 2);
3541 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3543 /* insert SSID from bss_conf */
3544 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3545 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3546 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3547 bss_conf->ssid, bss_conf->ssid_len);
3548 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3550 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3551 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3552 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3554 return wl1271_cmd_template_set(wl, wlvif->role_id,
3555 CMD_TEMPL_AP_PROBE_RESPONSE,
3556 probe_rsp_templ,
3557 templ_len, 0,
3558 rates);
3561 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3562 struct ieee80211_vif *vif,
3563 struct ieee80211_bss_conf *bss_conf,
3564 u32 changed)
3566 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3567 int ret = 0;
3569 if (changed & BSS_CHANGED_ERP_SLOT) {
3570 if (bss_conf->use_short_slot)
3571 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3572 else
3573 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3574 if (ret < 0) {
3575 wl1271_warning("Set slot time failed %d", ret);
3576 goto out;
3580 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3581 if (bss_conf->use_short_preamble)
3582 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3583 else
3584 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3587 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3588 if (bss_conf->use_cts_prot)
3589 ret = wl1271_acx_cts_protect(wl, wlvif,
3590 CTSPROTECT_ENABLE);
3591 else
3592 ret = wl1271_acx_cts_protect(wl, wlvif,
3593 CTSPROTECT_DISABLE);
3594 if (ret < 0) {
3595 wl1271_warning("Set ctsprotect failed %d", ret);
3596 goto out;
3600 out:
3601 return ret;
3604 static int wlcore_set_beacon_template(struct wl1271 *wl,
3605 struct ieee80211_vif *vif,
3606 bool is_ap)
3608 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3609 struct ieee80211_hdr *hdr;
3610 u32 min_rate;
3611 int ret;
3612 int ieoffset = offsetof(struct ieee80211_mgmt,
3613 u.beacon.variable);
3614 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3615 u16 tmpl_id;
3617 if (!beacon) {
3618 ret = -EINVAL;
3619 goto out;
3622 wl1271_debug(DEBUG_MASTER, "beacon updated");
3624 ret = wl1271_ssid_set(vif, beacon, ieoffset);
3625 if (ret < 0) {
3626 dev_kfree_skb(beacon);
3627 goto out;
3629 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3630 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3631 CMD_TEMPL_BEACON;
3632 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3633 beacon->data,
3634 beacon->len, 0,
3635 min_rate);
3636 if (ret < 0) {
3637 dev_kfree_skb(beacon);
3638 goto out;
3642 * In case a probe response template was already set explicitly
3643 * by userspace, don't derive one from the beacon data.
3645 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3646 goto end_bcn;
3648 /* remove TIM ie from probe response */
3649 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3652 * remove p2p ie from probe response.
3653 * the fw responds to probe requests that don't include
3654 * the p2p ie. probe requests with p2p ie will be passed,
3655 * and will be answered by the supplicant (the spec
3656 * forbids including the p2p ie when responding to probe
3657 * requests that didn't include it).
3659 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3660 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
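/*
* from here on the beacon skb doubles as the probe response template:
* flip the frame subtype and upload it with the matching template id.
*/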
3662 hdr = (struct ieee80211_hdr *) beacon->data;
3663 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3664 IEEE80211_STYPE_PROBE_RESP);
3665 if (is_ap)
3666 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3667 beacon->data,
3668 beacon->len,
3669 min_rate);
3670 else
3671 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3672 CMD_TEMPL_PROBE_RESPONSE,
3673 beacon->data,
3674 beacon->len, 0,
3675 min_rate);
3676 end_bcn:
3677 dev_kfree_skb(beacon);
3678 if (ret < 0)
3679 goto out;
3681 out:
3682 return ret;
3685 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3686 struct ieee80211_vif *vif,
3687 struct ieee80211_bss_conf *bss_conf,
3688 u32 changed)
3690 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3691 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3692 int ret = 0;
3694 if ((changed & BSS_CHANGED_BEACON_INT)) {
3695 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3696 bss_conf->beacon_int);
3698 wlvif->beacon_int = bss_conf->beacon_int;
3701 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3702 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3704 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3707 if ((changed & BSS_CHANGED_BEACON)) {
3708 ret = wlcore_set_beacon_template(wl, vif, is_ap);
3709 if (ret < 0)
3710 goto out;
3713 out:
3714 if (ret != 0)
3715 wl1271_error("beacon info change failed: %d", ret);
3716 return ret;
3719 /* AP mode changes */
3720 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3721 struct ieee80211_vif *vif,
3722 struct ieee80211_bss_conf *bss_conf,
3723 u32 changed)
3725 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3726 int ret = 0;
3728 if ((changed & BSS_CHANGED_BASIC_RATES)) {
3729 u32 rates = bss_conf->basic_rates;
3731 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3732 wlvif->band);
3733 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3734 wlvif->basic_rate_set);
3736 ret = wl1271_init_ap_rates(wl, wlvif);
3737 if (ret < 0) {
3738 wl1271_error("AP rate policy change failed %d", ret);
3739 goto out;
3742 ret = wl1271_ap_init_templates(wl, vif);
3743 if (ret < 0)
3744 goto out;
3746 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3747 if (ret < 0)
3748 goto out;
3750 ret = wlcore_set_beacon_template(wl, vif, true);
3751 if (ret < 0)
3752 goto out;
3755 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3756 if (ret < 0)
3757 goto out;
3759 if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
3760 if (bss_conf->enable_beacon) {
3761 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3762 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
3763 if (ret < 0)
3764 goto out;
3766 ret = wl1271_ap_init_hwenc(wl, wlvif);
3767 if (ret < 0)
3768 goto out;
3770 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3771 wl1271_debug(DEBUG_AP, "started AP");
3773 } else {
3774 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3775 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3776 if (ret < 0)
3777 goto out;
3779 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3780 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3781 &wlvif->flags);
3782 wl1271_debug(DEBUG_AP, "stopped AP");
3787 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3788 if (ret < 0)
3789 goto out;
3791 /* Handle HT information change */
3792 if ((changed & BSS_CHANGED_HT) &&
3793 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3794 ret = wl1271_acx_set_ht_information(wl, wlvif,
3795 bss_conf->ht_operation_mode);
3796 if (ret < 0) {
3797 wl1271_warning("Set ht information failed %d", ret);
3798 goto out;
3802 out:
3803 return;
3806 /* STA/IBSS mode changes */
3807 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3808 struct ieee80211_vif *vif,
3809 struct ieee80211_bss_conf *bss_conf,
3810 u32 changed)
3812 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3813 bool do_join = false, set_assoc = false;
3814 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
3815 bool ibss_joined = false;
3816 u32 sta_rate_set = 0;
3817 int ret;
3818 struct ieee80211_sta *sta;
3819 bool sta_exists = false;
3820 struct ieee80211_sta_ht_cap sta_ht_cap;
3822 if (is_ibss) {
3823 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
3824 changed);
3825 if (ret < 0)
3826 goto out;
3829 if (changed & BSS_CHANGED_IBSS) {
3830 if (bss_conf->ibss_joined) {
3831 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
3832 ibss_joined = true;
3833 } else {
3834 if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
3835 &wlvif->flags))
3836 wl1271_unjoin(wl, wlvif);
3840 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
3841 do_join = true;
3843 /* Need to update the SSID (for filtering etc) */
3844 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
3845 do_join = true;
3847 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
3848 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
3849 bss_conf->enable_beacon ? "enabled" : "disabled");
3851 do_join = true;
3854 if (changed & BSS_CHANGED_IDLE && !is_ibss) {
3855 ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
3856 if (ret < 0)
3857 wl1271_warning("idle mode change failed %d", ret);
3860 if ((changed & BSS_CHANGED_CQM)) {
3861 bool enable = false;
3862 if (bss_conf->cqm_rssi_thold)
3863 enable = true;
3864 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
3865 bss_conf->cqm_rssi_thold,
3866 bss_conf->cqm_rssi_hyst);
3867 if (ret < 0)
3868 goto out;
3869 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
3872 if (changed & BSS_CHANGED_BSSID)
3873 if (!is_zero_ether_addr(bss_conf->bssid)) {
3874 ret = wl12xx_cmd_build_null_data(wl, wlvif);
3875 if (ret < 0)
3876 goto out;
3878 ret = wl1271_build_qos_null_data(wl, vif);
3879 if (ret < 0)
3880 goto out;
3883 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
3884 rcu_read_lock();
3885 sta = ieee80211_find_sta(vif, bss_conf->bssid);
3886 if (!sta)
3887 goto sta_not_found;
3889 /* save the supp_rates of the ap */
3890 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
3891 if (sta->ht_cap.ht_supported)
3892 sta_rate_set |=
3893 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
3894 (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
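/*
* sta_rate_set now holds the AP's legacy rates in the low bits with the
* HT MCS rx masks packed above them (rx_mask[0] = single-stream MCS 0-7,
* rx_mask[1] = two-stream MCS 8-15).
*/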
3895 sta_ht_cap = sta->ht_cap;
3896 sta_exists = true;
3898 sta_not_found:
3899 rcu_read_unlock();
3902 if ((changed & BSS_CHANGED_ASSOC)) {
3903 if (bss_conf->assoc) {
3904 u32 rates;
3905 int ieoffset;
3906 wlvif->aid = bss_conf->aid;
3907 wlvif->channel_type = bss_conf->channel_type;
3908 wlvif->beacon_int = bss_conf->beacon_int;
3909 do_join = true;
3910 set_assoc = true;
3913 * use basic rates from AP, and determine lowest rate
3914 * to use with control frames.
3916 rates = bss_conf->basic_rates;
3917 wlvif->basic_rate_set =
3918 wl1271_tx_enabled_rates_get(wl, rates,
3919 wlvif->band);
3920 wlvif->basic_rate =
3921 wl1271_tx_min_rate_get(wl,
3922 wlvif->basic_rate_set);
3923 if (sta_rate_set)
3924 wlvif->rate_set =
3925 wl1271_tx_enabled_rates_get(wl,
3926 sta_rate_set,
3927 wlvif->band);
3928 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3929 if (ret < 0)
3930 goto out;
3933 * with wl1271, we don't need to update the
3934 * beacon_int and dtim_period, because the firmware
3935 * updates them by itself when the first beacon is
3936 * received after a join.
3938 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
3939 if (ret < 0)
3940 goto out;
3943 * Get a template for hardware connection maintenance
3945 dev_kfree_skb(wlvif->probereq);
3946 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
3947 wlvif,
3948 NULL);
3949 ieoffset = offsetof(struct ieee80211_mgmt,
3950 u.probe_req.variable);
3951 wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
3953 /* enable the connection monitoring feature */
3954 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
3955 if (ret < 0)
3956 goto out;
3957 } else {
3958 /* use defaults when not associated */
3959 bool was_assoc =
3960 !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
3961 &wlvif->flags);
3962 bool was_ifup =
3963 !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
3964 &wlvif->flags);
3965 wlvif->aid = 0;
3967 /* free probe-request template */
3968 dev_kfree_skb(wlvif->probereq);
3969 wlvif->probereq = NULL;
3971 /* revert back to minimum rates for the current band */
3972 wl1271_set_band_rate(wl, wlvif);
3973 wlvif->basic_rate =
3974 wl1271_tx_min_rate_get(wl,
3975 wlvif->basic_rate_set);
3976 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3977 if (ret < 0)
3978 goto out;
3980 /* disable connection monitor features */
3981 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3983 /* Disable the keep-alive feature */
3984 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3985 if (ret < 0)
3986 goto out;
3988 /* restore the bssid filter and go to dummy bssid */
3989 if (was_assoc) {
3991 * we might have to disable roc, if there was
3992 * no IF_OPER_UP notification.
3994 if (!was_ifup) {
3995 ret = wl12xx_croc(wl, wlvif->role_id);
3996 if (ret < 0)
3997 goto out;
4000 * (we also need to disable roc in case of
4001 * roaming on the same channel, until we
4002 * have a better flow...)
4004 if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
4005 ret = wl12xx_croc(wl,
4006 wlvif->dev_role_id);
4007 if (ret < 0)
4008 goto out;
4011 wl1271_unjoin(wl, wlvif);
4012 if (!bss_conf->idle)
4013 wl12xx_start_dev(wl, wlvif);
4018 if (changed & BSS_CHANGED_IBSS) {
4019 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4020 bss_conf->ibss_joined);
4022 if (bss_conf->ibss_joined) {
4023 u32 rates = bss_conf->basic_rates;
4024 wlvif->basic_rate_set =
4025 wl1271_tx_enabled_rates_get(wl, rates,
4026 wlvif->band);
4027 wlvif->basic_rate =
4028 wl1271_tx_min_rate_get(wl,
4029 wlvif->basic_rate_set);
4031 /* by default, use 11b + OFDM rates */
4032 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4033 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4034 if (ret < 0)
4035 goto out;
4039 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4040 if (ret < 0)
4041 goto out;
4043 if (do_join) {
4044 ret = wl1271_join(wl, wlvif, set_assoc);
4045 if (ret < 0) {
4046 wl1271_warning("cmd join failed %d", ret);
4047 goto out;
4050 /* ROC until connected (after EAPOL exchange) */
4051 if (!is_ibss) {
4052 ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
4053 if (ret < 0)
4054 goto out;
4056 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4057 wl12xx_set_authorized(wl, wlvif);
4060 * stop device role if started (we might already be in
4061 * STA/IBSS role).
4063 if (wl12xx_dev_role_started(wlvif)) {
4064 ret = wl12xx_stop_dev(wl, wlvif);
4065 if (ret < 0)
4066 goto out;
4070 /* Handle new association with HT. Do this after join. */
4071 if (sta_exists) {
4072 if ((changed & BSS_CHANGED_HT) &&
4073 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
4074 ret = wl1271_acx_set_ht_capabilities(wl,
4075 &sta_ht_cap,
4076 true,
4077 wlvif->sta.hlid);
4078 if (ret < 0) {
4079 wl1271_warning("Set ht cap true failed %d",
4080 ret);
4081 goto out;
4084 /* handle new association without HT and disassociation */
4085 else if (changed & BSS_CHANGED_ASSOC) {
4086 ret = wl1271_acx_set_ht_capabilities(wl,
4087 &sta_ht_cap,
4088 false,
4089 wlvif->sta.hlid);
4090 if (ret < 0) {
4091 wl1271_warning("Set ht cap false failed %d",
4092 ret);
4093 goto out;
4098 /* Handle HT information change. Done after join. */
4099 if ((changed & BSS_CHANGED_HT) &&
4100 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
4101 ret = wl1271_acx_set_ht_information(wl, wlvif,
4102 bss_conf->ht_operation_mode);
4103 if (ret < 0) {
4104 wl1271_warning("Set ht information failed %d", ret);
4105 goto out;
4109 /* Handle arp filtering. Done after join. */
4110 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4111 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4112 __be32 addr = bss_conf->arp_addr_list[0];
4113 wlvif->sta.qos = bss_conf->qos;
4114 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4116 if (bss_conf->arp_addr_cnt == 1 &&
4117 bss_conf->arp_filter_enabled) {
4118 wlvif->ip_addr = addr;
4120 * The template should have been configured only upon
4121 * association. However, it seems that the correct IP
4122 * isn't being set (when sending), so we have to
4123 * reconfigure the template upon every IP change.
4125 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4126 if (ret < 0) {
4127 wl1271_warning("build arp rsp failed: %d", ret);
4128 goto out;
4131 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4132 (ACX_ARP_FILTER_ARP_FILTERING |
4133 ACX_ARP_FILTER_AUTO_ARP),
4134 addr);
4135 } else {
4136 wlvif->ip_addr = 0;
4137 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4140 if (ret < 0)
4141 goto out;
4144 out:
4145 return;
4148 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4149 struct ieee80211_vif *vif,
4150 struct ieee80211_bss_conf *bss_conf,
4151 u32 changed)
4153 struct wl1271 *wl = hw->priv;
4154 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4155 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4156 int ret;
4158 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
4159 (int)changed);
4162 * make sure to cancel pending disconnections if our association
4163 * state changed
4165 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4166 cancel_delayed_work_sync(&wl->connection_loss_work);
4168 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4169 !bss_conf->enable_beacon)
4170 wl1271_tx_flush(wl);
4172 mutex_lock(&wl->mutex);
4174 if (unlikely(wl->state == WL1271_STATE_OFF))
4175 goto out;
4177 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4178 goto out;
4180 ret = wl1271_ps_elp_wakeup(wl);
4181 if (ret < 0)
4182 goto out;
4184 if (is_ap)
4185 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4186 else
4187 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4189 wl1271_ps_elp_sleep(wl);
4191 out:
4192 mutex_unlock(&wl->mutex);
4195 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4196 struct ieee80211_vif *vif, u16 queue,
4197 const struct ieee80211_tx_queue_params *params)
4199 struct wl1271 *wl = hw->priv;
4200 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4201 u8 ps_scheme;
4202 int ret = 0;
4204 mutex_lock(&wl->mutex);
4206 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4208 if (params->uapsd)
4209 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4210 else
4211 ps_scheme = CONF_PS_SCHEME_LEGACY;
4213 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4214 goto out;
4216 ret = wl1271_ps_elp_wakeup(wl);
4217 if (ret < 0)
4218 goto out;
4221 * the txop is configured by mac80211 in units of 32us,
4222 * but the fw expects microseconds
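* (e.g. a txop of 94, i.e. 94 * 32us, is passed down as 94 << 5 = 3008us)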
4224 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4225 params->cw_min, params->cw_max,
4226 params->aifs, params->txop << 5);
4227 if (ret < 0)
4228 goto out_sleep;
4230 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4231 CONF_CHANNEL_TYPE_EDCF,
4232 wl1271_tx_get_queue(queue),
4233 ps_scheme, CONF_ACK_POLICY_LEGACY,
4234 0, 0);
4236 out_sleep:
4237 wl1271_ps_elp_sleep(wl);
4239 out:
4240 mutex_unlock(&wl->mutex);
4242 return ret;
4245 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4246 struct ieee80211_vif *vif)
4249 struct wl1271 *wl = hw->priv;
4250 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4251 u64 mactime = ULLONG_MAX;
4252 int ret;
4254 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4256 mutex_lock(&wl->mutex);
4258 if (unlikely(wl->state == WL1271_STATE_OFF))
4259 goto out;
4261 ret = wl1271_ps_elp_wakeup(wl);
4262 if (ret < 0)
4263 goto out;
4265 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4266 if (ret < 0)
4267 goto out_sleep;
4269 out_sleep:
4270 wl1271_ps_elp_sleep(wl);
4272 out:
4273 mutex_unlock(&wl->mutex);
4274 return mactime;
4277 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4278 struct survey_info *survey)
4280 struct ieee80211_conf *conf = &hw->conf;
4282 if (idx != 0)
4283 return -ENOENT;
4285 survey->channel = conf->channel;
4286 survey->filled = 0;
4287 return 0;
4290 static int wl1271_allocate_sta(struct wl1271 *wl,
4291 struct wl12xx_vif *wlvif,
4292 struct ieee80211_sta *sta)
4294 struct wl1271_station *wl_sta;
4295 int ret;
4298 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4299 wl1271_warning("could not allocate HLID - too many stations");
4300 return -EBUSY;
4303 wl_sta = (struct wl1271_station *)sta->drv_priv;
4304 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4305 if (ret < 0) {
4306 wl1271_warning("could not allocate HLID - too many links");
4307 return -EBUSY;
4310 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4311 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4312 wl->active_sta_count++;
4313 return 0;
4316 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4318 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4319 return;
4321 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4322 memset(wl->links[hlid].addr, 0, ETH_ALEN);
4323 wl->links[hlid].ba_bitmap = 0;
4324 __clear_bit(hlid, &wl->ap_ps_map);
4325 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4326 wl12xx_free_link(wl, wlvif, &hlid);
4327 wl->active_sta_count--;
4330 * rearm the tx watchdog when the last STA is freed - give the FW a
4331 * chance to return STA-buffered packets before complaining.
4333 if (wl->active_sta_count == 0)
4334 wl12xx_rearm_tx_watchdog_locked(wl);
4337 static int wl12xx_sta_add(struct wl1271 *wl,
4338 struct wl12xx_vif *wlvif,
4339 struct ieee80211_sta *sta)
4341 struct wl1271_station *wl_sta;
4342 int ret = 0;
4343 u8 hlid;
4345 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4347 ret = wl1271_allocate_sta(wl, wlvif, sta);
4348 if (ret < 0)
4349 return ret;
4351 wl_sta = (struct wl1271_station *)sta->drv_priv;
4352 hlid = wl_sta->hlid;
4354 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4355 if (ret < 0)
4356 wl1271_free_sta(wl, wlvif, hlid);
4358 return ret;
4361 static int wl12xx_sta_remove(struct wl1271 *wl,
4362 struct wl12xx_vif *wlvif,
4363 struct ieee80211_sta *sta)
4365 struct wl1271_station *wl_sta;
4366 int ret = 0, id;
4368 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4370 wl_sta = (struct wl1271_station *)sta->drv_priv;
4371 id = wl_sta->hlid;
4372 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4373 return -EINVAL;
4375 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4376 if (ret < 0)
4377 return ret;
4379 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4380 return ret;
4383 static int wl12xx_update_sta_state(struct wl1271 *wl,
4384 struct wl12xx_vif *wlvif,
4385 struct ieee80211_sta *sta,
4386 enum ieee80211_sta_state old_state,
4387 enum ieee80211_sta_state new_state)
4389 struct wl1271_station *wl_sta;
4390 u8 hlid;
4391 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4392 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4393 int ret;
4395 wl_sta = (struct wl1271_station *)sta->drv_priv;
4396 hlid = wl_sta->hlid;
4398 /* Add station (AP mode) */
4399 if (is_ap &&
4400 old_state == IEEE80211_STA_NOTEXIST &&
4401 new_state == IEEE80211_STA_NONE)
4402 return wl12xx_sta_add(wl, wlvif, sta);
4404 /* Remove station (AP mode) */
4405 if (is_ap &&
4406 old_state == IEEE80211_STA_NONE &&
4407 new_state == IEEE80211_STA_NOTEXIST) {
4408 /* must not fail */
4409 wl12xx_sta_remove(wl, wlvif, sta);
4410 return 0;
4413 /* Authorize station (AP mode) */
4414 if (is_ap &&
4415 new_state == IEEE80211_STA_AUTHORIZED) {
4416 ret = wl12xx_cmd_set_peer_state(wl, hlid);
4417 if (ret < 0)
4418 return ret;
4420 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4421 hlid);
4422 return ret;
4425 /* Authorize station */
4426 if (is_sta &&
4427 new_state == IEEE80211_STA_AUTHORIZED) {
4428 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4429 return wl12xx_set_authorized(wl, wlvif);
4432 if (is_sta &&
4433 old_state == IEEE80211_STA_AUTHORIZED &&
4434 new_state == IEEE80211_STA_ASSOC) {
4435 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4436 return 0;
4439 return 0;
4442 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4443 struct ieee80211_vif *vif,
4444 struct ieee80211_sta *sta,
4445 enum ieee80211_sta_state old_state,
4446 enum ieee80211_sta_state new_state)
4448 struct wl1271 *wl = hw->priv;
4449 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4450 int ret;
4452 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4453 sta->aid, old_state, new_state);
4455 mutex_lock(&wl->mutex);
4457 if (unlikely(wl->state == WL1271_STATE_OFF)) {
4458 ret = -EBUSY;
4459 goto out;
4462 ret = wl1271_ps_elp_wakeup(wl);
4463 if (ret < 0)
4464 goto out;
4466 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4468 wl1271_ps_elp_sleep(wl);
4469 out:
4470 mutex_unlock(&wl->mutex);
4471 if (new_state < old_state)
4472 return 0;
4473 return ret;
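/*
 * AMPDU handling: only RX block-ack sessions are set up or torn down
 * here (bounded by RX_BA_MAX_SESSIONS and the per-link BA bitmap); TX
 * aggregation is owned by the FW, so the TX_* actions return -EINVAL.
 */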
4476 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4477 struct ieee80211_vif *vif,
4478 enum ieee80211_ampdu_mlme_action action,
4479 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4480 u8 buf_size)
4482 struct wl1271 *wl = hw->priv;
4483 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4484 int ret;
4485 u8 hlid, *ba_bitmap;
4487 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4488 tid);
4490 /* sanity check - the fields in FW are only 8 bits wide */
4491 if (WARN_ON(tid > 0xFF))
4492 return -ENOTSUPP;
4494 mutex_lock(&wl->mutex);
4496 if (unlikely(wl->state == WL1271_STATE_OFF)) {
4497 ret = -EAGAIN;
4498 goto out;
4501 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4502 hlid = wlvif->sta.hlid;
4503 ba_bitmap = &wlvif->sta.ba_rx_bitmap;
4504 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4505 struct wl1271_station *wl_sta;
4507 wl_sta = (struct wl1271_station *)sta->drv_priv;
4508 hlid = wl_sta->hlid;
4509 ba_bitmap = &wl->links[hlid].ba_bitmap;
4510 } else {
4511 ret = -EINVAL;
4512 goto out;
4515 ret = wl1271_ps_elp_wakeup(wl);
4516 if (ret < 0)
4517 goto out;
4519 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4520 tid, action);
4522 switch (action) {
4523 case IEEE80211_AMPDU_RX_START:
4524 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4525 ret = -ENOTSUPP;
4526 break;
4529 if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
4530 ret = -EBUSY;
4531 wl1271_error("exceeded max RX BA sessions");
4532 break;
4535 if (*ba_bitmap & BIT(tid)) {
4536 ret = -EINVAL;
4537 wl1271_error("cannot enable RX BA session on active "
4538 "tid: %d", tid);
4539 break;
4542 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4543 hlid);
4544 if (!ret) {
4545 *ba_bitmap |= BIT(tid);
4546 wl->ba_rx_session_count++;
4548 break;
4550 case IEEE80211_AMPDU_RX_STOP:
4551 if (!(*ba_bitmap & BIT(tid))) {
4553 * this happens on reconfig - so only output a debug
4554 * message for now, and don't fail the function.
4556 wl1271_debug(DEBUG_MAC80211,
4557 "no active RX BA session on tid: %d",
4558 tid);
4559 ret = 0;
4560 break;
4563 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4564 hlid);
4565 if (!ret) {
4566 *ba_bitmap &= ~BIT(tid);
4567 wl->ba_rx_session_count--;
4569 break;
4572 * The BA initiator session is managed by the FW independently.
4573 * All TX AMPDU actions therefore fall through on purpose to the
* same -EINVAL below.
4575 case IEEE80211_AMPDU_TX_START:
4576 case IEEE80211_AMPDU_TX_STOP:
4577 case IEEE80211_AMPDU_TX_OPERATIONAL:
4578 ret = -EINVAL;
4579 break;
4581 default:
4582 wl1271_error("Incorrect ampdu action id=%x\n", action);
4583 ret = -EINVAL;
4586 wl1271_ps_elp_sleep(wl);
4588 out:
4589 mutex_unlock(&wl->mutex);
4591 return ret;
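/*
 * Store the per-band legacy bitrate masks on the vif and, for a STA vif
 * that is not yet associated, recompute the basic rate and push the new
 * rate policies to the FW immediately.
 */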
4594 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4595 struct ieee80211_vif *vif,
4596 const struct cfg80211_bitrate_mask *mask)
4598 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4599 struct wl1271 *wl = hw->priv;
4600 int i, ret = 0;
4602 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4603 mask->control[NL80211_BAND_2GHZ].legacy,
4604 mask->control[NL80211_BAND_5GHZ].legacy);
4606 mutex_lock(&wl->mutex);
4608 for (i = 0; i < WLCORE_NUM_BANDS; i++)
4609 wlvif->bitrate_masks[i] =
4610 wl1271_tx_enabled_rates_get(wl,
4611 mask->control[i].legacy, i);
4614 if (unlikely(wl->state == WL1271_STATE_OFF))
4615 goto out;
4617 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4618 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4620 ret = wl1271_ps_elp_wakeup(wl);
4621 if (ret < 0)
4622 goto out;
4624 wl1271_set_band_rate(wl, wlvif);
4625 wlvif->basic_rate =
4626 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4627 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4629 wl1271_ps_elp_sleep(wl);
4631 out:
4632 mutex_unlock(&wl->mutex);
4634 return ret;
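/*
 * Channel switch requested by mac80211: flush TX, then send the FW
 * channel-switch command on every STA vif and mark the switch as in
 * progress; if the device is already off, report the switch as failed.
 */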
4637 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4638 struct ieee80211_channel_switch *ch_switch)
4640 struct wl1271 *wl = hw->priv;
4641 struct wl12xx_vif *wlvif;
4642 int ret;
4644 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4646 wl1271_tx_flush(wl);
4648 mutex_lock(&wl->mutex);
4650 if (unlikely(wl->state == WL1271_STATE_OFF)) {
4651 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4652 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4653 ieee80211_chswitch_done(vif, false);
4655 goto out;
4658 ret = wl1271_ps_elp_wakeup(wl);
4659 if (ret < 0)
4660 goto out;
4662 /* TODO: change mac80211 to pass vif as param */
4663 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4664 ret = wl12xx_cmd_channel_switch(wl, wlvif, ch_switch);
4666 if (!ret)
4667 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4670 wl1271_ps_elp_sleep(wl);
4672 out:
4673 mutex_unlock(&wl->mutex);
4676 static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
4678 struct wl1271 *wl = hw->priv;
4680 wl1271_tx_flush(wl);
4683 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
4685 struct wl1271 *wl = hw->priv;
4686 bool ret = false;
4688 mutex_lock(&wl->mutex);
4690 if (unlikely(wl->state == WL1271_STATE_OFF))
4691 goto out;
4693 /* packets are considered pending if in the TX queue or the FW */
4694 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
4695 out:
4696 mutex_unlock(&wl->mutex);
4698 return ret;
4701 /* can't be const, mac80211 writes to this */
4702 static struct ieee80211_rate wl1271_rates[] = {
4703 { .bitrate = 10,
4704 .hw_value = CONF_HW_BIT_RATE_1MBPS,
4705 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
4706 { .bitrate = 20,
4707 .hw_value = CONF_HW_BIT_RATE_2MBPS,
4708 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
4709 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4710 { .bitrate = 55,
4711 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
4712 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
4713 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4714 { .bitrate = 110,
4715 .hw_value = CONF_HW_BIT_RATE_11MBPS,
4716 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
4717 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
4718 { .bitrate = 60,
4719 .hw_value = CONF_HW_BIT_RATE_6MBPS,
4720 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
4721 { .bitrate = 90,
4722 .hw_value = CONF_HW_BIT_RATE_9MBPS,
4723 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
4724 { .bitrate = 120,
4725 .hw_value = CONF_HW_BIT_RATE_12MBPS,
4726 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
4727 { .bitrate = 180,
4728 .hw_value = CONF_HW_BIT_RATE_18MBPS,
4729 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
4730 { .bitrate = 240,
4731 .hw_value = CONF_HW_BIT_RATE_24MBPS,
4732 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
4733 { .bitrate = 360,
4734 .hw_value = CONF_HW_BIT_RATE_36MBPS,
4735 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
4736 { .bitrate = 480,
4737 .hw_value = CONF_HW_BIT_RATE_48MBPS,
4738 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
4739 { .bitrate = 540,
4740 .hw_value = CONF_HW_BIT_RATE_54MBPS,
4741 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
4744 /* can't be const, mac80211 writes to this */
4745 static struct ieee80211_channel wl1271_channels[] = {
4746 { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
4747 { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
4748 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
4749 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
4750 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
4751 { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
4752 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
4753 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
4754 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
4755 { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
4756 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
4757 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
4758 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
4759 { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
4762 /* can't be const, mac80211 writes to this */
4763 static struct ieee80211_supported_band wl1271_band_2ghz = {
4764 .channels = wl1271_channels,
4765 .n_channels = ARRAY_SIZE(wl1271_channels),
4766 .bitrates = wl1271_rates,
4767 .n_bitrates = ARRAY_SIZE(wl1271_rates),
4770 /* 5 GHz data rates for WL1273 */
4771 static struct ieee80211_rate wl1271_rates_5ghz[] = {
4772 { .bitrate = 60,
4773 .hw_value = CONF_HW_BIT_RATE_6MBPS,
4774 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
4775 { .bitrate = 90,
4776 .hw_value = CONF_HW_BIT_RATE_9MBPS,
4777 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
4778 { .bitrate = 120,
4779 .hw_value = CONF_HW_BIT_RATE_12MBPS,
4780 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
4781 { .bitrate = 180,
4782 .hw_value = CONF_HW_BIT_RATE_18MBPS,
4783 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
4784 { .bitrate = 240,
4785 .hw_value = CONF_HW_BIT_RATE_24MBPS,
4786 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
4787 { .bitrate = 360,
4788 .hw_value = CONF_HW_BIT_RATE_36MBPS,
4789 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
4790 { .bitrate = 480,
4791 .hw_value = CONF_HW_BIT_RATE_48MBPS,
4792 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
4793 { .bitrate = 540,
4794 .hw_value = CONF_HW_BIT_RATE_54MBPS,
4795 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
4798 /* 5 GHz band channels for WL1273 */
4799 static struct ieee80211_channel wl1271_channels_5ghz[] = {
4800 { .hw_value = 7, .center_freq = 5035, .max_power = 25 },
4801 { .hw_value = 8, .center_freq = 5040, .max_power = 25 },
4802 { .hw_value = 9, .center_freq = 5045, .max_power = 25 },
4803 { .hw_value = 11, .center_freq = 5055, .max_power = 25 },
4804 { .hw_value = 12, .center_freq = 5060, .max_power = 25 },
4805 { .hw_value = 16, .center_freq = 5080, .max_power = 25 },
4806 { .hw_value = 34, .center_freq = 5170, .max_power = 25 },
4807 { .hw_value = 36, .center_freq = 5180, .max_power = 25 },
4808 { .hw_value = 38, .center_freq = 5190, .max_power = 25 },
4809 { .hw_value = 40, .center_freq = 5200, .max_power = 25 },
4810 { .hw_value = 42, .center_freq = 5210, .max_power = 25 },
4811 { .hw_value = 44, .center_freq = 5220, .max_power = 25 },
4812 { .hw_value = 46, .center_freq = 5230, .max_power = 25 },
4813 { .hw_value = 48, .center_freq = 5240, .max_power = 25 },
4814 { .hw_value = 52, .center_freq = 5260, .max_power = 25 },
4815 { .hw_value = 56, .center_freq = 5280, .max_power = 25 },
4816 { .hw_value = 60, .center_freq = 5300, .max_power = 25 },
4817 { .hw_value = 64, .center_freq = 5320, .max_power = 25 },
4818 { .hw_value = 100, .center_freq = 5500, .max_power = 25 },
4819 { .hw_value = 104, .center_freq = 5520, .max_power = 25 },
4820 { .hw_value = 108, .center_freq = 5540, .max_power = 25 },
4821 { .hw_value = 112, .center_freq = 5560, .max_power = 25 },
4822 { .hw_value = 116, .center_freq = 5580, .max_power = 25 },
4823 { .hw_value = 120, .center_freq = 5600, .max_power = 25 },
4824 { .hw_value = 124, .center_freq = 5620, .max_power = 25 },
4825 { .hw_value = 128, .center_freq = 5640, .max_power = 25 },
4826 { .hw_value = 132, .center_freq = 5660, .max_power = 25 },
4827 { .hw_value = 136, .center_freq = 5680, .max_power = 25 },
4828 { .hw_value = 140, .center_freq = 5700, .max_power = 25 },
4829 { .hw_value = 149, .center_freq = 5745, .max_power = 25 },
4830 { .hw_value = 153, .center_freq = 5765, .max_power = 25 },
4831 { .hw_value = 157, .center_freq = 5785, .max_power = 25 },
4832 { .hw_value = 161, .center_freq = 5805, .max_power = 25 },
4833 { .hw_value = 165, .center_freq = 5825, .max_power = 25 },
4836 static struct ieee80211_supported_band wl1271_band_5ghz = {
4837 .channels = wl1271_channels_5ghz,
4838 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
4839 .bitrates = wl1271_rates_5ghz,
4840 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
4843 static const struct ieee80211_ops wl1271_ops = {
4844 .start = wl1271_op_start,
4845 .stop = wlcore_op_stop,
4846 .add_interface = wl1271_op_add_interface,
4847 .remove_interface = wl1271_op_remove_interface,
4848 .change_interface = wl12xx_op_change_interface,
4849 #ifdef CONFIG_PM
4850 .suspend = wl1271_op_suspend,
4851 .resume = wl1271_op_resume,
4852 #endif
4853 .config = wl1271_op_config,
4854 .prepare_multicast = wl1271_op_prepare_multicast,
4855 .configure_filter = wl1271_op_configure_filter,
4856 .tx = wl1271_op_tx,
4857 .set_key = wlcore_op_set_key,
4858 .hw_scan = wl1271_op_hw_scan,
4859 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
4860 .sched_scan_start = wl1271_op_sched_scan_start,
4861 .sched_scan_stop = wl1271_op_sched_scan_stop,
4862 .bss_info_changed = wl1271_op_bss_info_changed,
4863 .set_frag_threshold = wl1271_op_set_frag_threshold,
4864 .set_rts_threshold = wl1271_op_set_rts_threshold,
4865 .conf_tx = wl1271_op_conf_tx,
4866 .get_tsf = wl1271_op_get_tsf,
4867 .get_survey = wl1271_op_get_survey,
4868 .sta_state = wl12xx_op_sta_state,
4869 .ampdu_action = wl1271_op_ampdu_action,
4870 .tx_frames_pending = wl1271_tx_frames_pending,
4871 .set_bitrate_mask = wl12xx_set_bitrate_mask,
4872 .channel_switch = wl12xx_op_channel_switch,
4873 .flush = wlcore_op_flush,
4874 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
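/*
 * Map a HW rate index reported by the FW to the mac80211 rate index for
 * the given band; out-of-range or unsupported values are logged and
 * mapped to index 0.
 */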
4878 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
4880 u8 idx;
4882 BUG_ON(band >= 2);
4884 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
4885 wl1271_error("Illegal RX rate from HW: %d", rate);
4886 return 0;
4889 idx = wl->band_rate_to_idx[band][rate];
4890 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
4891 wl1271_error("Unsupported RX rate from HW: %d", rate);
4892 return 0;
4895 return idx;
4898 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
4899 struct device_attribute *attr,
4900 char *buf)
4902 struct wl1271 *wl = dev_get_drvdata(dev);
4903 ssize_t len;
4905 len = PAGE_SIZE;
4907 mutex_lock(&wl->mutex);
4908 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
4909 wl->sg_enabled);
4910 mutex_unlock(&wl->mutex);
4912 return len;
4916 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
4917 struct device_attribute *attr,
4918 const char *buf, size_t count)
4920 struct wl1271 *wl = dev_get_drvdata(dev);
4921 unsigned long res;
4922 int ret;
4924 ret = kstrtoul(buf, 10, &res);
4925 if (ret < 0) {
4926 wl1271_warning("incorrect value written to bt_coex_mode");
4927 return count;
4930 mutex_lock(&wl->mutex);
4932 res = !!res;
4934 if (res == wl->sg_enabled)
4935 goto out;
4937 wl->sg_enabled = res;
4939 if (wl->state == WL1271_STATE_OFF)
4940 goto out;
4942 ret = wl1271_ps_elp_wakeup(wl);
4943 if (ret < 0)
4944 goto out;
4946 wl1271_acx_sg_enable(wl, wl->sg_enabled);
4947 wl1271_ps_elp_sleep(wl);
4949 out:
4950 mutex_unlock(&wl->mutex);
4951 return count;
4954 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
4955 wl1271_sysfs_show_bt_coex_state,
4956 wl1271_sysfs_store_bt_coex_state);
4958 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
4959 struct device_attribute *attr,
4960 char *buf)
4962 struct wl1271 *wl = dev_get_drvdata(dev);
4963 ssize_t len;
4965 len = PAGE_SIZE;
4967 mutex_lock(&wl->mutex);
4968 if (wl->hw_pg_ver >= 0)
4969 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
4970 else
4971 len = snprintf(buf, len, "n/a\n");
4972 mutex_unlock(&wl->mutex);
4974 return len;
4977 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
4978 wl1271_sysfs_show_hw_pg_ver, NULL);
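/*
 * Blocking sysfs read of the FW log: a reader sleeps on fwlog_waitq
 * until data is available, then drains up to 'count' bytes from the
 * head of the buffer. Seeking is not supported, old data is discarded.
 */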
4980 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
4981 struct bin_attribute *bin_attr,
4982 char *buffer, loff_t pos, size_t count)
4984 struct device *dev = container_of(kobj, struct device, kobj);
4985 struct wl1271 *wl = dev_get_drvdata(dev);
4986 ssize_t len;
4987 int ret;
4989 ret = mutex_lock_interruptible(&wl->mutex);
4990 if (ret < 0)
4991 return -ERESTARTSYS;
4993 /* Let only one thread read the log at a time, blocking others */
4994 while (wl->fwlog_size == 0) {
4995 DEFINE_WAIT(wait);
4997 prepare_to_wait_exclusive(&wl->fwlog_waitq,
4998 &wait,
4999 TASK_INTERRUPTIBLE);
5001 if (wl->fwlog_size != 0) {
5002 finish_wait(&wl->fwlog_waitq, &wait);
5003 break;
5006 mutex_unlock(&wl->mutex);
5008 schedule();
5009 finish_wait(&wl->fwlog_waitq, &wait);
5011 if (signal_pending(current))
5012 return -ERESTARTSYS;
5014 ret = mutex_lock_interruptible(&wl->mutex);
5015 if (ret < 0)
5016 return -ERESTARTSYS;
5019 /* Check if the fwlog is still valid */
5020 if (wl->fwlog_size < 0) {
5021 mutex_unlock(&wl->mutex);
5022 return 0;
5025 /* Seeking is not supported - old logs are not kept. Disregard pos. */
5026 len = min(count, (size_t)wl->fwlog_size);
5027 wl->fwlog_size -= len;
5028 memcpy(buffer, wl->fwlog, len);
5030 /* Make room for new messages */
5031 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
5033 mutex_unlock(&wl->mutex);
5035 return len;
5038 static struct bin_attribute fwlog_attr = {
5039 .attr = {.name = "fwlog", .mode = S_IRUSR},
5040 .read = wl1271_sysfs_read_fwlog,
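/*
 * Delayed work scheduled on beacon loss: report the connection loss to
 * mac80211 for the associated STA interfaces.
 */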
5043 static void wl1271_connection_loss_work(struct work_struct *work)
5045 struct delayed_work *dwork;
5046 struct wl1271 *wl;
5047 struct ieee80211_vif *vif;
5048 struct wl12xx_vif *wlvif;
5050 dwork = container_of(work, struct delayed_work, work);
5051 wl = container_of(dwork, struct wl1271, connection_loss_work);
5053 wl1271_info("Connection loss work.");
5055 mutex_lock(&wl->mutex);
5057 if (unlikely(wl->state == WL1271_STATE_OFF))
5058 goto out;
5060 /* Call mac80211 connection loss */
5061 wl12xx_for_each_wlvif_sta(wl, wlvif) {
5062 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5063 goto out;
5064 vif = wl12xx_wlvif_to_vif(wlvif);
5065 ieee80211_connection_loss(vif);
5067 out:
5068 mutex_unlock(&wl->mutex);
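/*
 * Build 'n' consecutive WLAN MAC addresses from the 24-bit OUI and NIC
 * parts and publish them via wiphy->addresses, warning if the NIC part
 * would wrap past 0xffffff.
 */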
5071 static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
5072 u32 oui, u32 nic, int n)
5074 int i;
5076 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x, n %d",
5077 oui, nic, n);
5079 if (nic + n - 1 > 0xffffff)
5080 wl1271_warning("NIC part of the MAC address wraps around!");
5082 for (i = 0; i < n; i++) {
5083 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5084 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5085 wl->addresses[i].addr[2] = (u8) oui;
5086 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5087 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5088 wl->addresses[i].addr[5] = (u8) nic;
5089 nic++;
5092 wl->hw->wiphy->n_addresses = n;
5093 wl->hw->wiphy->addresses = wl->addresses;
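/*
 * Power the chip on just long enough to read the chip ID, the PG
 * version and (through the lower driver's get_mac op, if provided) the
 * fused MAC address, then power it back off.
 */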
5096 static int wl12xx_get_hw_info(struct wl1271 *wl)
5098 int ret;
5100 ret = wl12xx_set_power_on(wl);
5101 if (ret < 0)
5102 goto out;
5104 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5105 if (ret < 0)
5106 goto out;
5108 wl->fuse_oui_addr = 0;
5109 wl->fuse_nic_addr = 0;
5111 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5112 if (ret < 0)
5113 goto out;
5115 if (wl->ops->get_mac)
5116 ret = wl->ops->get_mac(wl);
5118 out:
5119 wl1271_power_off(wl);
5120 return ret;
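/*
 * One-time mac80211 registration: take the base MAC address from the
 * NVS when available, otherwise derive it from the fuse (the fuse holds
 * the BD_ADDR, the WLAN addresses follow it), then register the hw.
 */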
5123 static int wl1271_register_hw(struct wl1271 *wl)
5125 int ret;
5126 u32 oui_addr = 0, nic_addr = 0;
5128 if (wl->mac80211_registered)
5129 return 0;
5131 wl1271_fetch_nvs(wl);
5132 if (wl->nvs != NULL) {
5133 /* NOTE: The wl->nvs->nvs element must be first, in
5134 * order to simplify the casting, we assume it is at
5135 * the beginning of the wl->nvs structure.
5137 u8 *nvs_ptr = (u8 *)wl->nvs;
5139 oui_addr =
5140 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5141 nic_addr =
5142 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5145 /* if the MAC address is zeroed in the NVS derive from fuse */
5146 if (oui_addr == 0 && nic_addr == 0) {
5147 oui_addr = wl->fuse_oui_addr;
5148 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5149 nic_addr = wl->fuse_nic_addr + 1;
5152 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr, 2);
5154 ret = ieee80211_register_hw(wl->hw);
5155 if (ret < 0) {
5156 wl1271_error("unable to register mac80211 hw: %d", ret);
5157 goto out;
5160 wl->mac80211_registered = true;
5162 wl1271_debugfs_init(wl);
5164 wl1271_notice("loaded");
5166 out:
5167 return ret;
5170 static void wl1271_unregister_hw(struct wl1271 *wl)
5172 if (wl->plt)
5173 wl1271_plt_stop(wl);
5175 ieee80211_unregister_hw(wl->hw);
5176 wl->mac80211_registered = false;
5180 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5182 .max = 2,
5183 .types = BIT(NL80211_IFTYPE_STATION),
5186 .max = 1,
5187 .types = BIT(NL80211_IFTYPE_AP) |
5188 BIT(NL80211_IFTYPE_P2P_GO) |
5189 BIT(NL80211_IFTYPE_P2P_CLIENT),
5193 static const struct ieee80211_iface_combination
5194 wlcore_iface_combinations[] = {
5196 .num_different_channels = 1,
5197 .max_interfaces = 2,
5198 .limits = wlcore_iface_limits,
5199 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
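/*
 * One-time setup of the ieee80211_hw/wiphy: cipher suites, HW feature
 * flags, supported interface types and combinations, scan limits and
 * per-device copies of the band descriptors.
 */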
5203 static int wl1271_init_ieee80211(struct wl1271 *wl)
5205 static const u32 cipher_suites[] = {
5206 WLAN_CIPHER_SUITE_WEP40,
5207 WLAN_CIPHER_SUITE_WEP104,
5208 WLAN_CIPHER_SUITE_TKIP,
5209 WLAN_CIPHER_SUITE_CCMP,
5210 WL1271_CIPHER_SUITE_GEM,
5213 /* The tx descriptor buffer */
5214 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
5216 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5217 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5219 /* unit: microseconds */
5220 /* FIXME: find a proper value */
5221 wl->hw->channel_change_time = 10000;
5222 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5224 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5225 IEEE80211_HW_SUPPORTS_PS |
5226 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5227 IEEE80211_HW_SUPPORTS_UAPSD |
5228 IEEE80211_HW_HAS_RATE_CONTROL |
5229 IEEE80211_HW_CONNECTION_MONITOR |
5230 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5231 IEEE80211_HW_SPECTRUM_MGMT |
5232 IEEE80211_HW_AP_LINK_PS |
5233 IEEE80211_HW_AMPDU_AGGREGATION |
5234 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5235 IEEE80211_HW_SCAN_WHILE_IDLE;
5237 wl->hw->wiphy->cipher_suites = cipher_suites;
5238 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5240 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5241 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5242 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5243 wl->hw->wiphy->max_scan_ssids = 1;
5244 wl->hw->wiphy->max_sched_scan_ssids = 16;
5245 wl->hw->wiphy->max_match_sets = 16;
5247 * Maximum length of elements in scanning probe request templates
5248 * should be the maximum length possible for a template, without
5249 * the IEEE80211 header of the template
5251 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5252 sizeof(struct ieee80211_header);
5254 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5255 sizeof(struct ieee80211_header);
5257 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5258 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5260 /* make sure all our channels fit in the scanned_ch bitmask */
5261 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5262 ARRAY_SIZE(wl1271_channels_5ghz) >
5263 WL1271_MAX_CHANNELS);
5265 * We keep local copies of the band structs because we need to
5266 * modify them on a per-device basis.
5268 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5269 sizeof(wl1271_band_2ghz));
5270 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5271 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5272 sizeof(*wl->ht_cap));
5273 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5274 sizeof(wl1271_band_5ghz));
5275 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5276 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5277 sizeof(*wl->ht_cap));
5279 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5280 &wl->bands[IEEE80211_BAND_2GHZ];
5281 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5282 &wl->bands[IEEE80211_BAND_5GHZ];
5284 wl->hw->queues = 4;
5285 wl->hw->max_rates = 1;
5287 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5289 /* the FW answers probe-requests in AP-mode */
5290 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5291 wl->hw->wiphy->probe_resp_offload =
5292 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5293 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5294 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5296 /* allowed interface combinations */
5297 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5298 wl->hw->wiphy->n_iface_combinations =
5299 ARRAY_SIZE(wlcore_iface_combinations);
5301 SET_IEEE80211_DEV(wl->hw, wl->dev);
5303 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5304 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5306 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5308 return 0;
5311 #define WL1271_DEFAULT_CHANNEL 0
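/*
 * Allocate the ieee80211_hw together with the wl1271 core state and the
 * lower-driver private area, and set up work items, the freezable
 * workqueue, the TX aggregation buffer, the dummy packet, one page of
 * FW log and the DMA-capable mailbox.
 */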
5313 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
5315 struct ieee80211_hw *hw;
5316 struct wl1271 *wl;
5317 int i, j, ret;
5318 unsigned int order;
5320 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5322 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5323 if (!hw) {
5324 wl1271_error("could not alloc ieee80211_hw");
5325 ret = -ENOMEM;
5326 goto err_hw_alloc;
5329 wl = hw->priv;
5330 memset(wl, 0, sizeof(*wl));
5332 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5333 if (!wl->priv) {
5334 wl1271_error("could not alloc wl priv");
5335 ret = -ENOMEM;
5336 goto err_priv_alloc;
5339 INIT_LIST_HEAD(&wl->wlvif_list);
5341 wl->hw = hw;
5343 for (i = 0; i < NUM_TX_QUEUES; i++)
5344 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5345 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5347 skb_queue_head_init(&wl->deferred_rx_queue);
5348 skb_queue_head_init(&wl->deferred_tx_queue);
5350 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5351 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5352 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5353 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5354 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5355 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5356 INIT_DELAYED_WORK(&wl->connection_loss_work,
5357 wl1271_connection_loss_work);
5359 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5360 if (!wl->freezable_wq) {
5361 ret = -ENOMEM;
5362 goto err_hw;
5365 wl->channel = WL1271_DEFAULT_CHANNEL;
5366 wl->rx_counter = 0;
5367 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5368 wl->band = IEEE80211_BAND_2GHZ;
5369 wl->channel_type = NL80211_CHAN_NO_HT;
5370 wl->flags = 0;
5371 wl->sg_enabled = true;
5372 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5373 wl->hw_pg_ver = -1;
5374 wl->ap_ps_map = 0;
5375 wl->ap_fw_ps_map = 0;
5376 wl->quirks = 0;
5377 wl->platform_quirks = 0;
5378 wl->sched_scanning = false;
5379 wl->system_hlid = WL12XX_SYSTEM_HLID;
5380 wl->active_sta_count = 0;
5381 wl->fwlog_size = 0;
5382 init_waitqueue_head(&wl->fwlog_waitq);
5384 /* The system link is always allocated */
5385 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5387 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5388 for (i = 0; i < wl->num_tx_desc; i++)
5389 wl->tx_frames[i] = NULL;
5391 spin_lock_init(&wl->wl_lock);
5393 wl->state = WL1271_STATE_OFF;
5394 wl->fw_type = WL12XX_FW_TYPE_NONE;
5395 mutex_init(&wl->mutex);
5396 mutex_init(&wl->flush_mutex);
5398 order = get_order(WL1271_AGGR_BUFFER_SIZE);
5399 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5400 if (!wl->aggr_buf) {
5401 ret = -ENOMEM;
5402 goto err_wq;
5405 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5406 if (!wl->dummy_packet) {
5407 ret = -ENOMEM;
5408 goto err_aggr;
5411 /* Allocate one page for the FW log */
5412 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5413 if (!wl->fwlog) {
5414 ret = -ENOMEM;
5415 goto err_dummy_packet;
5418 wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_KERNEL | GFP_DMA);
5419 if (!wl->mbox) {
5420 ret = -ENOMEM;
5421 goto err_fwlog;
5424 return hw;
5426 err_fwlog:
5427 free_page((unsigned long)wl->fwlog);
5429 err_dummy_packet:
5430 dev_kfree_skb(wl->dummy_packet);
5432 err_aggr:
5433 free_pages((unsigned long)wl->aggr_buf, order);
5435 err_wq:
5436 destroy_workqueue(wl->freezable_wq);
5438 err_hw:
5439 wl1271_debugfs_exit(wl);
5440 kfree(wl->priv);
5442 err_priv_alloc:
5443 ieee80211_free_hw(hw);
5445 err_hw_alloc:
5447 return ERR_PTR(ret);
5449 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5451 int wlcore_free_hw(struct wl1271 *wl)
5453 /* Unblock any fwlog readers */
5454 mutex_lock(&wl->mutex);
5455 wl->fwlog_size = -1;
5456 wake_up_interruptible_all(&wl->fwlog_waitq);
5457 mutex_unlock(&wl->mutex);
5459 device_remove_bin_file(wl->dev, &fwlog_attr);
5461 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5463 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5464 free_page((unsigned long)wl->fwlog);
5465 dev_kfree_skb(wl->dummy_packet);
5466 free_pages((unsigned long)wl->aggr_buf,
5467 get_order(WL1271_AGGR_BUFFER_SIZE));
5469 wl1271_debugfs_exit(wl);
5471 vfree(wl->fw);
5472 wl->fw = NULL;
5473 wl->fw_type = WL12XX_FW_TYPE_NONE;
5474 kfree(wl->nvs);
5475 wl->nvs = NULL;
5477 kfree(wl->fw_status_1);
5478 kfree(wl->tx_res_if);
5479 destroy_workqueue(wl->freezable_wq);
5481 kfree(wl->priv);
5482 ieee80211_free_hw(wl->hw);
5484 return 0;
5486 EXPORT_SYMBOL_GPL(wlcore_free_hw);
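/*
 * Hard IRQ handler: complete a pending ELP wakeup and normally defer to
 * the threaded handler; while suspended, only mark the work as pending,
 * disable the IRQ and raise a PM wakeup event.
 */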
5488 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5490 struct wl1271 *wl = cookie;
5491 unsigned long flags;
5493 wl1271_debug(DEBUG_IRQ, "IRQ");
5495 /* complete the ELP completion */
5496 spin_lock_irqsave(&wl->wl_lock, flags);
5497 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5498 if (wl->elp_compl) {
5499 complete(wl->elp_compl);
5500 wl->elp_compl = NULL;
5503 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5504 /* don't enqueue work right now; mark it as pending */
5505 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5506 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
5507 disable_irq_nosync(wl->irq);
5508 pm_wakeup_event(wl->dev, 0);
5509 spin_unlock_irqrestore(&wl->wl_lock, flags);
5510 return IRQ_HANDLED;
5512 spin_unlock_irqrestore(&wl->wl_lock, flags);
5514 return IRQ_WAKE_THREAD;
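/*
 * Platform probe: wire up the platform data (IRQ, ops, quirks), request
 * the threaded IRQ, read the chip ID and PG version, register with
 * mac80211 and create the bt_coex_state, hw_pg_ver and fwlog sysfs
 * entries.
 */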
5517 int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5519 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5520 unsigned long irqflags;
5521 int ret;
5523 if (!wl->ops || !wl->ptable) {
5524 ret = -EINVAL;
5525 goto out_free_hw;
5528 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5530 /* adjust some runtime configuration parameters */
5531 wlcore_adjust_conf(wl);
5533 wl->irq = platform_get_irq(pdev, 0);
5534 wl->platform_quirks = pdata->platform_quirks;
5535 wl->set_power = pdata->set_power;
5536 wl->dev = &pdev->dev;
5537 wl->if_ops = pdata->ops;
5539 platform_set_drvdata(pdev, wl);
5541 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
5542 irqflags = IRQF_TRIGGER_RISING;
5543 else
5544 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
5546 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
5547 irqflags,
5548 pdev->name, wl);
5549 if (ret < 0) {
5550 wl1271_error("request_irq() failed: %d", ret);
5551 goto out_free_hw;
5554 #ifdef CONFIG_PM
5555 ret = enable_irq_wake(wl->irq);
5556 if (!ret) {
5557 wl->irq_wake_enabled = true;
5558 device_init_wakeup(wl->dev, 1);
5559 if (pdata->pwr_in_suspend) {
5560 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
5561 wl->hw->wiphy->wowlan.n_patterns =
5562 WL1271_MAX_RX_FILTERS;
5563 wl->hw->wiphy->wowlan.pattern_min_len = 1;
5564 wl->hw->wiphy->wowlan.pattern_max_len =
5565 WL1271_RX_FILTER_MAX_PATTERN_SIZE;
5568 #endif
5569 disable_irq(wl->irq);
5571 ret = wl12xx_get_hw_info(wl);
5572 if (ret < 0) {
5573 wl1271_error("couldn't get hw info");
5574 goto out_irq;
5577 ret = wl->ops->identify_chip(wl);
5578 if (ret < 0)
5579 goto out_irq;
5581 ret = wl1271_init_ieee80211(wl);
5582 if (ret)
5583 goto out_irq;
5585 ret = wl1271_register_hw(wl);
5586 if (ret)
5587 goto out_irq;
5589 /* Create sysfs file to control bt coex state */
5590 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
5591 if (ret < 0) {
5592 wl1271_error("failed to create sysfs file bt_coex_state");
5593 goto out_unreg;
5596 /* Create sysfs file to get HW PG version */
5597 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
5598 if (ret < 0) {
5599 wl1271_error("failed to create sysfs file hw_pg_ver");
5600 goto out_bt_coex_state;
5603 /* Create sysfs file for the FW log */
5604 ret = device_create_bin_file(wl->dev, &fwlog_attr);
5605 if (ret < 0) {
5606 wl1271_error("failed to create sysfs file fwlog");
5607 goto out_hw_pg_ver;
5610 goto out;
5612 out_hw_pg_ver:
5613 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5615 out_bt_coex_state:
5616 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5618 out_unreg:
5619 wl1271_unregister_hw(wl);
5621 out_irq:
5622 free_irq(wl->irq, wl);
5624 out_free_hw:
5625 wlcore_free_hw(wl);
5627 out:
5628 return ret;
5630 EXPORT_SYMBOL_GPL(wlcore_probe);
5632 int __devexit wlcore_remove(struct platform_device *pdev)
5634 struct wl1271 *wl = platform_get_drvdata(pdev);
5636 if (wl->irq_wake_enabled) {
5637 device_init_wakeup(wl->dev, 0);
5638 disable_irq_wake(wl->irq);
5640 wl1271_unregister_hw(wl);
5641 free_irq(wl->irq, wl);
5642 wlcore_free_hw(wl);
5644 return 0;
5646 EXPORT_SYMBOL_GPL(wlcore_remove);
5648 u32 wl12xx_debug_level = DEBUG_NONE;
5649 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
5650 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
5651 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
5653 module_param_named(fwlog, fwlog_param, charp, 0);
5654 MODULE_PARM_DESC(fwlog,
5655 "FW logger options: continuous, ondemand, dbgpins or disable");
5657 module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
5658 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
5660 module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
5661 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
5663 MODULE_LICENSE("GPL");
5664 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5665 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");