2 * This file is part of wlcore
4 * Copyright (C) 2008-2010 Nokia Corporation
5 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
32 #include "wl12xx_80211.h"
39 #include "vendor_cmd.h"
44 #define WL1271_BOOT_RETRIES 3
45 #define WL1271_SUSPEND_SLEEP 100
/*
 * Module parameters. -1 means "not set on the command line; keep the
 * value from the platform conf" (see wlcore_adjust_conf()).
 */
static char *fwlog_param;		/* "continuous" / "dbgpins" / "disable" */
static int fwlog_mem_blocks = -1;	/* number of FW-log memory blocks */
static int bug_on_recovery = -1;	/* BUG() on unintended FW recovery */
static int no_recovery = -1;		/* leave the FW stuck instead of recovering */
52 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
53 struct ieee80211_vif
*vif
,
54 bool reset_tx_queues
);
55 static void wlcore_op_stop_locked(struct wl1271
*wl
);
56 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
);
58 static int wl12xx_set_authorized(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
62 if (WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
))
65 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
68 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
))
71 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
, wlvif
->sta
.hlid
);
75 wl1271_info("Association completed.");
79 static void wl1271_reg_notify(struct wiphy
*wiphy
,
80 struct regulatory_request
*request
)
82 struct ieee80211_hw
*hw
= wiphy_to_ieee80211_hw(wiphy
);
83 struct wl1271
*wl
= hw
->priv
;
85 /* copy the current dfs region */
87 wl
->dfs_region
= request
->dfs_region
;
89 wlcore_regdomain_config(wl
);
92 static int wl1271_set_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
97 /* we should hold wl->mutex */
98 ret
= wl1271_acx_ps_rx_streaming(wl
, wlvif
, enable
);
103 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
105 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
111 * this function is being called when the rx_streaming interval
112 * has beed changed or rx_streaming should be disabled
114 int wl1271_recalc_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
117 int period
= wl
->conf
.rx_streaming
.interval
;
119 /* don't reconfigure if rx_streaming is disabled */
120 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
123 /* reconfigure/disable according to new streaming_period */
125 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
126 (wl
->conf
.rx_streaming
.always
||
127 test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
128 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
130 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
131 /* don't cancel_work_sync since we might deadlock */
132 del_timer_sync(&wlvif
->rx_streaming_timer
);
138 static void wl1271_rx_streaming_enable_work(struct work_struct
*work
)
141 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
142 rx_streaming_enable_work
);
143 struct wl1271
*wl
= wlvif
->wl
;
145 mutex_lock(&wl
->mutex
);
147 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
) ||
148 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
149 (!wl
->conf
.rx_streaming
.always
&&
150 !test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
153 if (!wl
->conf
.rx_streaming
.interval
)
156 ret
= wl1271_ps_elp_wakeup(wl
);
160 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
164 /* stop it after some time of inactivity */
165 mod_timer(&wlvif
->rx_streaming_timer
,
166 jiffies
+ msecs_to_jiffies(wl
->conf
.rx_streaming
.duration
));
169 wl1271_ps_elp_sleep(wl
);
171 mutex_unlock(&wl
->mutex
);
174 static void wl1271_rx_streaming_disable_work(struct work_struct
*work
)
177 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
178 rx_streaming_disable_work
);
179 struct wl1271
*wl
= wlvif
->wl
;
181 mutex_lock(&wl
->mutex
);
183 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
186 ret
= wl1271_ps_elp_wakeup(wl
);
190 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
195 wl1271_ps_elp_sleep(wl
);
197 mutex_unlock(&wl
->mutex
);
200 static void wl1271_rx_streaming_timer(struct timer_list
*t
)
202 struct wl12xx_vif
*wlvif
= from_timer(wlvif
, t
, rx_streaming_timer
);
203 struct wl1271
*wl
= wlvif
->wl
;
204 ieee80211_queue_work(wl
->hw
, &wlvif
->rx_streaming_disable_work
);
207 /* wl->mutex must be taken */
208 void wl12xx_rearm_tx_watchdog_locked(struct wl1271
*wl
)
210 /* if the watchdog is not armed, don't do anything */
211 if (wl
->tx_allocated_blocks
== 0)
214 cancel_delayed_work(&wl
->tx_watchdog_work
);
215 ieee80211_queue_delayed_work(wl
->hw
, &wl
->tx_watchdog_work
,
216 msecs_to_jiffies(wl
->conf
.tx
.tx_watchdog_timeout
));
219 static void wlcore_rc_update_work(struct work_struct
*work
)
222 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
224 struct wl1271
*wl
= wlvif
->wl
;
225 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
227 mutex_lock(&wl
->mutex
);
229 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
232 ret
= wl1271_ps_elp_wakeup(wl
);
236 if (ieee80211_vif_is_mesh(vif
)) {
237 ret
= wl1271_acx_set_ht_capabilities(wl
, &wlvif
->rc_ht_cap
,
238 true, wlvif
->sta
.hlid
);
242 wlcore_hw_sta_rc_update(wl
, wlvif
);
246 wl1271_ps_elp_sleep(wl
);
248 mutex_unlock(&wl
->mutex
);
251 static void wl12xx_tx_watchdog_work(struct work_struct
*work
)
253 struct delayed_work
*dwork
;
256 dwork
= to_delayed_work(work
);
257 wl
= container_of(dwork
, struct wl1271
, tx_watchdog_work
);
259 mutex_lock(&wl
->mutex
);
261 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
264 /* Tx went out in the meantime - everything is ok */
265 if (unlikely(wl
->tx_allocated_blocks
== 0))
269 * if a ROC is in progress, we might not have any Tx for a long
270 * time (e.g. pending Tx on the non-ROC channels)
272 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
273 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to ROC",
274 wl
->conf
.tx
.tx_watchdog_timeout
);
275 wl12xx_rearm_tx_watchdog_locked(wl
);
280 * if a scan is in progress, we might not have any Tx for a long
283 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
) {
284 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to scan",
285 wl
->conf
.tx
.tx_watchdog_timeout
);
286 wl12xx_rearm_tx_watchdog_locked(wl
);
291 * AP might cache a frame for a long time for a sleeping station,
292 * so rearm the timer if there's an AP interface with stations. If
293 * Tx is genuinely stuck we will most hopefully discover it when all
294 * stations are removed due to inactivity.
296 if (wl
->active_sta_count
) {
297 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms. AP has "
299 wl
->conf
.tx
.tx_watchdog_timeout
,
300 wl
->active_sta_count
);
301 wl12xx_rearm_tx_watchdog_locked(wl
);
305 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
306 wl
->conf
.tx
.tx_watchdog_timeout
);
307 wl12xx_queue_recovery_work(wl
);
310 mutex_unlock(&wl
->mutex
);
313 static void wlcore_adjust_conf(struct wl1271
*wl
)
317 if (!strcmp(fwlog_param
, "continuous")) {
318 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
319 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_HOST
;
320 } else if (!strcmp(fwlog_param
, "dbgpins")) {
321 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
322 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_DBG_PINS
;
323 } else if (!strcmp(fwlog_param
, "disable")) {
324 wl
->conf
.fwlog
.mem_blocks
= 0;
325 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_NONE
;
327 wl1271_error("Unknown fwlog parameter %s", fwlog_param
);
331 if (bug_on_recovery
!= -1)
332 wl
->conf
.recovery
.bug_on_recovery
= (u8
) bug_on_recovery
;
334 if (no_recovery
!= -1)
335 wl
->conf
.recovery
.no_recovery
= (u8
) no_recovery
;
338 static void wl12xx_irq_ps_regulate_link(struct wl1271
*wl
,
339 struct wl12xx_vif
*wlvif
,
344 fw_ps
= test_bit(hlid
, &wl
->ap_fw_ps_map
);
347 * Wake up from high level PS if the STA is asleep with too little
348 * packets in FW or if the STA is awake.
350 if (!fw_ps
|| tx_pkts
< WL1271_PS_STA_MAX_PACKETS
)
351 wl12xx_ps_link_end(wl
, wlvif
, hlid
);
354 * Start high-level PS if the STA is asleep with enough blocks in FW.
355 * Make an exception if this is the only connected link. In this
356 * case FW-memory congestion is less of a problem.
357 * Note that a single connected STA means 2*ap_count + 1 active links,
358 * since we must account for the global and broadcast AP links
359 * for each AP. The "fw_ps" check assures us the other link is a STA
360 * connected to the AP. Otherwise the FW would not set the PSM bit.
362 else if (wl
->active_link_count
> (wl
->ap_count
*2 + 1) && fw_ps
&&
363 tx_pkts
>= WL1271_PS_STA_MAX_PACKETS
)
364 wl12xx_ps_link_start(wl
, wlvif
, hlid
, true);
367 static void wl12xx_irq_update_links_status(struct wl1271
*wl
,
368 struct wl12xx_vif
*wlvif
,
369 struct wl_fw_status
*status
)
371 unsigned long cur_fw_ps_map
;
374 cur_fw_ps_map
= status
->link_ps_bitmap
;
375 if (wl
->ap_fw_ps_map
!= cur_fw_ps_map
) {
376 wl1271_debug(DEBUG_PSM
,
377 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
378 wl
->ap_fw_ps_map
, cur_fw_ps_map
,
379 wl
->ap_fw_ps_map
^ cur_fw_ps_map
);
381 wl
->ap_fw_ps_map
= cur_fw_ps_map
;
384 for_each_set_bit(hlid
, wlvif
->ap
.sta_hlid_map
, wl
->num_links
)
385 wl12xx_irq_ps_regulate_link(wl
, wlvif
, hlid
,
386 wl
->links
[hlid
].allocated_pkts
);
389 static int wlcore_fw_status(struct wl1271
*wl
, struct wl_fw_status
*status
)
391 struct wl12xx_vif
*wlvif
;
392 u32 old_tx_blk_count
= wl
->tx_blocks_available
;
393 int avail
, freed_blocks
;
396 struct wl1271_link
*lnk
;
398 ret
= wlcore_raw_read_data(wl
, REG_RAW_FW_STATUS_ADDR
,
400 wl
->fw_status_len
, false);
404 wlcore_hw_convert_fw_status(wl
, wl
->raw_fw_status
, wl
->fw_status
);
406 wl1271_debug(DEBUG_IRQ
, "intr: 0x%x (fw_rx_counter = %d, "
407 "drv_rx_counter = %d, tx_results_counter = %d)",
409 status
->fw_rx_counter
,
410 status
->drv_rx_counter
,
411 status
->tx_results_counter
);
413 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
414 /* prevent wrap-around in freed-packets counter */
415 wl
->tx_allocated_pkts
[i
] -=
416 (status
->counters
.tx_released_pkts
[i
] -
417 wl
->tx_pkts_freed
[i
]) & 0xff;
419 wl
->tx_pkts_freed
[i
] = status
->counters
.tx_released_pkts
[i
];
423 for_each_set_bit(i
, wl
->links_map
, wl
->num_links
) {
427 /* prevent wrap-around in freed-packets counter */
428 diff
= (status
->counters
.tx_lnk_free_pkts
[i
] -
429 lnk
->prev_freed_pkts
) & 0xff;
434 lnk
->allocated_pkts
-= diff
;
435 lnk
->prev_freed_pkts
= status
->counters
.tx_lnk_free_pkts
[i
];
437 /* accumulate the prev_freed_pkts counter */
438 lnk
->total_freed_pkts
+= diff
;
441 /* prevent wrap-around in total blocks counter */
442 if (likely(wl
->tx_blocks_freed
<= status
->total_released_blks
))
443 freed_blocks
= status
->total_released_blks
-
446 freed_blocks
= 0x100000000LL
- wl
->tx_blocks_freed
+
447 status
->total_released_blks
;
449 wl
->tx_blocks_freed
= status
->total_released_blks
;
451 wl
->tx_allocated_blocks
-= freed_blocks
;
454 * If the FW freed some blocks:
455 * If we still have allocated blocks - re-arm the timer, Tx is
456 * not stuck. Otherwise, cancel the timer (no Tx currently).
459 if (wl
->tx_allocated_blocks
)
460 wl12xx_rearm_tx_watchdog_locked(wl
);
462 cancel_delayed_work(&wl
->tx_watchdog_work
);
465 avail
= status
->tx_total
- wl
->tx_allocated_blocks
;
468 * The FW might change the total number of TX memblocks before
469 * we get a notification about blocks being released. Thus, the
470 * available blocks calculation might yield a temporary result
471 * which is lower than the actual available blocks. Keeping in
472 * mind that only blocks that were allocated can be moved from
473 * TX to RX, tx_blocks_available should never decrease here.
475 wl
->tx_blocks_available
= max((int)wl
->tx_blocks_available
,
478 /* if more blocks are available now, tx work can be scheduled */
479 if (wl
->tx_blocks_available
> old_tx_blk_count
)
480 clear_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
);
482 /* for AP update num of allocated TX blocks per link and ps status */
483 wl12xx_for_each_wlvif_ap(wl
, wlvif
) {
484 wl12xx_irq_update_links_status(wl
, wlvif
, status
);
487 /* update the host-chipset time offset */
488 wl
->time_offset
= (ktime_get_boot_ns() >> 10) -
489 (s64
)(status
->fw_localtime
);
491 wl
->fw_fast_lnk_map
= status
->link_fast_bitmap
;
496 static void wl1271_flush_deferred_work(struct wl1271
*wl
)
500 /* Pass all received frames to the network stack */
501 while ((skb
= skb_dequeue(&wl
->deferred_rx_queue
)))
502 ieee80211_rx_ni(wl
->hw
, skb
);
504 /* Return sent skbs to the network stack */
505 while ((skb
= skb_dequeue(&wl
->deferred_tx_queue
)))
506 ieee80211_tx_status_ni(wl
->hw
, skb
);
509 static void wl1271_netstack_work(struct work_struct
*work
)
512 container_of(work
, struct wl1271
, netstack_work
);
515 wl1271_flush_deferred_work(wl
);
516 } while (skb_queue_len(&wl
->deferred_rx_queue
));
519 #define WL1271_IRQ_MAX_LOOPS 256
521 static int wlcore_irq_locked(struct wl1271
*wl
)
525 int loopcount
= WL1271_IRQ_MAX_LOOPS
;
527 unsigned int defer_count
;
531 * In case edge triggered interrupt must be used, we cannot iterate
532 * more than once without introducing race conditions with the hardirq.
534 if (wl
->irq_flags
& (IRQF_TRIGGER_RISING
| IRQF_TRIGGER_FALLING
))
537 wl1271_debug(DEBUG_IRQ
, "IRQ work");
539 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
542 ret
= wl1271_ps_elp_wakeup(wl
);
546 while (!done
&& loopcount
--) {
548 * In order to avoid a race with the hardirq, clear the flag
549 * before acknowledging the chip. Since the mutex is held,
550 * wl1271_ps_elp_wakeup cannot be called concurrently.
552 clear_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
553 smp_mb__after_atomic();
555 ret
= wlcore_fw_status(wl
, wl
->fw_status
);
559 wlcore_hw_tx_immediate_compl(wl
);
561 intr
= wl
->fw_status
->intr
;
562 intr
&= WLCORE_ALL_INTR_MASK
;
568 if (unlikely(intr
& WL1271_ACX_INTR_WATCHDOG
)) {
569 wl1271_error("HW watchdog interrupt received! starting recovery.");
570 wl
->watchdog_recovery
= true;
573 /* restarting the chip. ignore any other interrupt. */
577 if (unlikely(intr
& WL1271_ACX_SW_INTR_WATCHDOG
)) {
578 wl1271_error("SW watchdog interrupt received! "
579 "starting recovery.");
580 wl
->watchdog_recovery
= true;
583 /* restarting the chip. ignore any other interrupt. */
587 if (likely(intr
& WL1271_ACX_INTR_DATA
)) {
588 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_DATA");
590 ret
= wlcore_rx(wl
, wl
->fw_status
);
594 /* Check if any tx blocks were freed */
595 spin_lock_irqsave(&wl
->wl_lock
, flags
);
596 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
597 wl1271_tx_total_queue_count(wl
) > 0) {
598 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
600 * In order to avoid starvation of the TX path,
601 * call the work function directly.
603 ret
= wlcore_tx_work_locked(wl
);
607 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
610 /* check for tx results */
611 ret
= wlcore_hw_tx_delayed_compl(wl
);
615 /* Make sure the deferred queues don't get too long */
616 defer_count
= skb_queue_len(&wl
->deferred_tx_queue
) +
617 skb_queue_len(&wl
->deferred_rx_queue
);
618 if (defer_count
> WL1271_DEFERRED_QUEUE_LIMIT
)
619 wl1271_flush_deferred_work(wl
);
622 if (intr
& WL1271_ACX_INTR_EVENT_A
) {
623 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_A");
624 ret
= wl1271_event_handle(wl
, 0);
629 if (intr
& WL1271_ACX_INTR_EVENT_B
) {
630 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_B");
631 ret
= wl1271_event_handle(wl
, 1);
636 if (intr
& WL1271_ACX_INTR_INIT_COMPLETE
)
637 wl1271_debug(DEBUG_IRQ
,
638 "WL1271_ACX_INTR_INIT_COMPLETE");
640 if (intr
& WL1271_ACX_INTR_HW_AVAILABLE
)
641 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_HW_AVAILABLE");
644 wl1271_ps_elp_sleep(wl
);
650 static irqreturn_t
wlcore_irq(int irq
, void *cookie
)
654 struct wl1271
*wl
= cookie
;
656 /* complete the ELP completion */
657 spin_lock_irqsave(&wl
->wl_lock
, flags
);
658 set_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
660 complete(wl
->elp_compl
);
661 wl
->elp_compl
= NULL
;
664 if (test_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
)) {
665 /* don't enqueue a work right now. mark it as pending */
666 set_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
);
667 wl1271_debug(DEBUG_IRQ
, "should not enqueue work");
668 disable_irq_nosync(wl
->irq
);
669 pm_wakeup_event(wl
->dev
, 0);
670 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
673 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
675 /* TX might be handled here, avoid redundant work */
676 set_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
677 cancel_work_sync(&wl
->tx_work
);
679 mutex_lock(&wl
->mutex
);
681 ret
= wlcore_irq_locked(wl
);
683 wl12xx_queue_recovery_work(wl
);
685 spin_lock_irqsave(&wl
->wl_lock
, flags
);
686 /* In case TX was not handled here, queue TX work */
687 clear_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
688 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
689 wl1271_tx_total_queue_count(wl
) > 0)
690 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
691 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
693 mutex_unlock(&wl
->mutex
);
698 struct vif_counter_data
{
701 struct ieee80211_vif
*cur_vif
;
702 bool cur_vif_running
;
705 static void wl12xx_vif_count_iter(void *data
, u8
*mac
,
706 struct ieee80211_vif
*vif
)
708 struct vif_counter_data
*counter
= data
;
711 if (counter
->cur_vif
== vif
)
712 counter
->cur_vif_running
= true;
715 /* caller must not hold wl->mutex, as it might deadlock */
716 static void wl12xx_get_vif_count(struct ieee80211_hw
*hw
,
717 struct ieee80211_vif
*cur_vif
,
718 struct vif_counter_data
*data
)
720 memset(data
, 0, sizeof(*data
));
721 data
->cur_vif
= cur_vif
;
723 ieee80211_iterate_active_interfaces(hw
, IEEE80211_IFACE_ITER_RESUME_ALL
,
724 wl12xx_vif_count_iter
, data
);
727 static int wl12xx_fetch_firmware(struct wl1271
*wl
, bool plt
)
729 const struct firmware
*fw
;
731 enum wl12xx_fw_type fw_type
;
735 fw_type
= WL12XX_FW_TYPE_PLT
;
736 fw_name
= wl
->plt_fw_name
;
739 * we can't call wl12xx_get_vif_count() here because
740 * wl->mutex is taken, so use the cached last_vif_count value
742 if (wl
->last_vif_count
> 1 && wl
->mr_fw_name
) {
743 fw_type
= WL12XX_FW_TYPE_MULTI
;
744 fw_name
= wl
->mr_fw_name
;
746 fw_type
= WL12XX_FW_TYPE_NORMAL
;
747 fw_name
= wl
->sr_fw_name
;
751 if (wl
->fw_type
== fw_type
)
754 wl1271_debug(DEBUG_BOOT
, "booting firmware %s", fw_name
);
756 ret
= request_firmware(&fw
, fw_name
, wl
->dev
);
759 wl1271_error("could not get firmware %s: %d", fw_name
, ret
);
764 wl1271_error("firmware size is not multiple of 32 bits: %zu",
771 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
772 wl
->fw_len
= fw
->size
;
773 wl
->fw
= vmalloc(wl
->fw_len
);
776 wl1271_error("could not allocate memory for the firmware");
781 memcpy(wl
->fw
, fw
->data
, wl
->fw_len
);
783 wl
->fw_type
= fw_type
;
785 release_firmware(fw
);
790 void wl12xx_queue_recovery_work(struct wl1271
*wl
)
792 /* Avoid a recursive recovery */
793 if (wl
->state
== WLCORE_STATE_ON
) {
794 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
,
797 wl
->state
= WLCORE_STATE_RESTARTING
;
798 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
799 wl1271_ps_elp_wakeup(wl
);
800 wlcore_disable_interrupts_nosync(wl
);
801 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
805 size_t wl12xx_copy_fwlog(struct wl1271
*wl
, u8
*memblock
, size_t maxlen
)
809 /* Make sure we have enough room */
810 len
= min_t(size_t, maxlen
, PAGE_SIZE
- wl
->fwlog_size
);
812 /* Fill the FW log file, consumed by the sysfs fwlog entry */
813 memcpy(wl
->fwlog
+ wl
->fwlog_size
, memblock
, len
);
814 wl
->fwlog_size
+= len
;
819 static void wl12xx_read_fwlog_panic(struct wl1271
*wl
)
823 if (wl
->quirks
& WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED
)
826 wl1271_info("Reading FW panic log");
829 * Make sure the chip is awake and the logger isn't active.
830 * Do not send a stop fwlog command if the fw is hanged or if
831 * dbgpins are used (due to some fw bug).
833 if (wl1271_ps_elp_wakeup(wl
))
835 if (!wl
->watchdog_recovery
&&
836 wl
->conf
.fwlog
.output
!= WL12XX_FWLOG_OUTPUT_DBG_PINS
)
837 wl12xx_cmd_stop_fwlog(wl
);
839 /* Traverse the memory blocks linked list */
841 end_of_log
= wlcore_event_fw_logger(wl
);
842 if (end_of_log
== 0) {
844 end_of_log
= wlcore_event_fw_logger(wl
);
846 } while (end_of_log
!= 0);
849 static void wlcore_save_freed_pkts(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
850 u8 hlid
, struct ieee80211_sta
*sta
)
852 struct wl1271_station
*wl_sta
;
853 u32 sqn_recovery_padding
= WL1271_TX_SQN_POST_RECOVERY_PADDING
;
855 wl_sta
= (void *)sta
->drv_priv
;
856 wl_sta
->total_freed_pkts
= wl
->links
[hlid
].total_freed_pkts
;
859 * increment the initial seq number on recovery to account for
860 * transmitted packets that we haven't yet got in the FW status
862 if (wlvif
->encryption_type
== KEY_GEM
)
863 sqn_recovery_padding
= WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM
;
865 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
866 wl_sta
->total_freed_pkts
+= sqn_recovery_padding
;
869 static void wlcore_save_freed_pkts_addr(struct wl1271
*wl
,
870 struct wl12xx_vif
*wlvif
,
871 u8 hlid
, const u8
*addr
)
873 struct ieee80211_sta
*sta
;
874 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
876 if (WARN_ON(hlid
== WL12XX_INVALID_LINK_ID
||
877 is_zero_ether_addr(addr
)))
881 sta
= ieee80211_find_sta(vif
, addr
);
883 wlcore_save_freed_pkts(wl
, wlvif
, hlid
, sta
);
887 static void wlcore_print_recovery(struct wl1271
*wl
)
893 wl1271_info("Hardware recovery in progress. FW ver: %s",
894 wl
->chip
.fw_ver_str
);
896 /* change partitions momentarily so we can read the FW pc */
897 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
901 ret
= wlcore_read_reg(wl
, REG_PC_ON_RECOVERY
, &pc
);
905 ret
= wlcore_read_reg(wl
, REG_INTERRUPT_NO_CLEAR
, &hint_sts
);
909 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
910 pc
, hint_sts
, ++wl
->recovery_count
);
912 wlcore_set_partition(wl
, &wl
->ptable
[PART_WORK
]);
916 static void wl1271_recovery_work(struct work_struct
*work
)
919 container_of(work
, struct wl1271
, recovery_work
);
920 struct wl12xx_vif
*wlvif
;
921 struct ieee80211_vif
*vif
;
923 mutex_lock(&wl
->mutex
);
925 if (wl
->state
== WLCORE_STATE_OFF
|| wl
->plt
)
928 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
)) {
929 if (wl
->conf
.fwlog
.output
== WL12XX_FWLOG_OUTPUT_HOST
)
930 wl12xx_read_fwlog_panic(wl
);
931 wlcore_print_recovery(wl
);
934 BUG_ON(wl
->conf
.recovery
.bug_on_recovery
&&
935 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
937 if (wl
->conf
.recovery
.no_recovery
) {
938 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
942 /* Prevent spurious TX during FW restart */
943 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
945 /* reboot the chipset */
946 while (!list_empty(&wl
->wlvif_list
)) {
947 wlvif
= list_first_entry(&wl
->wlvif_list
,
948 struct wl12xx_vif
, list
);
949 vif
= wl12xx_wlvif_to_vif(wlvif
);
951 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
952 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
953 wlcore_save_freed_pkts_addr(wl
, wlvif
, wlvif
->sta
.hlid
,
954 vif
->bss_conf
.bssid
);
957 __wl1271_op_remove_interface(wl
, vif
, false);
960 wlcore_op_stop_locked(wl
);
962 ieee80211_restart_hw(wl
->hw
);
965 * Its safe to enable TX now - the queues are stopped after a request
968 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
971 wl
->watchdog_recovery
= false;
972 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
973 mutex_unlock(&wl
->mutex
);
976 static int wlcore_fw_wakeup(struct wl1271
*wl
)
978 return wlcore_raw_write32(wl
, HW_ACCESS_ELP_CTRL_REG
, ELPCTRL_WAKE_UP
);
981 static int wlcore_fw_sleep(struct wl1271
*wl
)
985 mutex_lock(&wl
->mutex
);
986 ret
= wlcore_raw_write32(wl
, HW_ACCESS_ELP_CTRL_REG
, ELPCTRL_SLEEP
);
988 wl12xx_queue_recovery_work(wl
);
991 set_bit(WL1271_FLAG_IN_ELP
, &wl
->flags
);
993 mutex_unlock(&wl
->mutex
);
994 mdelay(WL1271_SUSPEND_SLEEP
);
999 static int wl1271_setup(struct wl1271
*wl
)
1001 wl
->raw_fw_status
= kzalloc(wl
->fw_status_len
, GFP_KERNEL
);
1002 if (!wl
->raw_fw_status
)
1005 wl
->fw_status
= kzalloc(sizeof(*wl
->fw_status
), GFP_KERNEL
);
1009 wl
->tx_res_if
= kzalloc(sizeof(*wl
->tx_res_if
), GFP_KERNEL
);
1015 kfree(wl
->fw_status
);
1016 kfree(wl
->raw_fw_status
);
1020 static int wl12xx_set_power_on(struct wl1271
*wl
)
1024 msleep(WL1271_PRE_POWER_ON_SLEEP
);
1025 ret
= wl1271_power_on(wl
);
1028 msleep(WL1271_POWER_ON_SLEEP
);
1029 wl1271_io_reset(wl
);
1032 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
1036 /* ELP module wake up */
1037 ret
= wlcore_fw_wakeup(wl
);
1045 wl1271_power_off(wl
);
1049 static int wl12xx_chip_wakeup(struct wl1271
*wl
, bool plt
)
1053 ret
= wl12xx_set_power_on(wl
);
1058 * For wl127x based devices we could use the default block
1059 * size (512 bytes), but due to a bug in the sdio driver, we
1060 * need to set it explicitly after the chip is powered on. To
1061 * simplify the code and since the performance impact is
1062 * negligible, we use the same block size for all different
1065 * Check if the bus supports blocksize alignment and, if it
1066 * doesn't, make sure we don't have the quirk.
1068 if (!wl1271_set_block_size(wl
))
1069 wl
->quirks
&= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN
;
1071 /* TODO: make sure the lower driver has set things up correctly */
1073 ret
= wl1271_setup(wl
);
1077 ret
= wl12xx_fetch_firmware(wl
, plt
);
1085 int wl1271_plt_start(struct wl1271
*wl
, const enum plt_mode plt_mode
)
1087 int retries
= WL1271_BOOT_RETRIES
;
1088 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
1090 static const char* const PLT_MODE
[] = {
1099 mutex_lock(&wl
->mutex
);
1101 wl1271_notice("power up");
1103 if (wl
->state
!= WLCORE_STATE_OFF
) {
1104 wl1271_error("cannot go into PLT state because not "
1105 "in off state: %d", wl
->state
);
1110 /* Indicate to lower levels that we are now in PLT mode */
1112 wl
->plt_mode
= plt_mode
;
1116 ret
= wl12xx_chip_wakeup(wl
, true);
1120 if (plt_mode
!= PLT_CHIP_AWAKE
) {
1121 ret
= wl
->ops
->plt_init(wl
);
1126 wl
->state
= WLCORE_STATE_ON
;
1127 wl1271_notice("firmware booted in PLT mode %s (%s)",
1129 wl
->chip
.fw_ver_str
);
1131 /* update hw/fw version info in wiphy struct */
1132 wiphy
->hw_version
= wl
->chip
.id
;
1133 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
1134 sizeof(wiphy
->fw_version
));
1139 wl1271_power_off(wl
);
1143 wl
->plt_mode
= PLT_OFF
;
1145 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1146 WL1271_BOOT_RETRIES
);
1148 mutex_unlock(&wl
->mutex
);
1153 int wl1271_plt_stop(struct wl1271
*wl
)
1157 wl1271_notice("power down");
1160 * Interrupts must be disabled before setting the state to OFF.
1161 * Otherwise, the interrupt handler might be called and exit without
1162 * reading the interrupt status.
1164 wlcore_disable_interrupts(wl
);
1165 mutex_lock(&wl
->mutex
);
1167 mutex_unlock(&wl
->mutex
);
1170 * This will not necessarily enable interrupts as interrupts
1171 * may have been disabled when op_stop was called. It will,
1172 * however, balance the above call to disable_interrupts().
1174 wlcore_enable_interrupts(wl
);
1176 wl1271_error("cannot power down because not in PLT "
1177 "state: %d", wl
->state
);
1182 mutex_unlock(&wl
->mutex
);
1184 wl1271_flush_deferred_work(wl
);
1185 cancel_work_sync(&wl
->netstack_work
);
1186 cancel_work_sync(&wl
->recovery_work
);
1187 cancel_delayed_work_sync(&wl
->elp_work
);
1188 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1190 mutex_lock(&wl
->mutex
);
1191 wl1271_power_off(wl
);
1193 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1194 wl
->state
= WLCORE_STATE_OFF
;
1196 wl
->plt_mode
= PLT_OFF
;
1198 mutex_unlock(&wl
->mutex
);
1204 static void wl1271_op_tx(struct ieee80211_hw
*hw
,
1205 struct ieee80211_tx_control
*control
,
1206 struct sk_buff
*skb
)
1208 struct wl1271
*wl
= hw
->priv
;
1209 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
1210 struct ieee80211_vif
*vif
= info
->control
.vif
;
1211 struct wl12xx_vif
*wlvif
= NULL
;
1212 unsigned long flags
;
1217 wl1271_debug(DEBUG_TX
, "DROP skb with no vif");
1218 ieee80211_free_txskb(hw
, skb
);
1222 wlvif
= wl12xx_vif_to_data(vif
);
1223 mapping
= skb_get_queue_mapping(skb
);
1224 q
= wl1271_tx_get_queue(mapping
);
1226 hlid
= wl12xx_tx_get_hlid(wl
, wlvif
, skb
, control
->sta
);
1228 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1231 * drop the packet if the link is invalid or the queue is stopped
1232 * for any reason but watermark. Watermark is a "soft"-stop so we
1233 * allow these packets through.
1235 if (hlid
== WL12XX_INVALID_LINK_ID
||
1236 (!test_bit(hlid
, wlvif
->links_map
)) ||
1237 (wlcore_is_queue_stopped_locked(wl
, wlvif
, q
) &&
1238 !wlcore_is_queue_stopped_by_reason_locked(wl
, wlvif
, q
,
1239 WLCORE_QUEUE_STOP_REASON_WATERMARK
))) {
1240 wl1271_debug(DEBUG_TX
, "DROP skb hlid %d q %d", hlid
, q
);
1241 ieee80211_free_txskb(hw
, skb
);
1245 wl1271_debug(DEBUG_TX
, "queue skb hlid %d q %d len %d",
1247 skb_queue_tail(&wl
->links
[hlid
].tx_queue
[q
], skb
);
1249 wl
->tx_queue_count
[q
]++;
1250 wlvif
->tx_queue_count
[q
]++;
1253 * The workqueue is slow to process the tx_queue and we need stop
1254 * the queue here, otherwise the queue will get too long.
1256 if (wlvif
->tx_queue_count
[q
] >= WL1271_TX_QUEUE_HIGH_WATERMARK
&&
1257 !wlcore_is_queue_stopped_by_reason_locked(wl
, wlvif
, q
,
1258 WLCORE_QUEUE_STOP_REASON_WATERMARK
)) {
1259 wl1271_debug(DEBUG_TX
, "op_tx: stopping queues for q %d", q
);
1260 wlcore_stop_queue_locked(wl
, wlvif
, q
,
1261 WLCORE_QUEUE_STOP_REASON_WATERMARK
);
1265 * The chip specific setup must run before the first TX packet -
1266 * before that, the tx_work will not be initialized!
1269 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
1270 !test_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
))
1271 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
1274 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1277 int wl1271_tx_dummy_packet(struct wl1271
*wl
)
1279 unsigned long flags
;
1282 /* no need to queue a new dummy packet if one is already pending */
1283 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
))
1286 q
= wl1271_tx_get_queue(skb_get_queue_mapping(wl
->dummy_packet
));
1288 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1289 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
);
1290 wl
->tx_queue_count
[q
]++;
1291 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1293 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1294 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
))
1295 return wlcore_tx_work_locked(wl
);
1298 * If the FW TX is busy, TX work will be scheduled by the threaded
1299 * interrupt handler function
1305 * The size of the dummy packet should be at least 1400 bytes. However, in
1306 * order to minimize the number of bus transactions, aligning it to 512 bytes
1307 * boundaries could be beneficial, performance wise
1309 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1311 static struct sk_buff
*wl12xx_alloc_dummy_packet(struct wl1271
*wl
)
1313 struct sk_buff
*skb
;
1314 struct ieee80211_hdr_3addr
*hdr
;
1315 unsigned int dummy_packet_size
;
1317 dummy_packet_size
= TOTAL_TX_DUMMY_PACKET_SIZE
-
1318 sizeof(struct wl1271_tx_hw_descr
) - sizeof(*hdr
);
1320 skb
= dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE
);
1322 wl1271_warning("Failed to allocate a dummy packet skb");
1326 skb_reserve(skb
, sizeof(struct wl1271_tx_hw_descr
));
1328 hdr
= skb_put_zero(skb
, sizeof(*hdr
));
1329 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_DATA
|
1330 IEEE80211_STYPE_NULLFUNC
|
1331 IEEE80211_FCTL_TODS
);
1333 skb_put_zero(skb
, dummy_packet_size
);
1335 /* Dummy packets require the TID to be management */
1336 skb
->priority
= WL1271_TID_MGMT
;
1338 /* Initialize all fields that might be used */
1339 skb_set_queue_mapping(skb
, 0);
1340 memset(IEEE80211_SKB_CB(skb
), 0, sizeof(struct ieee80211_tx_info
));
1347 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern
*p
)
1349 int num_fields
= 0, in_field
= 0, fields_size
= 0;
1350 int i
, pattern_len
= 0;
1353 wl1271_warning("No mask in WoWLAN pattern");
1358 * The pattern is broken up into segments of bytes at different offsets
1359 * that need to be checked by the FW filter. Each segment is called
1360 * a field in the FW API. We verify that the total number of fields
1361 * required for this pattern won't exceed FW limits (8)
1362 * as well as the total fields buffer won't exceed the FW limit.
1363 * Note that if there's a pattern which crosses Ethernet/IP header
1364 * boundary a new field is required.
1366 for (i
= 0; i
< p
->pattern_len
; i
++) {
1367 if (test_bit(i
, (unsigned long *)p
->mask
)) {
1372 if (i
== WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1374 fields_size
+= pattern_len
+
1375 RX_FILTER_FIELD_OVERHEAD
;
1383 fields_size
+= pattern_len
+
1384 RX_FILTER_FIELD_OVERHEAD
;
1391 fields_size
+= pattern_len
+ RX_FILTER_FIELD_OVERHEAD
;
1395 if (num_fields
> WL1271_RX_FILTER_MAX_FIELDS
) {
1396 wl1271_warning("RX Filter too complex. Too many segments");
1400 if (fields_size
> WL1271_RX_FILTER_MAX_FIELDS_SIZE
) {
1401 wl1271_warning("RX filter pattern is too big");
1408 struct wl12xx_rx_filter
*wl1271_rx_filter_alloc(void)
1410 return kzalloc(sizeof(struct wl12xx_rx_filter
), GFP_KERNEL
);
1413 void wl1271_rx_filter_free(struct wl12xx_rx_filter
*filter
)
1420 for (i
= 0; i
< filter
->num_fields
; i
++)
1421 kfree(filter
->fields
[i
].pattern
);
1426 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter
*filter
,
1427 u16 offset
, u8 flags
,
1428 const u8
*pattern
, u8 len
)
1430 struct wl12xx_rx_filter_field
*field
;
1432 if (filter
->num_fields
== WL1271_RX_FILTER_MAX_FIELDS
) {
1433 wl1271_warning("Max fields per RX filter. can't alloc another");
1437 field
= &filter
->fields
[filter
->num_fields
];
1439 field
->pattern
= kzalloc(len
, GFP_KERNEL
);
1440 if (!field
->pattern
) {
1441 wl1271_warning("Failed to allocate RX filter pattern");
1445 filter
->num_fields
++;
1447 field
->offset
= cpu_to_le16(offset
);
1448 field
->flags
= flags
;
1450 memcpy(field
->pattern
, pattern
, len
);
1455 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter
*filter
)
1457 int i
, fields_size
= 0;
1459 for (i
= 0; i
< filter
->num_fields
; i
++)
1460 fields_size
+= filter
->fields
[i
].len
+
1461 sizeof(struct wl12xx_rx_filter_field
) -
1467 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter
*filter
,
1471 struct wl12xx_rx_filter_field
*field
;
1473 for (i
= 0; i
< filter
->num_fields
; i
++) {
1474 field
= (struct wl12xx_rx_filter_field
*)buf
;
1476 field
->offset
= filter
->fields
[i
].offset
;
1477 field
->flags
= filter
->fields
[i
].flags
;
1478 field
->len
= filter
->fields
[i
].len
;
1480 memcpy(&field
->pattern
, filter
->fields
[i
].pattern
, field
->len
);
1481 buf
+= sizeof(struct wl12xx_rx_filter_field
) -
1482 sizeof(u8
*) + field
->len
;
1487 * Allocates an RX filter returned through f
1488 * which needs to be freed using rx_filter_free()
1491 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern
*p
,
1492 struct wl12xx_rx_filter
**f
)
1495 struct wl12xx_rx_filter
*filter
;
1499 filter
= wl1271_rx_filter_alloc();
1501 wl1271_warning("Failed to alloc rx filter");
1507 while (i
< p
->pattern_len
) {
1508 if (!test_bit(i
, (unsigned long *)p
->mask
)) {
1513 for (j
= i
; j
< p
->pattern_len
; j
++) {
1514 if (!test_bit(j
, (unsigned long *)p
->mask
))
1517 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
&&
1518 j
>= WL1271_RX_FILTER_ETH_HEADER_SIZE
)
1522 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1524 flags
= WL1271_RX_FILTER_FLAG_ETHERNET_HEADER
;
1526 offset
= i
- WL1271_RX_FILTER_ETH_HEADER_SIZE
;
1527 flags
= WL1271_RX_FILTER_FLAG_IP_HEADER
;
1532 ret
= wl1271_rx_filter_alloc_field(filter
,
1535 &p
->pattern
[i
], len
);
1542 filter
->action
= FILTER_SIGNAL
;
1548 wl1271_rx_filter_free(filter
);
1554 static int wl1271_configure_wowlan(struct wl1271
*wl
,
1555 struct cfg80211_wowlan
*wow
)
1559 if (!wow
|| wow
->any
|| !wow
->n_patterns
) {
1560 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0,
1565 ret
= wl1271_rx_filter_clear_all(wl
);
1572 if (WARN_ON(wow
->n_patterns
> WL1271_MAX_RX_FILTERS
))
1575 /* Validate all incoming patterns before clearing current FW state */
1576 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1577 ret
= wl1271_validate_wowlan_pattern(&wow
->patterns
[i
]);
1579 wl1271_warning("Bad wowlan pattern %d", i
);
1584 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0, FILTER_SIGNAL
);
1588 ret
= wl1271_rx_filter_clear_all(wl
);
1592 /* Translate WoWLAN patterns into filters */
1593 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1594 struct cfg80211_pkt_pattern
*p
;
1595 struct wl12xx_rx_filter
*filter
= NULL
;
1597 p
= &wow
->patterns
[i
];
1599 ret
= wl1271_convert_wowlan_pattern_to_rx_filter(p
, &filter
);
1601 wl1271_warning("Failed to create an RX filter from "
1602 "wowlan pattern %d", i
);
1606 ret
= wl1271_rx_filter_enable(wl
, i
, 1, filter
);
1608 wl1271_rx_filter_free(filter
);
1613 ret
= wl1271_acx_default_rx_filter_enable(wl
, 1, FILTER_DROP
);
1619 static int wl1271_configure_suspend_sta(struct wl1271
*wl
,
1620 struct wl12xx_vif
*wlvif
,
1621 struct cfg80211_wowlan
*wow
)
1625 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1628 ret
= wl1271_configure_wowlan(wl
, wow
);
1632 if ((wl
->conf
.conn
.suspend_wake_up_event
==
1633 wl
->conf
.conn
.wake_up_event
) &&
1634 (wl
->conf
.conn
.suspend_listen_interval
==
1635 wl
->conf
.conn
.listen_interval
))
1638 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1639 wl
->conf
.conn
.suspend_wake_up_event
,
1640 wl
->conf
.conn
.suspend_listen_interval
);
1643 wl1271_error("suspend: set wake up conditions failed: %d", ret
);
1649 static int wl1271_configure_suspend_ap(struct wl1271
*wl
,
1650 struct wl12xx_vif
*wlvif
,
1651 struct cfg80211_wowlan
*wow
)
1655 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
1658 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, true);
1662 ret
= wl1271_configure_wowlan(wl
, wow
);
1671 static int wl1271_configure_suspend(struct wl1271
*wl
,
1672 struct wl12xx_vif
*wlvif
,
1673 struct cfg80211_wowlan
*wow
)
1675 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
)
1676 return wl1271_configure_suspend_sta(wl
, wlvif
, wow
);
1677 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
1678 return wl1271_configure_suspend_ap(wl
, wlvif
, wow
);
1682 static void wl1271_configure_resume(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
1685 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
1686 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
1688 if ((!is_ap
) && (!is_sta
))
1691 if ((is_sta
&& !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) ||
1692 (is_ap
&& !test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)))
1695 wl1271_configure_wowlan(wl
, NULL
);
1698 if ((wl
->conf
.conn
.suspend_wake_up_event
==
1699 wl
->conf
.conn
.wake_up_event
) &&
1700 (wl
->conf
.conn
.suspend_listen_interval
==
1701 wl
->conf
.conn
.listen_interval
))
1704 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1705 wl
->conf
.conn
.wake_up_event
,
1706 wl
->conf
.conn
.listen_interval
);
1709 wl1271_error("resume: wake up conditions failed: %d",
1713 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, false);
1717 static int __maybe_unused
wl1271_op_suspend(struct ieee80211_hw
*hw
,
1718 struct cfg80211_wowlan
*wow
)
1720 struct wl1271
*wl
= hw
->priv
;
1721 struct wl12xx_vif
*wlvif
;
1724 wl1271_debug(DEBUG_MAC80211
, "mac80211 suspend wow=%d", !!wow
);
1727 /* we want to perform the recovery before suspending */
1728 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
1729 wl1271_warning("postponing suspend to perform recovery");
1733 wl1271_tx_flush(wl
);
1735 mutex_lock(&wl
->mutex
);
1737 ret
= wl1271_ps_elp_wakeup(wl
);
1739 mutex_unlock(&wl
->mutex
);
1743 wl
->wow_enabled
= true;
1744 wl12xx_for_each_wlvif(wl
, wlvif
) {
1745 if (wlcore_is_p2p_mgmt(wlvif
))
1748 ret
= wl1271_configure_suspend(wl
, wlvif
, wow
);
1750 mutex_unlock(&wl
->mutex
);
1751 wl1271_warning("couldn't prepare device to suspend");
1756 /* disable fast link flow control notifications from FW */
1757 ret
= wlcore_hw_interrupt_notify(wl
, false);
1761 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1762 ret
= wlcore_hw_rx_ba_filter(wl
,
1763 !!wl
->conf
.conn
.suspend_rx_ba_activity
);
1768 mutex_unlock(&wl
->mutex
);
1771 wl1271_warning("couldn't prepare device to suspend");
1775 /* flush any remaining work */
1776 wl1271_debug(DEBUG_MAC80211
, "flushing remaining works");
1779 * disable and re-enable interrupts in order to flush
1782 wlcore_disable_interrupts(wl
);
1785 * set suspended flag to avoid triggering a new threaded_irq
1786 * work. no need for spinlock as interrupts are disabled.
1788 set_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1790 wlcore_enable_interrupts(wl
);
1791 flush_work(&wl
->tx_work
);
1792 flush_delayed_work(&wl
->elp_work
);
1795 * Cancel the watchdog even if above tx_flush failed. We will detect
1796 * it on resume anyway.
1798 cancel_delayed_work(&wl
->tx_watchdog_work
);
1801 * Use an immediate call for allowing the firmware to go into power
1802 * save during suspend.
1803 * Using a workque for this last write was only hapenning on resume
1804 * leaving the firmware with power save disabled during suspend,
1805 * while consuming full power during wowlan suspend.
1807 wlcore_fw_sleep(wl
);
1812 static int __maybe_unused
wl1271_op_resume(struct ieee80211_hw
*hw
)
1814 struct wl1271
*wl
= hw
->priv
;
1815 struct wl12xx_vif
*wlvif
;
1816 unsigned long flags
;
1817 bool run_irq_work
= false, pending_recovery
;
1820 wl1271_debug(DEBUG_MAC80211
, "mac80211 resume wow=%d",
1822 WARN_ON(!wl
->wow_enabled
);
1825 * re-enable irq_work enqueuing, and call irq_work directly if
1826 * there is a pending work.
1828 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1829 clear_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1830 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
))
1831 run_irq_work
= true;
1832 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1834 mutex_lock(&wl
->mutex
);
1836 /* test the recovery flag before calling any SDIO functions */
1837 pending_recovery
= test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1841 wl1271_debug(DEBUG_MAC80211
,
1842 "run postponed irq_work directly");
1844 /* don't talk to the HW if recovery is pending */
1845 if (!pending_recovery
) {
1846 ret
= wlcore_irq_locked(wl
);
1848 wl12xx_queue_recovery_work(wl
);
1851 wlcore_enable_interrupts(wl
);
1854 if (pending_recovery
) {
1855 wl1271_warning("queuing forgotten recovery on resume");
1856 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
1860 ret
= wl1271_ps_elp_wakeup(wl
);
1864 wl12xx_for_each_wlvif(wl
, wlvif
) {
1865 if (wlcore_is_p2p_mgmt(wlvif
))
1868 wl1271_configure_resume(wl
, wlvif
);
1871 ret
= wlcore_hw_interrupt_notify(wl
, true);
1875 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1876 ret
= wlcore_hw_rx_ba_filter(wl
, false);
1881 wl1271_ps_elp_sleep(wl
);
1884 wl
->wow_enabled
= false;
1887 * Set a flag to re-init the watchdog on the first Tx after resume.
1888 * That way we avoid possible conditions where Tx-complete interrupts
1889 * fail to arrive and we perform a spurious recovery.
1891 set_bit(WL1271_FLAG_REINIT_TX_WDOG
, &wl
->flags
);
1892 mutex_unlock(&wl
->mutex
);
1897 static int wl1271_op_start(struct ieee80211_hw
*hw
)
1899 wl1271_debug(DEBUG_MAC80211
, "mac80211 start");
1902 * We have to delay the booting of the hardware because
1903 * we need to know the local MAC address before downloading and
1904 * initializing the firmware. The MAC address cannot be changed
1905 * after boot, and without the proper MAC address, the firmware
1906 * will not function properly.
1908 * The MAC address is first known when the corresponding interface
1909 * is added. That is where we will initialize the hardware.
1915 static void wlcore_op_stop_locked(struct wl1271
*wl
)
1919 if (wl
->state
== WLCORE_STATE_OFF
) {
1920 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1922 wlcore_enable_interrupts(wl
);
1928 * this must be before the cancel_work calls below, so that the work
1929 * functions don't perform further work.
1931 wl
->state
= WLCORE_STATE_OFF
;
1934 * Use the nosync variant to disable interrupts, so the mutex could be
1935 * held while doing so without deadlocking.
1937 wlcore_disable_interrupts_nosync(wl
);
1939 mutex_unlock(&wl
->mutex
);
1941 wlcore_synchronize_interrupts(wl
);
1942 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
1943 cancel_work_sync(&wl
->recovery_work
);
1944 wl1271_flush_deferred_work(wl
);
1945 cancel_delayed_work_sync(&wl
->scan_complete_work
);
1946 cancel_work_sync(&wl
->netstack_work
);
1947 cancel_work_sync(&wl
->tx_work
);
1948 cancel_delayed_work_sync(&wl
->elp_work
);
1949 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1951 /* let's notify MAC80211 about the remaining pending TX frames */
1952 mutex_lock(&wl
->mutex
);
1953 wl12xx_tx_reset(wl
);
1955 wl1271_power_off(wl
);
1957 * In case a recovery was scheduled, interrupts were disabled to avoid
1958 * an interrupt storm. Now that the power is down, it is safe to
1959 * re-enable interrupts to balance the disable depth
1961 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
1962 wlcore_enable_interrupts(wl
);
1964 wl
->band
= NL80211_BAND_2GHZ
;
1967 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
1968 wl
->channel_type
= NL80211_CHAN_NO_HT
;
1969 wl
->tx_blocks_available
= 0;
1970 wl
->tx_allocated_blocks
= 0;
1971 wl
->tx_results_count
= 0;
1972 wl
->tx_packets_count
= 0;
1973 wl
->time_offset
= 0;
1974 wl
->ap_fw_ps_map
= 0;
1976 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1977 memset(wl
->roles_map
, 0, sizeof(wl
->roles_map
));
1978 memset(wl
->links_map
, 0, sizeof(wl
->links_map
));
1979 memset(wl
->roc_map
, 0, sizeof(wl
->roc_map
));
1980 memset(wl
->session_ids
, 0, sizeof(wl
->session_ids
));
1981 memset(wl
->rx_filter_enabled
, 0, sizeof(wl
->rx_filter_enabled
));
1982 wl
->active_sta_count
= 0;
1983 wl
->active_link_count
= 0;
1985 /* The system link is always allocated */
1986 wl
->links
[WL12XX_SYSTEM_HLID
].allocated_pkts
= 0;
1987 wl
->links
[WL12XX_SYSTEM_HLID
].prev_freed_pkts
= 0;
1988 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
1991 * this is performed after the cancel_work calls and the associated
1992 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1993 * get executed before all these vars have been reset.
1997 wl
->tx_blocks_freed
= 0;
1999 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
2000 wl
->tx_pkts_freed
[i
] = 0;
2001 wl
->tx_allocated_pkts
[i
] = 0;
2004 wl1271_debugfs_reset(wl
);
2006 kfree(wl
->raw_fw_status
);
2007 wl
->raw_fw_status
= NULL
;
2008 kfree(wl
->fw_status
);
2009 wl
->fw_status
= NULL
;
2010 kfree(wl
->tx_res_if
);
2011 wl
->tx_res_if
= NULL
;
2012 kfree(wl
->target_mem_map
);
2013 wl
->target_mem_map
= NULL
;
2016 * FW channels must be re-calibrated after recovery,
2017 * save current Reg-Domain channel configuration and clear it.
2019 memcpy(wl
->reg_ch_conf_pending
, wl
->reg_ch_conf_last
,
2020 sizeof(wl
->reg_ch_conf_pending
));
2021 memset(wl
->reg_ch_conf_last
, 0, sizeof(wl
->reg_ch_conf_last
));
2024 static void wlcore_op_stop(struct ieee80211_hw
*hw
)
2026 struct wl1271
*wl
= hw
->priv
;
2028 wl1271_debug(DEBUG_MAC80211
, "mac80211 stop");
2030 mutex_lock(&wl
->mutex
);
2032 wlcore_op_stop_locked(wl
);
2034 mutex_unlock(&wl
->mutex
);
2037 static void wlcore_channel_switch_work(struct work_struct
*work
)
2039 struct delayed_work
*dwork
;
2041 struct ieee80211_vif
*vif
;
2042 struct wl12xx_vif
*wlvif
;
2045 dwork
= to_delayed_work(work
);
2046 wlvif
= container_of(dwork
, struct wl12xx_vif
, channel_switch_work
);
2049 wl1271_info("channel switch failed (role_id: %d).", wlvif
->role_id
);
2051 mutex_lock(&wl
->mutex
);
2053 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
2056 /* check the channel switch is still ongoing */
2057 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
))
2060 vif
= wl12xx_wlvif_to_vif(wlvif
);
2061 ieee80211_chswitch_done(vif
, false);
2063 ret
= wl1271_ps_elp_wakeup(wl
);
2067 wl12xx_cmd_stop_channel_switch(wl
, wlvif
);
2069 wl1271_ps_elp_sleep(wl
);
2071 mutex_unlock(&wl
->mutex
);
2074 static void wlcore_connection_loss_work(struct work_struct
*work
)
2076 struct delayed_work
*dwork
;
2078 struct ieee80211_vif
*vif
;
2079 struct wl12xx_vif
*wlvif
;
2081 dwork
= to_delayed_work(work
);
2082 wlvif
= container_of(dwork
, struct wl12xx_vif
, connection_loss_work
);
2085 wl1271_info("Connection loss work (role_id: %d).", wlvif
->role_id
);
2087 mutex_lock(&wl
->mutex
);
2089 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
2092 /* Call mac80211 connection loss */
2093 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2096 vif
= wl12xx_wlvif_to_vif(wlvif
);
2097 ieee80211_connection_loss(vif
);
2099 mutex_unlock(&wl
->mutex
);
2102 static void wlcore_pending_auth_complete_work(struct work_struct
*work
)
2104 struct delayed_work
*dwork
;
2106 struct wl12xx_vif
*wlvif
;
2107 unsigned long time_spare
;
2110 dwork
= to_delayed_work(work
);
2111 wlvif
= container_of(dwork
, struct wl12xx_vif
,
2112 pending_auth_complete_work
);
2115 mutex_lock(&wl
->mutex
);
2117 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
2121 * Make sure a second really passed since the last auth reply. Maybe
2122 * a second auth reply arrived while we were stuck on the mutex.
2123 * Check for a little less than the timeout to protect from scheduler
2126 time_spare
= jiffies
+
2127 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT
- 50);
2128 if (!time_after(time_spare
, wlvif
->pending_auth_reply_time
))
2131 ret
= wl1271_ps_elp_wakeup(wl
);
2135 /* cancel the ROC if active */
2136 wlcore_update_inconn_sta(wl
, wlvif
, NULL
, false);
2138 wl1271_ps_elp_sleep(wl
);
2140 mutex_unlock(&wl
->mutex
);
2143 static int wl12xx_allocate_rate_policy(struct wl1271
*wl
, u8
*idx
)
2145 u8 policy
= find_first_zero_bit(wl
->rate_policies_map
,
2146 WL12XX_MAX_RATE_POLICIES
);
2147 if (policy
>= WL12XX_MAX_RATE_POLICIES
)
2150 __set_bit(policy
, wl
->rate_policies_map
);
2155 static void wl12xx_free_rate_policy(struct wl1271
*wl
, u8
*idx
)
2157 if (WARN_ON(*idx
>= WL12XX_MAX_RATE_POLICIES
))
2160 __clear_bit(*idx
, wl
->rate_policies_map
);
2161 *idx
= WL12XX_MAX_RATE_POLICIES
;
2164 static int wlcore_allocate_klv_template(struct wl1271
*wl
, u8
*idx
)
2166 u8 policy
= find_first_zero_bit(wl
->klv_templates_map
,
2167 WLCORE_MAX_KLV_TEMPLATES
);
2168 if (policy
>= WLCORE_MAX_KLV_TEMPLATES
)
2171 __set_bit(policy
, wl
->klv_templates_map
);
2176 static void wlcore_free_klv_template(struct wl1271
*wl
, u8
*idx
)
2178 if (WARN_ON(*idx
>= WLCORE_MAX_KLV_TEMPLATES
))
2181 __clear_bit(*idx
, wl
->klv_templates_map
);
2182 *idx
= WLCORE_MAX_KLV_TEMPLATES
;
2185 static u8
wl12xx_get_role_type(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2187 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2189 switch (wlvif
->bss_type
) {
2190 case BSS_TYPE_AP_BSS
:
2192 return WL1271_ROLE_P2P_GO
;
2193 else if (ieee80211_vif_is_mesh(vif
))
2194 return WL1271_ROLE_MESH_POINT
;
2196 return WL1271_ROLE_AP
;
2198 case BSS_TYPE_STA_BSS
:
2200 return WL1271_ROLE_P2P_CL
;
2202 return WL1271_ROLE_STA
;
2205 return WL1271_ROLE_IBSS
;
2208 wl1271_error("invalid bss_type: %d", wlvif
->bss_type
);
2210 return WL12XX_INVALID_ROLE_TYPE
;
2213 static int wl12xx_init_vif_data(struct wl1271
*wl
, struct ieee80211_vif
*vif
)
2215 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2218 /* clear everything but the persistent data */
2219 memset(wlvif
, 0, offsetof(struct wl12xx_vif
, persistent
));
2221 switch (ieee80211_vif_type_p2p(vif
)) {
2222 case NL80211_IFTYPE_P2P_CLIENT
:
2225 case NL80211_IFTYPE_STATION
:
2226 case NL80211_IFTYPE_P2P_DEVICE
:
2227 wlvif
->bss_type
= BSS_TYPE_STA_BSS
;
2229 case NL80211_IFTYPE_ADHOC
:
2230 wlvif
->bss_type
= BSS_TYPE_IBSS
;
2232 case NL80211_IFTYPE_P2P_GO
:
2235 case NL80211_IFTYPE_AP
:
2236 case NL80211_IFTYPE_MESH_POINT
:
2237 wlvif
->bss_type
= BSS_TYPE_AP_BSS
;
2240 wlvif
->bss_type
= MAX_BSS_TYPE
;
2244 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2245 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2246 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2248 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2249 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2250 /* init sta/ibss data */
2251 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2252 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2253 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2254 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2255 wlcore_allocate_klv_template(wl
, &wlvif
->sta
.klv_template_id
);
2256 wlvif
->basic_rate_set
= CONF_TX_RATE_MASK_BASIC
;
2257 wlvif
->basic_rate
= CONF_TX_RATE_MASK_BASIC
;
2258 wlvif
->rate_set
= CONF_TX_RATE_MASK_BASIC
;
2261 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2262 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2263 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2264 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2265 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2266 wl12xx_allocate_rate_policy(wl
,
2267 &wlvif
->ap
.ucast_rate_idx
[i
]);
2268 wlvif
->basic_rate_set
= CONF_TX_ENABLED_RATES
;
2270 * TODO: check if basic_rate shouldn't be
2271 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2272 * instead (the same thing for STA above).
2274 wlvif
->basic_rate
= CONF_TX_ENABLED_RATES
;
2275 /* TODO: this seems to be used only for STA, check it */
2276 wlvif
->rate_set
= CONF_TX_ENABLED_RATES
;
2279 wlvif
->bitrate_masks
[NL80211_BAND_2GHZ
] = wl
->conf
.tx
.basic_rate
;
2280 wlvif
->bitrate_masks
[NL80211_BAND_5GHZ
] = wl
->conf
.tx
.basic_rate_5
;
2281 wlvif
->beacon_int
= WL1271_DEFAULT_BEACON_INT
;
2284 * mac80211 configures some values globally, while we treat them
2285 * per-interface. thus, on init, we have to copy them from wl
2287 wlvif
->band
= wl
->band
;
2288 wlvif
->channel
= wl
->channel
;
2289 wlvif
->power_level
= wl
->power_level
;
2290 wlvif
->channel_type
= wl
->channel_type
;
2292 INIT_WORK(&wlvif
->rx_streaming_enable_work
,
2293 wl1271_rx_streaming_enable_work
);
2294 INIT_WORK(&wlvif
->rx_streaming_disable_work
,
2295 wl1271_rx_streaming_disable_work
);
2296 INIT_WORK(&wlvif
->rc_update_work
, wlcore_rc_update_work
);
2297 INIT_DELAYED_WORK(&wlvif
->channel_switch_work
,
2298 wlcore_channel_switch_work
);
2299 INIT_DELAYED_WORK(&wlvif
->connection_loss_work
,
2300 wlcore_connection_loss_work
);
2301 INIT_DELAYED_WORK(&wlvif
->pending_auth_complete_work
,
2302 wlcore_pending_auth_complete_work
);
2303 INIT_LIST_HEAD(&wlvif
->list
);
2305 timer_setup(&wlvif
->rx_streaming_timer
, wl1271_rx_streaming_timer
, 0);
2309 static int wl12xx_init_fw(struct wl1271
*wl
)
2311 int retries
= WL1271_BOOT_RETRIES
;
2312 bool booted
= false;
2313 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
2318 ret
= wl12xx_chip_wakeup(wl
, false);
2322 ret
= wl
->ops
->boot(wl
);
2326 ret
= wl1271_hw_init(wl
);
2334 mutex_unlock(&wl
->mutex
);
2335 /* Unlocking the mutex in the middle of handling is
2336 inherently unsafe. In this case we deem it safe to do,
2337 because we need to let any possibly pending IRQ out of
2338 the system (and while we are WLCORE_STATE_OFF the IRQ
2339 work function will not do anything.) Also, any other
2340 possible concurrent operations will fail due to the
2341 current state, hence the wl1271 struct should be safe. */
2342 wlcore_disable_interrupts(wl
);
2343 wl1271_flush_deferred_work(wl
);
2344 cancel_work_sync(&wl
->netstack_work
);
2345 mutex_lock(&wl
->mutex
);
2347 wl1271_power_off(wl
);
2351 wl1271_error("firmware boot failed despite %d retries",
2352 WL1271_BOOT_RETRIES
);
2356 wl1271_info("firmware booted (%s)", wl
->chip
.fw_ver_str
);
2358 /* update hw/fw version info in wiphy struct */
2359 wiphy
->hw_version
= wl
->chip
.id
;
2360 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
2361 sizeof(wiphy
->fw_version
));
2364 * Now we know if 11a is supported (info from the NVS), so disable
2365 * 11a channels if not supported
2367 if (!wl
->enable_11a
)
2368 wiphy
->bands
[NL80211_BAND_5GHZ
]->n_channels
= 0;
2370 wl1271_debug(DEBUG_MAC80211
, "11a is %ssupported",
2371 wl
->enable_11a
? "" : "not ");
2373 wl
->state
= WLCORE_STATE_ON
;
2378 static bool wl12xx_dev_role_started(struct wl12xx_vif
*wlvif
)
2380 return wlvif
->dev_hlid
!= WL12XX_INVALID_LINK_ID
;
2384 * Check whether a fw switch (i.e. moving from one loaded
2385 * fw to another) is needed. This function is also responsible
2386 * for updating wl->last_vif_count, so it must be called before
2387 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2390 static bool wl12xx_need_fw_change(struct wl1271
*wl
,
2391 struct vif_counter_data vif_counter_data
,
2394 enum wl12xx_fw_type current_fw
= wl
->fw_type
;
2395 u8 vif_count
= vif_counter_data
.counter
;
2397 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
))
2400 /* increase the vif count if this is a new vif */
2401 if (add
&& !vif_counter_data
.cur_vif_running
)
2404 wl
->last_vif_count
= vif_count
;
2406 /* no need for fw change if the device is OFF */
2407 if (wl
->state
== WLCORE_STATE_OFF
)
2410 /* no need for fw change if a single fw is used */
2411 if (!wl
->mr_fw_name
)
2414 if (vif_count
> 1 && current_fw
== WL12XX_FW_TYPE_NORMAL
)
2416 if (vif_count
<= 1 && current_fw
== WL12XX_FW_TYPE_MULTI
)
2423 * Enter "forced psm". Make sure the sta is in psm against the ap,
2424 * to make the fw switch a bit more disconnection-persistent.
2426 static void wl12xx_force_active_psm(struct wl1271
*wl
)
2428 struct wl12xx_vif
*wlvif
;
2430 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
2431 wl1271_ps_set_mode(wl
, wlvif
, STATION_POWER_SAVE_MODE
);
2435 struct wlcore_hw_queue_iter_data
{
2436 unsigned long hw_queue_map
[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES
)];
2438 struct ieee80211_vif
*vif
;
2439 /* is the current vif among those iterated */
2443 static void wlcore_hw_queue_iter(void *data
, u8
*mac
,
2444 struct ieee80211_vif
*vif
)
2446 struct wlcore_hw_queue_iter_data
*iter_data
= data
;
2448 if (vif
->type
== NL80211_IFTYPE_P2P_DEVICE
||
2449 WARN_ON_ONCE(vif
->hw_queue
[0] == IEEE80211_INVAL_HW_QUEUE
))
2452 if (iter_data
->cur_running
|| vif
== iter_data
->vif
) {
2453 iter_data
->cur_running
= true;
2457 __set_bit(vif
->hw_queue
[0] / NUM_TX_QUEUES
, iter_data
->hw_queue_map
);
2460 static int wlcore_allocate_hw_queue_base(struct wl1271
*wl
,
2461 struct wl12xx_vif
*wlvif
)
2463 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2464 struct wlcore_hw_queue_iter_data iter_data
= {};
2467 if (vif
->type
== NL80211_IFTYPE_P2P_DEVICE
) {
2468 vif
->cab_queue
= IEEE80211_INVAL_HW_QUEUE
;
2472 iter_data
.vif
= vif
;
2474 /* mark all bits taken by active interfaces */
2475 ieee80211_iterate_active_interfaces_atomic(wl
->hw
,
2476 IEEE80211_IFACE_ITER_RESUME_ALL
,
2477 wlcore_hw_queue_iter
, &iter_data
);
2479 /* the current vif is already running in mac80211 (resume/recovery) */
2480 if (iter_data
.cur_running
) {
2481 wlvif
->hw_queue_base
= vif
->hw_queue
[0];
2482 wl1271_debug(DEBUG_MAC80211
,
2483 "using pre-allocated hw queue base %d",
2484 wlvif
->hw_queue_base
);
2486 /* interface type might have changed type */
2487 goto adjust_cab_queue
;
2490 q_base
= find_first_zero_bit(iter_data
.hw_queue_map
,
2491 WLCORE_NUM_MAC_ADDRESSES
);
2492 if (q_base
>= WLCORE_NUM_MAC_ADDRESSES
)
2495 wlvif
->hw_queue_base
= q_base
* NUM_TX_QUEUES
;
2496 wl1271_debug(DEBUG_MAC80211
, "allocating hw queue base: %d",
2497 wlvif
->hw_queue_base
);
2499 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
2500 wl
->queue_stop_reasons
[wlvif
->hw_queue_base
+ i
] = 0;
2501 /* register hw queues in mac80211 */
2502 vif
->hw_queue
[i
] = wlvif
->hw_queue_base
+ i
;
2506 /* the last places are reserved for cab queues per interface */
2507 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2508 vif
->cab_queue
= NUM_TX_QUEUES
* WLCORE_NUM_MAC_ADDRESSES
+
2509 wlvif
->hw_queue_base
/ NUM_TX_QUEUES
;
2511 vif
->cab_queue
= IEEE80211_INVAL_HW_QUEUE
;
2516 static int wl1271_op_add_interface(struct ieee80211_hw
*hw
,
2517 struct ieee80211_vif
*vif
)
2519 struct wl1271
*wl
= hw
->priv
;
2520 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2521 struct vif_counter_data vif_count
;
2526 wl1271_error("Adding Interface not allowed while in PLT mode");
2530 vif
->driver_flags
|= IEEE80211_VIF_BEACON_FILTER
|
2531 IEEE80211_VIF_SUPPORTS_UAPSD
|
2532 IEEE80211_VIF_SUPPORTS_CQM_RSSI
;
2534 wl1271_debug(DEBUG_MAC80211
, "mac80211 add interface type %d mac %pM",
2535 ieee80211_vif_type_p2p(vif
), vif
->addr
);
2537 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2539 mutex_lock(&wl
->mutex
);
2540 ret
= wl1271_ps_elp_wakeup(wl
);
2545 * in some very corner case HW recovery scenarios its possible to
2546 * get here before __wl1271_op_remove_interface is complete, so
2547 * opt out if that is the case.
2549 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) ||
2550 test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)) {
2556 ret
= wl12xx_init_vif_data(wl
, vif
);
2561 role_type
= wl12xx_get_role_type(wl
, wlvif
);
2562 if (role_type
== WL12XX_INVALID_ROLE_TYPE
) {
2567 ret
= wlcore_allocate_hw_queue_base(wl
, wlvif
);
2571 if (wl12xx_need_fw_change(wl
, vif_count
, true)) {
2572 wl12xx_force_active_psm(wl
);
2573 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2574 mutex_unlock(&wl
->mutex
);
2575 wl1271_recovery_work(&wl
->recovery_work
);
2580 * TODO: after the nvs issue will be solved, move this block
2581 * to start(), and make sure here the driver is ON.
2583 if (wl
->state
== WLCORE_STATE_OFF
) {
2585 * we still need this in order to configure the fw
2586 * while uploading the nvs
2588 memcpy(wl
->addresses
[0].addr
, vif
->addr
, ETH_ALEN
);
2590 ret
= wl12xx_init_fw(wl
);
2595 if (!wlcore_is_p2p_mgmt(wlvif
)) {
2596 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2597 role_type
, &wlvif
->role_id
);
2601 ret
= wl1271_init_vif_specific(wl
, vif
);
2606 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
, WL1271_ROLE_DEVICE
,
2607 &wlvif
->dev_role_id
);
2611 /* needed mainly for configuring rate policies */
2612 ret
= wl1271_sta_hw_init(wl
, wlvif
);
2617 list_add(&wlvif
->list
, &wl
->wlvif_list
);
2618 set_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
);
2620 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2625 wl1271_ps_elp_sleep(wl
);
2627 mutex_unlock(&wl
->mutex
);
2632 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
2633 struct ieee80211_vif
*vif
,
2634 bool reset_tx_queues
)
2636 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2638 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2640 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove interface");
2642 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2645 /* because of hardware recovery, we may get here twice */
2646 if (wl
->state
== WLCORE_STATE_OFF
)
2649 wl1271_info("down");
2651 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
&&
2652 wl
->scan_wlvif
== wlvif
) {
2653 struct cfg80211_scan_info info
= {
2658 * Rearm the tx watchdog just before idling scan. This
2659 * prevents just-finished scans from triggering the watchdog
2661 wl12xx_rearm_tx_watchdog_locked(wl
);
2663 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
2664 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
2665 wl
->scan_wlvif
= NULL
;
2666 wl
->scan
.req
= NULL
;
2667 ieee80211_scan_completed(wl
->hw
, &info
);
2670 if (wl
->sched_vif
== wlvif
)
2671 wl
->sched_vif
= NULL
;
2673 if (wl
->roc_vif
== vif
) {
2675 ieee80211_remain_on_channel_expired(wl
->hw
);
2678 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
2679 /* disable active roles */
2680 ret
= wl1271_ps_elp_wakeup(wl
);
2684 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2685 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2686 if (wl12xx_dev_role_started(wlvif
))
2687 wl12xx_stop_dev(wl
, wlvif
);
2690 if (!wlcore_is_p2p_mgmt(wlvif
)) {
2691 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->role_id
);
2695 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->dev_role_id
);
2700 wl1271_ps_elp_sleep(wl
);
2703 wl12xx_tx_reset_wlvif(wl
, wlvif
);
2705 /* clear all hlids (except system_hlid) */
2706 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2708 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2709 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2710 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2711 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2712 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2713 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2714 wlcore_free_klv_template(wl
, &wlvif
->sta
.klv_template_id
);
2716 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2717 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2718 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2719 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2720 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2721 wl12xx_free_rate_policy(wl
,
2722 &wlvif
->ap
.ucast_rate_idx
[i
]);
2723 wl1271_free_ap_keys(wl
, wlvif
);
2726 dev_kfree_skb(wlvif
->probereq
);
2727 wlvif
->probereq
= NULL
;
2728 if (wl
->last_wlvif
== wlvif
)
2729 wl
->last_wlvif
= NULL
;
2730 list_del(&wlvif
->list
);
2731 memset(wlvif
->ap
.sta_hlid_map
, 0, sizeof(wlvif
->ap
.sta_hlid_map
));
2732 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2733 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2741 * Last AP, have more stations. Configure sleep auth according to STA.
2742 * Don't do this on unintended recovery.
2744 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) &&
2745 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
))
2748 if (wl
->ap_count
== 0 && is_ap
) {
2749 /* mask ap events */
2750 wl
->event_mask
&= ~wl
->ap_event_mask
;
2751 wl1271_event_unmask(wl
);
2754 if (wl
->ap_count
== 0 && is_ap
&& wl
->sta_count
) {
2755 u8 sta_auth
= wl
->conf
.conn
.sta_sleep_auth
;
2756 /* Configure for power according to debugfs */
2757 if (sta_auth
!= WL1271_PSM_ILLEGAL
)
2758 wl1271_acx_sleep_auth(wl
, sta_auth
);
2759 /* Configure for ELP power saving */
2761 wl1271_acx_sleep_auth(wl
, WL1271_PSM_ELP
);
2765 mutex_unlock(&wl
->mutex
);
2767 del_timer_sync(&wlvif
->rx_streaming_timer
);
2768 cancel_work_sync(&wlvif
->rx_streaming_enable_work
);
2769 cancel_work_sync(&wlvif
->rx_streaming_disable_work
);
2770 cancel_work_sync(&wlvif
->rc_update_work
);
2771 cancel_delayed_work_sync(&wlvif
->connection_loss_work
);
2772 cancel_delayed_work_sync(&wlvif
->channel_switch_work
);
2773 cancel_delayed_work_sync(&wlvif
->pending_auth_complete_work
);
2775 mutex_lock(&wl
->mutex
);
/*
 * wl1271_op_remove_interface - mac80211 .remove_interface callback.
 * Verifies the vif is still on wl's vif list, tears it down via
 * __wl1271_op_remove_interface(), and queues an intended-fw-recovery
 * (firmware swap) when the remaining vif mix requires a different fw.
 * NOTE(review): the extraction dropped some interior lines of this
 * function; comments describe only the visible statements.
 */
2778 static void wl1271_op_remove_interface(struct ieee80211_hw
*hw
,
2779 struct ieee80211_vif
*vif
)
2781 struct wl1271
*wl
= hw
->priv
;
2782 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2783 struct wl12xx_vif
*iter
;
2784 struct vif_counter_data vif_count
;
/* snapshot the vif population before taking wl->mutex */
2786 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2787 mutex_lock(&wl
->mutex
);
/* nothing to do if hw is off or the vif was never initialized */
2789 if (wl
->state
== WLCORE_STATE_OFF
||
2790 !test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2794 * wl->vif can be null here if someone shuts down the interface
2795 * just when hardware recovery has been started.
2797 wl12xx_for_each_wlvif(wl
, iter
) {
2801 __wl1271_op_remove_interface(wl
, vif
, true);
2804 WARN_ON(iter
!= wlvif
);
/* swap firmware type if the remaining vif mix needs it */
2805 if (wl12xx_need_fw_change(wl
, vif_count
, false)) {
2806 wl12xx_force_active_psm(wl
);
2807 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2808 wl12xx_queue_recovery_work(wl
);
2811 mutex_unlock(&wl
->mutex
);
/*
 * wl12xx_op_change_interface - mac80211 .change_interface callback.
 * Implemented as remove + re-add: removes the vif, rewrites vif->type
 * to the new interface type, then adds it back. The
 * VIF_CHANGE_IN_PROGRESS flag brackets the sequence so other paths can
 * tell this is a type change rather than a real removal.
 */
2814 static int wl12xx_op_change_interface(struct ieee80211_hw
*hw
,
2815 struct ieee80211_vif
*vif
,
2816 enum nl80211_iftype new_type
, bool p2p
)
2818 struct wl1271
*wl
= hw
->priv
;
2821 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2822 wl1271_op_remove_interface(hw
, vif
);
2824 vif
->type
= new_type
;
2826 ret
= wl1271_op_add_interface(hw
, vif
);
2828 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
/*
 * wlcore_join - start the fw role for a STA or IBSS vif (JOIN).
 * IBSS vifs get role_start_ibss; STA vifs get role_start_sta, with a
 * dummy start+stop pair first when the WLCORE_QUIRK_START_STA_FAILS
 * fw bug workaround is needed. Clears the recorded encryption type,
 * since JOIN wipes keys in the chipset (see comment below).
 */
2832 static int wlcore_join(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2835 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
2838 * One of the side effects of the JOIN command is that it clears
2839 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2840 * to a WPA/WPA2 access point will therefore kill the data-path.
2841 * Currently the only valid scenario for JOIN during association
2842 * is on roaming, in which case we will also be given new keys.
2843 * Keep the below message for now, unless it starts bothering
2844 * users who really like to roam a lot :)
2846 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2847 wl1271_info("JOIN while associated.");
2849 /* clear encryption type */
2850 wlvif
->encryption_type
= KEY_NONE
;
2853 ret
= wl12xx_cmd_role_start_ibss(wl
, wlvif
);
2855 if (wl
->quirks
& WLCORE_QUIRK_START_STA_FAILS
) {
2857 * TODO: this is an ugly workaround for wl12xx fw
2858 * bug - we are not able to tx/rx after the first
2859 * start_sta, so make dummy start+stop calls,
2860 * and then call start_sta again.
2861 * this should be fixed in the fw.
2863 wl12xx_cmd_role_start_sta(wl
, wlvif
);
2864 wl12xx_cmd_role_stop_sta(wl
, wlvif
);
2867 ret
= wl12xx_cmd_role_start_sta(wl
, wlvif
);
/*
 * wl1271_ssid_set - extract the SSID IE from @skb into the vif.
 * Locates WLAN_EID_SSID starting at the given offset, rejects missing
 * or over-long (> IEEE80211_MAX_SSID_LEN) SSIDs, then stores the SSID
 * bytes (ptr+2 skips the IE id/length header) and length on wlvif.
 */
2873 static int wl1271_ssid_set(struct wl12xx_vif
*wlvif
, struct sk_buff
*skb
,
2877 const u8
*ptr
= cfg80211_find_ie(WLAN_EID_SSID
, skb
->data
+ offset
,
2881 wl1271_error("No SSID in IEs!");
2886 if (ssid_len
> IEEE80211_MAX_SSID_LEN
) {
2887 wl1271_error("SSID is too long!");
2891 wlvif
->ssid_len
= ssid_len
;
2892 memcpy(wlvif
->ssid
, ptr
+2, ssid_len
);
/*
 * wlcore_set_ssid - derive the vif's SSID from the AP probe request.
 * STA vifs only: fetches the probe-request template mac80211 built for
 * the current AP and pulls the SSID IE out of it via wl1271_ssid_set().
 */
2896 static int wlcore_set_ssid(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2898 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2899 struct sk_buff
*skb
;
2902 /* we currently only support setting the ssid from the ap probe req */
2903 if (wlvif
->bss_type
!= BSS_TYPE_STA_BSS
)
2906 skb
= ieee80211_ap_probereq_get(wl
->hw
, vif
);
/* IEs start after the fixed probe-request header */
2910 ieoffset
= offsetof(struct ieee80211_mgmt
,
2911 u
.probe_req
.variable
);
2912 wl1271_ssid_set(wlvif
, skb
, ieoffset
);
2918 static int wlcore_set_assoc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2919 struct ieee80211_bss_conf
*bss_conf
,
2925 wlvif
->aid
= bss_conf
->aid
;
2926 wlvif
->channel_type
= cfg80211_get_chandef_type(&bss_conf
->chandef
);
2927 wlvif
->beacon_int
= bss_conf
->beacon_int
;
2928 wlvif
->wmm_enabled
= bss_conf
->qos
;
2930 set_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
);
2933 * with wl1271, we don't need to update the
2934 * beacon_int and dtim_period, because the firmware
2935 * updates it by itself when the first beacon is
2936 * received after a join.
2938 ret
= wl1271_cmd_build_ps_poll(wl
, wlvif
, wlvif
->aid
);
2943 * Get a template for hardware connection maintenance
2945 dev_kfree_skb(wlvif
->probereq
);
2946 wlvif
->probereq
= wl1271_cmd_build_ap_probe_req(wl
,
2949 ieoffset
= offsetof(struct ieee80211_mgmt
,
2950 u
.probe_req
.variable
);
2951 wl1271_ssid_set(wlvif
, wlvif
->probereq
, ieoffset
);
2953 /* enable the connection monitoring feature */
2954 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, true);
2959 * The join command disable the keep-alive mode, shut down its process,
2960 * and also clear the template config, so we need to reset it all after
2961 * the join. The acx_aid starts the keep-alive process, and the order
2962 * of the commands below is relevant.
2964 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, true);
2968 ret
= wl1271_acx_aid(wl
, wlvif
, wlvif
->aid
);
2972 ret
= wl12xx_cmd_build_klv_null_data(wl
, wlvif
);
2976 ret
= wl1271_acx_keep_alive_config(wl
, wlvif
,
2977 wlvif
->sta
.klv_template_id
,
2978 ACX_KEEP_ALIVE_TPL_VALID
);
2983 * The default fw psm configuration is AUTO, while mac80211 default
2984 * setting is off (ACTIVE), so sync the fw with the correct value.
2986 ret
= wl1271_ps_set_mode(wl
, wlvif
, STATION_ACTIVE_MODE
);
2992 wl1271_tx_enabled_rates_get(wl
,
2995 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3003 static int wlcore_unset_assoc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
3006 bool sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
3008 /* make sure we are connected (sta) joined */
3010 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
3013 /* make sure we are joined (ibss) */
3015 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
))
3019 /* use defaults when not associated */
3022 /* free probe-request template */
3023 dev_kfree_skb(wlvif
->probereq
);
3024 wlvif
->probereq
= NULL
;
3026 /* disable connection monitor features */
3027 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, false);
3031 /* Disable the keep-alive feature */
3032 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, false);
3036 /* disable beacon filtering */
3037 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, false);
3042 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
)) {
3043 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
3045 wl12xx_cmd_stop_channel_switch(wl
, wlvif
);
3046 ieee80211_chswitch_done(vif
, false);
3047 cancel_delayed_work(&wlvif
->channel_switch_work
);
3050 /* invalidate keep-alive template */
3051 wl1271_acx_keep_alive_config(wl
, wlvif
,
3052 wlvif
->sta
.klv_template_id
,
3053 ACX_KEEP_ALIVE_TPL_INVALID
);
/*
 * wl1271_set_band_rate - reset the vif's rate sets to the configured
 * per-band bitrate mask for the vif's current band.
 */
3058 static void wl1271_set_band_rate(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
3060 wlvif
->basic_rate_set
= wlvif
->bitrate_masks
[wlvif
->band
];
3061 wlvif
->rate_set
= wlvif
->basic_rate_set
;
/*
 * wl1271_sta_handle_idle - track mac80211 idle transitions for a STA
 * vif via WLVIF_FLAG_ACTIVE. No-op if the state didn't change. When
 * becoming active it first stops any sched scan owned by this vif,
 * because the firmware only supports sched_scan while idle.
 */
3064 static void wl1271_sta_handle_idle(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
3067 bool cur_idle
= !test_bit(WLVIF_FLAG_ACTIVE
, &wlvif
->flags
);
3069 if (idle
== cur_idle
)
3073 clear_bit(WLVIF_FLAG_ACTIVE
, &wlvif
->flags
);
3075 /* The current firmware only supports sched_scan in idle */
3076 if (wl
->sched_vif
== wlvif
)
3077 wl
->ops
->sched_scan_stop(wl
, wlvif
);
3079 set_bit(WLVIF_FLAG_ACTIVE
, &wlvif
->flags
);
/*
 * wl12xx_config_vif - apply hw config changes to a single vif.
 * P2P management vifs are skipped. Of the visible logic, only a
 * changed tx power level is pushed to the fw (ACX_TX_POWER) and
 * cached on the vif.
 */
3083 static int wl12xx_config_vif(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
3084 struct ieee80211_conf
*conf
, u32 changed
)
3088 if (wlcore_is_p2p_mgmt(wlvif
))
3091 if (conf
->power_level
!= wlvif
->power_level
) {
3092 ret
= wl1271_acx_tx_power(wl
, wlvif
, conf
->power_level
);
3096 wlvif
->power_level
= conf
->power_level
;
/*
 * wl1271_op_config - mac80211 .config callback.
 * Caches a changed global power level, then (only when the hw is on)
 * wakes the chip out of ELP and applies the changed config to every
 * vif via wl12xx_config_vif(). All work is done under wl->mutex.
 */
3102 static int wl1271_op_config(struct ieee80211_hw
*hw
, u32 changed
)
3104 struct wl1271
*wl
= hw
->priv
;
3105 struct wl12xx_vif
*wlvif
;
3106 struct ieee80211_conf
*conf
= &hw
->conf
;
3109 wl1271_debug(DEBUG_MAC80211
, "mac80211 config psm %s power %d %s"
3111 conf
->flags
& IEEE80211_CONF_PS
? "on" : "off",
3113 conf
->flags
& IEEE80211_CONF_IDLE
? "idle" : "in use",
3116 mutex_lock(&wl
->mutex
);
/* remember the requested power level even if the hw is off */
3118 if (changed
& IEEE80211_CONF_CHANGE_POWER
)
3119 wl
->power_level
= conf
->power_level
;
3121 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3124 ret
= wl1271_ps_elp_wakeup(wl
);
3128 /* configure each interface */
3129 wl12xx_for_each_wlvif(wl
, wlvif
) {
3130 ret
= wl12xx_config_vif(wl
, wlvif
, conf
, changed
);
3136 wl1271_ps_elp_sleep(wl
);
3139 mutex_unlock(&wl
->mutex
);
/*
 * Multicast filter parameters built in prepare_multicast() and
 * consumed in configure_filter(); mc_list holds up to
 * ACX_MC_ADDRESS_GROUP_MAX MAC addresses.
 */
3144 struct wl1271_filter_params
{
3147 u8 mc_list
[ACX_MC_ADDRESS_GROUP_MAX
][ETH_ALEN
];
3150 static u64
wl1271_op_prepare_multicast(struct ieee80211_hw
*hw
,
3151 struct netdev_hw_addr_list
*mc_list
)
3153 struct wl1271_filter_params
*fp
;
3154 struct netdev_hw_addr
*ha
;
3156 fp
= kzalloc(sizeof(*fp
), GFP_ATOMIC
);
3158 wl1271_error("Out of memory setting filters.");
3162 /* update multicast filtering parameters */
3163 fp
->mc_list_length
= 0;
3164 if (netdev_hw_addr_list_count(mc_list
) > ACX_MC_ADDRESS_GROUP_MAX
) {
3165 fp
->enabled
= false;
3168 netdev_hw_addr_list_for_each(ha
, mc_list
) {
3169 memcpy(fp
->mc_list
[fp
->mc_list_length
],
3170 ha
->addr
, ETH_ALEN
);
3171 fp
->mc_list_length
++;
3175 return (u64
)(unsigned long)fp
;
3178 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3180 FIF_BCN_PRBRESP_PROMISC | \
3184 static void wl1271_op_configure_filter(struct ieee80211_hw
*hw
,
3185 unsigned int changed
,
3186 unsigned int *total
, u64 multicast
)
3188 struct wl1271_filter_params
*fp
= (void *)(unsigned long)multicast
;
3189 struct wl1271
*wl
= hw
->priv
;
3190 struct wl12xx_vif
*wlvif
;
3194 wl1271_debug(DEBUG_MAC80211
, "mac80211 configure filter changed %x"
3195 " total %x", changed
, *total
);
3197 mutex_lock(&wl
->mutex
);
3199 *total
&= WL1271_SUPPORTED_FILTERS
;
3200 changed
&= WL1271_SUPPORTED_FILTERS
;
3202 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3205 ret
= wl1271_ps_elp_wakeup(wl
);
3209 wl12xx_for_each_wlvif(wl
, wlvif
) {
3210 if (wlcore_is_p2p_mgmt(wlvif
))
3213 if (wlvif
->bss_type
!= BSS_TYPE_AP_BSS
) {
3214 if (*total
& FIF_ALLMULTI
)
3215 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
3219 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
3222 fp
->mc_list_length
);
3228 * If interface in AP mode and created with allmulticast then disable
3229 * the firmware filters so that all multicast packets are passed
3230 * This is mandatory for MDNS based discovery protocols
3232 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
) {
3233 if (*total
& FIF_ALLMULTI
) {
3234 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
3244 * the fw doesn't provide an api to configure the filters. instead,
3245 * the filters configuration is based on the active roles / ROC
3250 wl1271_ps_elp_sleep(wl
);
3253 mutex_unlock(&wl
->mutex
);
3257 static int wl1271_record_ap_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
3258 u8 id
, u8 key_type
, u8 key_size
,
3259 const u8
*key
, u8 hlid
, u32 tx_seq_32
,
3262 struct wl1271_ap_key
*ap_key
;
3265 wl1271_debug(DEBUG_CRYPT
, "record ap key id %d", (int)id
);
3267 if (key_size
> MAX_KEY_SIZE
)
3271 * Find next free entry in ap_keys. Also check we are not replacing
3274 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
3275 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
3278 if (wlvif
->ap
.recorded_keys
[i
]->id
== id
) {
3279 wl1271_warning("trying to record key replacement");
3284 if (i
== MAX_NUM_KEYS
)
3287 ap_key
= kzalloc(sizeof(*ap_key
), GFP_KERNEL
);
3292 ap_key
->key_type
= key_type
;
3293 ap_key
->key_size
= key_size
;
3294 memcpy(ap_key
->key
, key
, key_size
);
3295 ap_key
->hlid
= hlid
;
3296 ap_key
->tx_seq_32
= tx_seq_32
;
3297 ap_key
->tx_seq_16
= tx_seq_16
;
3299 wlvif
->ap
.recorded_keys
[i
] = ap_key
;
/*
 * wl1271_free_ap_keys - discard all AP keys recorded before AP start.
 * Frees every wlvif->ap.recorded_keys entry and NULLs the slot so the
 * array can be reused (and double-free is impossible).
 */
3303 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
3307 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
3308 kfree(wlvif
->ap
.recorded_keys
[i
]);
3309 wlvif
->ap
.recorded_keys
[i
] = NULL
;
3313 static int wl1271_ap_init_hwenc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
3316 struct wl1271_ap_key
*key
;
3317 bool wep_key_added
= false;
3319 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
3321 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
3324 key
= wlvif
->ap
.recorded_keys
[i
];
3326 if (hlid
== WL12XX_INVALID_LINK_ID
)
3327 hlid
= wlvif
->ap
.bcast_hlid
;
3329 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
3330 key
->id
, key
->key_type
,
3331 key
->key_size
, key
->key
,
3332 hlid
, key
->tx_seq_32
,
3337 if (key
->key_type
== KEY_WEP
)
3338 wep_key_added
= true;
3341 if (wep_key_added
) {
3342 ret
= wl12xx_cmd_set_default_wep_key(wl
, wlvif
->default_key
,
3343 wlvif
->ap
.bcast_hlid
);
3349 wl1271_free_ap_keys(wl
, wlvif
);
3353 static int wl1271_set_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
3354 u16 action
, u8 id
, u8 key_type
,
3355 u8 key_size
, const u8
*key
, u32 tx_seq_32
,
3356 u16 tx_seq_16
, struct ieee80211_sta
*sta
)
3359 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3362 struct wl1271_station
*wl_sta
;
3366 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
3367 hlid
= wl_sta
->hlid
;
3369 hlid
= wlvif
->ap
.bcast_hlid
;
3372 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3374 * We do not support removing keys after AP shutdown.
3375 * Pretend we do to make mac80211 happy.
3377 if (action
!= KEY_ADD_OR_REPLACE
)
3380 ret
= wl1271_record_ap_key(wl
, wlvif
, id
,
3382 key
, hlid
, tx_seq_32
,
3385 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, action
,
3386 id
, key_type
, key_size
,
3387 key
, hlid
, tx_seq_32
,
3395 static const u8 bcast_addr
[ETH_ALEN
] = {
3396 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3399 addr
= sta
? sta
->addr
: bcast_addr
;
3401 if (is_zero_ether_addr(addr
)) {
3402 /* We dont support TX only encryption */
3406 /* The wl1271 does not allow to remove unicast keys - they
3407 will be cleared automatically on next CMD_JOIN. Ignore the
3408 request silently, as we dont want the mac80211 to emit
3409 an error message. */
3410 if (action
== KEY_REMOVE
&& !is_broadcast_ether_addr(addr
))
3413 /* don't remove key if hlid was already deleted */
3414 if (action
== KEY_REMOVE
&&
3415 wlvif
->sta
.hlid
== WL12XX_INVALID_LINK_ID
)
3418 ret
= wl1271_cmd_set_sta_key(wl
, wlvif
, action
,
3419 id
, key_type
, key_size
,
3420 key
, addr
, tx_seq_32
,
3430 static int wlcore_op_set_key(struct ieee80211_hw
*hw
, enum set_key_cmd cmd
,
3431 struct ieee80211_vif
*vif
,
3432 struct ieee80211_sta
*sta
,
3433 struct ieee80211_key_conf
*key_conf
)
3435 struct wl1271
*wl
= hw
->priv
;
3437 bool might_change_spare
=
3438 key_conf
->cipher
== WL1271_CIPHER_SUITE_GEM
||
3439 key_conf
->cipher
== WLAN_CIPHER_SUITE_TKIP
;
3441 if (might_change_spare
) {
3443 * stop the queues and flush to ensure the next packets are
3444 * in sync with FW spare block accounting
3446 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_SPARE_BLK
);
3447 wl1271_tx_flush(wl
);
3450 mutex_lock(&wl
->mutex
);
3452 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3454 goto out_wake_queues
;
3457 ret
= wl1271_ps_elp_wakeup(wl
);
3459 goto out_wake_queues
;
3461 ret
= wlcore_hw_set_key(wl
, cmd
, vif
, sta
, key_conf
);
3463 wl1271_ps_elp_sleep(wl
);
3466 if (might_change_spare
)
3467 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_SPARE_BLK
);
3469 mutex_unlock(&wl
->mutex
);
3474 int wlcore_set_key(struct wl1271
*wl
, enum set_key_cmd cmd
,
3475 struct ieee80211_vif
*vif
,
3476 struct ieee80211_sta
*sta
,
3477 struct ieee80211_key_conf
*key_conf
)
3479 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3486 wl1271_debug(DEBUG_MAC80211
, "mac80211 set key");
3488 wl1271_debug(DEBUG_CRYPT
, "CMD: 0x%x sta: %p", cmd
, sta
);
3489 wl1271_debug(DEBUG_CRYPT
, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3490 key_conf
->cipher
, key_conf
->keyidx
,
3491 key_conf
->keylen
, key_conf
->flags
);
3492 wl1271_dump(DEBUG_CRYPT
, "KEY: ", key_conf
->key
, key_conf
->keylen
);
3494 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
3496 struct wl1271_station
*wl_sta
= (void *)sta
->drv_priv
;
3497 hlid
= wl_sta
->hlid
;
3499 hlid
= wlvif
->ap
.bcast_hlid
;
3502 hlid
= wlvif
->sta
.hlid
;
3504 if (hlid
!= WL12XX_INVALID_LINK_ID
) {
3505 u64 tx_seq
= wl
->links
[hlid
].total_freed_pkts
;
3506 tx_seq_32
= WL1271_TX_SECURITY_HI32(tx_seq
);
3507 tx_seq_16
= WL1271_TX_SECURITY_LO16(tx_seq
);
3510 switch (key_conf
->cipher
) {
3511 case WLAN_CIPHER_SUITE_WEP40
:
3512 case WLAN_CIPHER_SUITE_WEP104
:
3515 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3517 case WLAN_CIPHER_SUITE_TKIP
:
3518 key_type
= KEY_TKIP
;
3519 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3521 case WLAN_CIPHER_SUITE_CCMP
:
3523 key_conf
->flags
|= IEEE80211_KEY_FLAG_PUT_IV_SPACE
;
3525 case WL1271_CIPHER_SUITE_GEM
:
3529 wl1271_error("Unknown key algo 0x%x", key_conf
->cipher
);
3536 ret
= wl1271_set_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
3537 key_conf
->keyidx
, key_type
,
3538 key_conf
->keylen
, key_conf
->key
,
3539 tx_seq_32
, tx_seq_16
, sta
);
3541 wl1271_error("Could not add or replace key");
3546 * reconfiguring arp response if the unicast (or common)
3547 * encryption key type was changed
3549 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
3550 (sta
|| key_type
== KEY_WEP
) &&
3551 wlvif
->encryption_type
!= key_type
) {
3552 wlvif
->encryption_type
= key_type
;
3553 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
3555 wl1271_warning("build arp rsp failed: %d", ret
);
3562 ret
= wl1271_set_key(wl
, wlvif
, KEY_REMOVE
,
3563 key_conf
->keyidx
, key_type
,
3564 key_conf
->keylen
, key_conf
->key
,
3567 wl1271_error("Could not remove key");
3573 wl1271_error("Unsupported key cmd 0x%x", cmd
);
3579 EXPORT_SYMBOL_GPL(wlcore_set_key
);
/*
 * wl1271_op_set_default_key_idx - mac80211 .set_default_unicast_key.
 * Records the new default key index on the vif and, for WEP, pushes
 * it to the fw (the default WEP key must be configured at least once).
 * Runs under wl->mutex with an ELP wakeup/sleep bracket.
 */
3581 static void wl1271_op_set_default_key_idx(struct ieee80211_hw
*hw
,
3582 struct ieee80211_vif
*vif
,
3585 struct wl1271
*wl
= hw
->priv
;
3586 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3589 wl1271_debug(DEBUG_MAC80211
, "mac80211 set default key idx %d",
3592 /* we don't handle unsetting of default key */
3596 mutex_lock(&wl
->mutex
);
3598 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3603 ret
= wl1271_ps_elp_wakeup(wl
);
3607 wlvif
->default_key
= key_idx
;
3609 /* the default WEP key needs to be configured at least once */
3610 if (wlvif
->encryption_type
== KEY_WEP
) {
3611 ret
= wl12xx_cmd_set_default_wep_key(wl
,
3619 wl1271_ps_elp_sleep(wl
);
3622 mutex_unlock(&wl
->mutex
);
/*
 * wlcore_regdomain_config - push the current regulatory domain to fw.
 * No-op unless the chip has the WLCORE_QUIRK_REGDOMAIN_CONF quirk.
 * On fw command failure it queues a recovery. Runs under wl->mutex
 * with an ELP wakeup/sleep bracket.
 */
3625 void wlcore_regdomain_config(struct wl1271
*wl
)
3629 if (!(wl
->quirks
& WLCORE_QUIRK_REGDOMAIN_CONF
))
3632 mutex_lock(&wl
->mutex
);
3634 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3637 ret
= wl1271_ps_elp_wakeup(wl
);
3641 ret
= wlcore_cmd_regdomain_config_locked(wl
);
/* a failed regdomain update leaves fw state suspect - recover */
3643 wl12xx_queue_recovery_work(wl
);
3647 wl1271_ps_elp_sleep(wl
);
3649 mutex_unlock(&wl
->mutex
);
/*
 * wl1271_op_hw_scan - mac80211 .hw_scan callback.
 * Starts a one-shot scan via wlcore_scan() using the first requested
 * SSID. Refused while any role is in ROC (remain-on-channel), since
 * fw cannot scan concurrently with ROC.
 */
3652 static int wl1271_op_hw_scan(struct ieee80211_hw
*hw
,
3653 struct ieee80211_vif
*vif
,
3654 struct ieee80211_scan_request
*hw_req
)
3656 struct cfg80211_scan_request
*req
= &hw_req
->req
;
3657 struct wl1271
*wl
= hw
->priv
;
3662 wl1271_debug(DEBUG_MAC80211
, "mac80211 hw scan");
/* only the first requested SSID is passed to the fw scan */
3665 ssid
= req
->ssids
[0].ssid
;
3666 len
= req
->ssids
[0].ssid_len
;
3669 mutex_lock(&wl
->mutex
);
3671 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3673 * We cannot return -EBUSY here because cfg80211 will expect
3674 * a call to ieee80211_scan_completed if we do - in this case
3675 * there won't be any call.
3681 ret
= wl1271_ps_elp_wakeup(wl
);
3685 /* fail if there is any role in ROC */
3686 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
3687 /* don't allow scanning right now */
3692 ret
= wlcore_scan(hw
->priv
, vif
, ssid
, len
, req
);
3694 wl1271_ps_elp_sleep(wl
);
3696 mutex_unlock(&wl
->mutex
);
3701 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw
*hw
,
3702 struct ieee80211_vif
*vif
)
3704 struct wl1271
*wl
= hw
->priv
;
3705 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3706 struct cfg80211_scan_info info
= {
3711 wl1271_debug(DEBUG_MAC80211
, "mac80211 cancel hw scan");
3713 mutex_lock(&wl
->mutex
);
3715 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3718 if (wl
->scan
.state
== WL1271_SCAN_STATE_IDLE
)
3721 ret
= wl1271_ps_elp_wakeup(wl
);
3725 if (wl
->scan
.state
!= WL1271_SCAN_STATE_DONE
) {
3726 ret
= wl
->ops
->scan_stop(wl
, wlvif
);
3732 * Rearm the tx watchdog just before idling scan. This
3733 * prevents just-finished scans from triggering the watchdog
3735 wl12xx_rearm_tx_watchdog_locked(wl
);
3737 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
3738 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
3739 wl
->scan_wlvif
= NULL
;
3740 wl
->scan
.req
= NULL
;
3741 ieee80211_scan_completed(wl
->hw
, &info
);
3744 wl1271_ps_elp_sleep(wl
);
3746 mutex_unlock(&wl
->mutex
);
3748 cancel_delayed_work_sync(&wl
->scan_complete_work
);
/*
 * wl1271_op_sched_scan_start - mac80211 .sched_scan_start callback.
 * Delegates to the chip-specific ops->sched_scan_start and, on
 * success, records this vif as the owner of the scheduled scan in
 * wl->sched_vif. Runs under wl->mutex with an ELP wakeup bracket.
 */
3751 static int wl1271_op_sched_scan_start(struct ieee80211_hw
*hw
,
3752 struct ieee80211_vif
*vif
,
3753 struct cfg80211_sched_scan_request
*req
,
3754 struct ieee80211_scan_ies
*ies
)
3756 struct wl1271
*wl
= hw
->priv
;
3757 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3760 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_start");
3762 mutex_lock(&wl
->mutex
);
3764 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3769 ret
= wl1271_ps_elp_wakeup(wl
);
3773 ret
= wl
->ops
->sched_scan_start(wl
, wlvif
, req
, ies
);
/* remember which vif owns the running sched scan */
3777 wl
->sched_vif
= wlvif
;
3780 wl1271_ps_elp_sleep(wl
);
3782 mutex_unlock(&wl
->mutex
);
/*
 * wl1271_op_sched_scan_stop - mac80211 .sched_scan_stop callback.
 * Stops the chip-specific scheduled scan for this vif under wl->mutex
 * with an ELP wakeup/sleep bracket; skipped if the hw is not on.
 */
3786 static int wl1271_op_sched_scan_stop(struct ieee80211_hw
*hw
,
3787 struct ieee80211_vif
*vif
)
3789 struct wl1271
*wl
= hw
->priv
;
3790 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3793 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_stop");
3795 mutex_lock(&wl
->mutex
);
3797 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3800 ret
= wl1271_ps_elp_wakeup(wl
);
3804 wl
->ops
->sched_scan_stop(wl
, wlvif
);
3806 wl1271_ps_elp_sleep(wl
);
3808 mutex_unlock(&wl
->mutex
);
/*
 * wl1271_op_set_frag_threshold - mac80211 .set_frag_threshold callback.
 * Pushes the new fragmentation threshold to the fw (ACX); failures are
 * only warned about. Runs under wl->mutex with an ELP wakeup bracket.
 */
3813 static int wl1271_op_set_frag_threshold(struct ieee80211_hw
*hw
, u32 value
)
3815 struct wl1271
*wl
= hw
->priv
;
3818 mutex_lock(&wl
->mutex
);
3820 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3825 ret
= wl1271_ps_elp_wakeup(wl
);
3829 ret
= wl1271_acx_frag_threshold(wl
, value
);
3831 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret
);
3833 wl1271_ps_elp_sleep(wl
);
3836 mutex_unlock(&wl
->mutex
);
/*
 * wl1271_op_set_rts_threshold - mac80211 .set_rts_threshold callback.
 * RTS threshold is per-role in this fw, so the new value is pushed to
 * every vif; failures are only warned about. Runs under wl->mutex
 * with an ELP wakeup bracket.
 */
3841 static int wl1271_op_set_rts_threshold(struct ieee80211_hw
*hw
, u32 value
)
3843 struct wl1271
*wl
= hw
->priv
;
3844 struct wl12xx_vif
*wlvif
;
3847 mutex_lock(&wl
->mutex
);
3849 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3854 ret
= wl1271_ps_elp_wakeup(wl
);
3858 wl12xx_for_each_wlvif(wl
, wlvif
) {
3859 ret
= wl1271_acx_rts_threshold(wl
, wlvif
, value
);
3861 wl1271_warning("set rts threshold failed: %d", ret
);
3863 wl1271_ps_elp_sleep(wl
);
3866 mutex_unlock(&wl
->mutex
);
/*
 * wl12xx_remove_ie - strip one information element from an skb in place.
 * Finds IE @eid starting at @ieoffset, shifts the remainder of the
 * frame over it with memmove and trims the skb by the IE's length.
 */
3871 static void wl12xx_remove_ie(struct sk_buff
*skb
, u8 eid
, int ieoffset
)
3874 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3875 u8
*ie
= (u8
*)cfg80211_find_ie(eid
, skb
->data
+ ieoffset
,
3876 skb
->len
- ieoffset
);
3881 memmove(ie
, next
, end
- next
);
3882 skb_trim(skb
, skb
->len
- len
);
/*
 * wl12xx_remove_vendor_ie - strip one vendor-specific IE from an skb.
 * Same mechanics as wl12xx_remove_ie(), but matched by OUI and OUI
 * type via cfg80211_find_vendor_ie().
 */
3885 static void wl12xx_remove_vendor_ie(struct sk_buff
*skb
,
3886 unsigned int oui
, u8 oui_type
,
3890 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3891 u8
*ie
= (u8
*)cfg80211_find_vendor_ie(oui
, oui_type
,
3892 skb
->data
+ ieoffset
,
3893 skb
->len
- ieoffset
);
3898 memmove(ie
, next
, end
- next
);
3899 skb_trim(skb
, skb
->len
- len
);
3902 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271
*wl
, u32 rates
,
3903 struct ieee80211_vif
*vif
)
3905 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3906 struct sk_buff
*skb
;
3909 skb
= ieee80211_proberesp_get(wl
->hw
, vif
);
3913 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3914 CMD_TEMPL_AP_PROBE_RESPONSE
,
3923 wl1271_debug(DEBUG_AP
, "probe response updated");
3924 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
);
3930 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271
*wl
,
3931 struct ieee80211_vif
*vif
,
3933 size_t probe_rsp_len
,
3936 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3937 struct ieee80211_bss_conf
*bss_conf
= &vif
->bss_conf
;
3938 u8 probe_rsp_templ
[WL1271_CMD_TEMPL_MAX_SIZE
];
3939 int ssid_ie_offset
, ie_offset
, templ_len
;
3942 /* no need to change probe response if the SSID is set correctly */
3943 if (wlvif
->ssid_len
> 0)
3944 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3945 CMD_TEMPL_AP_PROBE_RESPONSE
,
3950 if (probe_rsp_len
+ bss_conf
->ssid_len
> WL1271_CMD_TEMPL_MAX_SIZE
) {
3951 wl1271_error("probe_rsp template too big");
3955 /* start searching from IE offset */
3956 ie_offset
= offsetof(struct ieee80211_mgmt
, u
.probe_resp
.variable
);
3958 ptr
= cfg80211_find_ie(WLAN_EID_SSID
, probe_rsp_data
+ ie_offset
,
3959 probe_rsp_len
- ie_offset
);
3961 wl1271_error("No SSID in beacon!");
3965 ssid_ie_offset
= ptr
- probe_rsp_data
;
3966 ptr
+= (ptr
[1] + 2);
3968 memcpy(probe_rsp_templ
, probe_rsp_data
, ssid_ie_offset
);
3970 /* insert SSID from bss_conf */
3971 probe_rsp_templ
[ssid_ie_offset
] = WLAN_EID_SSID
;
3972 probe_rsp_templ
[ssid_ie_offset
+ 1] = bss_conf
->ssid_len
;
3973 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2,
3974 bss_conf
->ssid
, bss_conf
->ssid_len
);
3975 templ_len
= ssid_ie_offset
+ 2 + bss_conf
->ssid_len
;
3977 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2 + bss_conf
->ssid_len
,
3978 ptr
, probe_rsp_len
- (ptr
- probe_rsp_data
));
3979 templ_len
+= probe_rsp_len
- (ptr
- probe_rsp_data
);
3981 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3982 CMD_TEMPL_AP_PROBE_RESPONSE
,
3988 static int wl1271_bss_erp_info_changed(struct wl1271
*wl
,
3989 struct ieee80211_vif
*vif
,
3990 struct ieee80211_bss_conf
*bss_conf
,
3993 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3996 if (changed
& BSS_CHANGED_ERP_SLOT
) {
3997 if (bss_conf
->use_short_slot
)
3998 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_SHORT
);
4000 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_LONG
);
4002 wl1271_warning("Set slot time failed %d", ret
);
4007 if (changed
& BSS_CHANGED_ERP_PREAMBLE
) {
4008 if (bss_conf
->use_short_preamble
)
4009 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_SHORT
);
4011 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_LONG
);
4014 if (changed
& BSS_CHANGED_ERP_CTS_PROT
) {
4015 if (bss_conf
->use_cts_prot
)
4016 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
4019 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
4020 CTSPROTECT_DISABLE
);
4022 wl1271_warning("Set ctsprotect failed %d", ret
);
4031 static int wlcore_set_beacon_template(struct wl1271
*wl
,
4032 struct ieee80211_vif
*vif
,
4035 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4036 struct ieee80211_hdr
*hdr
;
4039 int ieoffset
= offsetof(struct ieee80211_mgmt
, u
.beacon
.variable
);
4040 struct sk_buff
*beacon
= ieee80211_beacon_get(wl
->hw
, vif
);
4048 wl1271_debug(DEBUG_MASTER
, "beacon updated");
4050 ret
= wl1271_ssid_set(wlvif
, beacon
, ieoffset
);
4052 dev_kfree_skb(beacon
);
4055 min_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4056 tmpl_id
= is_ap
? CMD_TEMPL_AP_BEACON
:
4058 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
, tmpl_id
,
4063 dev_kfree_skb(beacon
);
4067 wlvif
->wmm_enabled
=
4068 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT
,
4069 WLAN_OUI_TYPE_MICROSOFT_WMM
,
4070 beacon
->data
+ ieoffset
,
4071 beacon
->len
- ieoffset
);
4074 * In case we already have a probe-resp beacon set explicitly
4075 * by usermode, don't use the beacon data.
4077 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
))
4080 /* remove TIM ie from probe response */
4081 wl12xx_remove_ie(beacon
, WLAN_EID_TIM
, ieoffset
);
4084 * remove p2p ie from probe response.
4085 * the fw responds to probe requests that don't include
4086 * the p2p ie. probe requests with p2p ie will be passed,
4087 * and will be responded by the supplicant (the spec
4088 * forbids including the p2p ie when responding to probe
4089 * requests that didn't include it).
4091 wl12xx_remove_vendor_ie(beacon
, WLAN_OUI_WFA
,
4092 WLAN_OUI_TYPE_WFA_P2P
, ieoffset
);
4094 hdr
= (struct ieee80211_hdr
*) beacon
->data
;
4095 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_MGMT
|
4096 IEEE80211_STYPE_PROBE_RESP
);
4098 ret
= wl1271_ap_set_probe_resp_tmpl_legacy(wl
, vif
,
4103 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
4104 CMD_TEMPL_PROBE_RESPONSE
,
4109 dev_kfree_skb(beacon
);
4117 static int wl1271_bss_beacon_info_changed(struct wl1271
*wl
,
4118 struct ieee80211_vif
*vif
,
4119 struct ieee80211_bss_conf
*bss_conf
,
4122 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4123 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
4126 if (changed
& BSS_CHANGED_BEACON_INT
) {
4127 wl1271_debug(DEBUG_MASTER
, "beacon interval updated: %d",
4128 bss_conf
->beacon_int
);
4130 wlvif
->beacon_int
= bss_conf
->beacon_int
;
4133 if ((changed
& BSS_CHANGED_AP_PROBE_RESP
) && is_ap
) {
4134 u32 rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4136 wl1271_ap_set_probe_resp_tmpl(wl
, rate
, vif
);
4139 if (changed
& BSS_CHANGED_BEACON
) {
4140 ret
= wlcore_set_beacon_template(wl
, vif
, is_ap
);
4144 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED
,
4146 ret
= wlcore_hw_dfs_master_restart(wl
, wlvif
);
4153 wl1271_error("beacon info change failed: %d", ret
);
4157 /* AP mode changes */
4158 static void wl1271_bss_info_changed_ap(struct wl1271
*wl
,
4159 struct ieee80211_vif
*vif
,
4160 struct ieee80211_bss_conf
*bss_conf
,
4163 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4166 if (changed
& BSS_CHANGED_BASIC_RATES
) {
4167 u32 rates
= bss_conf
->basic_rates
;
4169 wlvif
->basic_rate_set
= wl1271_tx_enabled_rates_get(wl
, rates
,
4171 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
,
4172 wlvif
->basic_rate_set
);
4174 ret
= wl1271_init_ap_rates(wl
, wlvif
);
4176 wl1271_error("AP rate policy change failed %d", ret
);
4180 ret
= wl1271_ap_init_templates(wl
, vif
);
4184 /* No need to set probe resp template for mesh */
4185 if (!ieee80211_vif_is_mesh(vif
)) {
4186 ret
= wl1271_ap_set_probe_resp_tmpl(wl
,
4193 ret
= wlcore_set_beacon_template(wl
, vif
, true);
4198 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
, changed
);
4202 if (changed
& BSS_CHANGED_BEACON_ENABLED
) {
4203 if (bss_conf
->enable_beacon
) {
4204 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
4205 ret
= wl12xx_cmd_role_start_ap(wl
, wlvif
);
4209 ret
= wl1271_ap_init_hwenc(wl
, wlvif
);
4213 set_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
4214 wl1271_debug(DEBUG_AP
, "started AP");
4217 if (test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
4219 * AP might be in ROC in case we have just
4220 * sent auth reply. handle it.
4222 if (test_bit(wlvif
->role_id
, wl
->roc_map
))
4223 wl12xx_croc(wl
, wlvif
->role_id
);
4225 ret
= wl12xx_cmd_role_stop_ap(wl
, wlvif
);
4229 clear_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
4230 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
,
4232 wl1271_debug(DEBUG_AP
, "stopped AP");
4237 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
4241 /* Handle HT information change */
4242 if ((changed
& BSS_CHANGED_HT
) &&
4243 (bss_conf
->chandef
.width
!= NL80211_CHAN_WIDTH_20_NOHT
)) {
4244 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
4245 bss_conf
->ht_operation_mode
);
4247 wl1271_warning("Set ht information failed %d", ret
);
4256 static int wlcore_set_bssid(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
4257 struct ieee80211_bss_conf
*bss_conf
,
4263 wl1271_debug(DEBUG_MAC80211
,
4264 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4265 bss_conf
->bssid
, bss_conf
->aid
,
4266 bss_conf
->beacon_int
,
4267 bss_conf
->basic_rates
, sta_rate_set
);
4269 wlvif
->beacon_int
= bss_conf
->beacon_int
;
4270 rates
= bss_conf
->basic_rates
;
4271 wlvif
->basic_rate_set
=
4272 wl1271_tx_enabled_rates_get(wl
, rates
,
4275 wl1271_tx_min_rate_get(wl
,
4276 wlvif
->basic_rate_set
);
4280 wl1271_tx_enabled_rates_get(wl
,
4284 /* we only support sched_scan while not connected */
4285 if (wl
->sched_vif
== wlvif
)
4286 wl
->ops
->sched_scan_stop(wl
, wlvif
);
4288 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4292 ret
= wl12xx_cmd_build_null_data(wl
, wlvif
);
4296 ret
= wl1271_build_qos_null_data(wl
, wl12xx_wlvif_to_vif(wlvif
));
4300 wlcore_set_ssid(wl
, wlvif
);
4302 set_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
4307 static int wlcore_clear_bssid(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
4311 /* revert back to minimum rates for the current band */
4312 wl1271_set_band_rate(wl
, wlvif
);
4313 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4315 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4319 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
4320 test_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
)) {
4321 ret
= wl12xx_cmd_role_stop_sta(wl
, wlvif
);
4326 clear_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
4329 /* STA/IBSS mode changes */
4330 static void wl1271_bss_info_changed_sta(struct wl1271
*wl
,
4331 struct ieee80211_vif
*vif
,
4332 struct ieee80211_bss_conf
*bss_conf
,
4335 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4336 bool do_join
= false;
4337 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
4338 bool ibss_joined
= false;
4339 u32 sta_rate_set
= 0;
4341 struct ieee80211_sta
*sta
;
4342 bool sta_exists
= false;
4343 struct ieee80211_sta_ht_cap sta_ht_cap
;
4346 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
,
4352 if (changed
& BSS_CHANGED_IBSS
) {
4353 if (bss_conf
->ibss_joined
) {
4354 set_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
);
4357 wlcore_unset_assoc(wl
, wlvif
);
4358 wl12xx_cmd_role_stop_sta(wl
, wlvif
);
4362 if ((changed
& BSS_CHANGED_BEACON_INT
) && ibss_joined
)
4365 /* Need to update the SSID (for filtering etc) */
4366 if ((changed
& BSS_CHANGED_BEACON
) && ibss_joined
)
4369 if ((changed
& BSS_CHANGED_BEACON_ENABLED
) && ibss_joined
) {
4370 wl1271_debug(DEBUG_ADHOC
, "ad-hoc beaconing: %s",
4371 bss_conf
->enable_beacon
? "enabled" : "disabled");
4376 if (changed
& BSS_CHANGED_IDLE
&& !is_ibss
)
4377 wl1271_sta_handle_idle(wl
, wlvif
, bss_conf
->idle
);
4379 if (changed
& BSS_CHANGED_CQM
) {
4380 bool enable
= false;
4381 if (bss_conf
->cqm_rssi_thold
)
4383 ret
= wl1271_acx_rssi_snr_trigger(wl
, wlvif
, enable
,
4384 bss_conf
->cqm_rssi_thold
,
4385 bss_conf
->cqm_rssi_hyst
);
4388 wlvif
->rssi_thold
= bss_conf
->cqm_rssi_thold
;
4391 if (changed
& (BSS_CHANGED_BSSID
| BSS_CHANGED_HT
|
4392 BSS_CHANGED_ASSOC
)) {
4394 sta
= ieee80211_find_sta(vif
, bss_conf
->bssid
);
4396 u8
*rx_mask
= sta
->ht_cap
.mcs
.rx_mask
;
4398 /* save the supp_rates of the ap */
4399 sta_rate_set
= sta
->supp_rates
[wlvif
->band
];
4400 if (sta
->ht_cap
.ht_supported
)
4402 (rx_mask
[0] << HW_HT_RATES_OFFSET
) |
4403 (rx_mask
[1] << HW_MIMO_RATES_OFFSET
);
4404 sta_ht_cap
= sta
->ht_cap
;
4411 if (changed
& BSS_CHANGED_BSSID
) {
4412 if (!is_zero_ether_addr(bss_conf
->bssid
)) {
4413 ret
= wlcore_set_bssid(wl
, wlvif
, bss_conf
,
4418 /* Need to update the BSSID (for filtering etc) */
4421 ret
= wlcore_clear_bssid(wl
, wlvif
);
4427 if (changed
& BSS_CHANGED_IBSS
) {
4428 wl1271_debug(DEBUG_ADHOC
, "ibss_joined: %d",
4429 bss_conf
->ibss_joined
);
4431 if (bss_conf
->ibss_joined
) {
4432 u32 rates
= bss_conf
->basic_rates
;
4433 wlvif
->basic_rate_set
=
4434 wl1271_tx_enabled_rates_get(wl
, rates
,
4437 wl1271_tx_min_rate_get(wl
,
4438 wlvif
->basic_rate_set
);
4440 /* by default, use 11b + OFDM rates */
4441 wlvif
->rate_set
= CONF_TX_IBSS_DEFAULT_RATES
;
4442 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4448 if ((changed
& BSS_CHANGED_BEACON_INFO
) && bss_conf
->dtim_period
) {
4449 /* enable beacon filtering */
4450 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, true);
4455 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
4460 ret
= wlcore_join(wl
, wlvif
);
4462 wl1271_warning("cmd join failed %d", ret
);
4467 if (changed
& BSS_CHANGED_ASSOC
) {
4468 if (bss_conf
->assoc
) {
4469 ret
= wlcore_set_assoc(wl
, wlvif
, bss_conf
,
4474 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
))
4475 wl12xx_set_authorized(wl
, wlvif
);
4477 wlcore_unset_assoc(wl
, wlvif
);
4481 if (changed
& BSS_CHANGED_PS
) {
4482 if ((bss_conf
->ps
) &&
4483 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
4484 !test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
4488 if (wl
->conf
.conn
.forced_ps
) {
4489 ps_mode
= STATION_POWER_SAVE_MODE
;
4490 ps_mode_str
= "forced";
4492 ps_mode
= STATION_AUTO_PS_MODE
;
4493 ps_mode_str
= "auto";
4496 wl1271_debug(DEBUG_PSM
, "%s ps enabled", ps_mode_str
);
4498 ret
= wl1271_ps_set_mode(wl
, wlvif
, ps_mode
);
4500 wl1271_warning("enter %s ps failed %d",
4502 } else if (!bss_conf
->ps
&&
4503 test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
4504 wl1271_debug(DEBUG_PSM
, "auto ps disabled");
4506 ret
= wl1271_ps_set_mode(wl
, wlvif
,
4507 STATION_ACTIVE_MODE
);
4509 wl1271_warning("exit auto ps failed %d", ret
);
4513 /* Handle new association with HT. Do this after join. */
4516 bss_conf
->chandef
.width
!= NL80211_CHAN_WIDTH_20_NOHT
;
4518 ret
= wlcore_hw_set_peer_cap(wl
,
4524 wl1271_warning("Set ht cap failed %d", ret
);
4530 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
4531 bss_conf
->ht_operation_mode
);
4533 wl1271_warning("Set ht information failed %d",
4540 /* Handle arp filtering. Done after join. */
4541 if ((changed
& BSS_CHANGED_ARP_FILTER
) ||
4542 (!is_ibss
&& (changed
& BSS_CHANGED_QOS
))) {
4543 __be32 addr
= bss_conf
->arp_addr_list
[0];
4544 wlvif
->sta
.qos
= bss_conf
->qos
;
4545 WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
);
4547 if (bss_conf
->arp_addr_cnt
== 1 && bss_conf
->assoc
) {
4548 wlvif
->ip_addr
= addr
;
4550 * The template should have been configured only upon
4551 * association. however, it seems that the correct ip
4552 * isn't being set (when sending), so we have to
4553 * reconfigure the template upon every ip change.
4555 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
4557 wl1271_warning("build arp rsp failed: %d", ret
);
4561 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
,
4562 (ACX_ARP_FILTER_ARP_FILTERING
|
4563 ACX_ARP_FILTER_AUTO_ARP
),
4567 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
, 0, addr
);
4578 static void wl1271_op_bss_info_changed(struct ieee80211_hw
*hw
,
4579 struct ieee80211_vif
*vif
,
4580 struct ieee80211_bss_conf
*bss_conf
,
4583 struct wl1271
*wl
= hw
->priv
;
4584 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4585 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
4588 wl1271_debug(DEBUG_MAC80211
, "mac80211 bss info role %d changed 0x%x",
4589 wlvif
->role_id
, (int)changed
);
4592 * make sure to cancel pending disconnections if our association
4595 if (!is_ap
&& (changed
& BSS_CHANGED_ASSOC
))
4596 cancel_delayed_work_sync(&wlvif
->connection_loss_work
);
4598 if (is_ap
&& (changed
& BSS_CHANGED_BEACON_ENABLED
) &&
4599 !bss_conf
->enable_beacon
)
4600 wl1271_tx_flush(wl
);
4602 mutex_lock(&wl
->mutex
);
4604 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4607 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)))
4610 ret
= wl1271_ps_elp_wakeup(wl
);
4614 if ((changed
& BSS_CHANGED_TXPOWER
) &&
4615 bss_conf
->txpower
!= wlvif
->power_level
) {
4617 ret
= wl1271_acx_tx_power(wl
, wlvif
, bss_conf
->txpower
);
4621 wlvif
->power_level
= bss_conf
->txpower
;
4625 wl1271_bss_info_changed_ap(wl
, vif
, bss_conf
, changed
);
4627 wl1271_bss_info_changed_sta(wl
, vif
, bss_conf
, changed
);
4629 wl1271_ps_elp_sleep(wl
);
4632 mutex_unlock(&wl
->mutex
);
4635 static int wlcore_op_add_chanctx(struct ieee80211_hw
*hw
,
4636 struct ieee80211_chanctx_conf
*ctx
)
4638 wl1271_debug(DEBUG_MAC80211
, "mac80211 add chanctx %d (type %d)",
4639 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4640 cfg80211_get_chandef_type(&ctx
->def
));
4644 static void wlcore_op_remove_chanctx(struct ieee80211_hw
*hw
,
4645 struct ieee80211_chanctx_conf
*ctx
)
4647 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove chanctx %d (type %d)",
4648 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4649 cfg80211_get_chandef_type(&ctx
->def
));
4652 static void wlcore_op_change_chanctx(struct ieee80211_hw
*hw
,
4653 struct ieee80211_chanctx_conf
*ctx
,
4656 struct wl1271
*wl
= hw
->priv
;
4657 struct wl12xx_vif
*wlvif
;
4659 int channel
= ieee80211_frequency_to_channel(
4660 ctx
->def
.chan
->center_freq
);
4662 wl1271_debug(DEBUG_MAC80211
,
4663 "mac80211 change chanctx %d (type %d) changed 0x%x",
4664 channel
, cfg80211_get_chandef_type(&ctx
->def
), changed
);
4666 mutex_lock(&wl
->mutex
);
4668 ret
= wl1271_ps_elp_wakeup(wl
);
4672 wl12xx_for_each_wlvif(wl
, wlvif
) {
4673 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
4676 if (rcu_access_pointer(vif
->chanctx_conf
) != ctx
) {
4682 /* start radar if needed */
4683 if (changed
& IEEE80211_CHANCTX_CHANGE_RADAR
&&
4684 wlvif
->bss_type
== BSS_TYPE_AP_BSS
&&
4685 ctx
->radar_enabled
&& !wlvif
->radar_enabled
&&
4686 ctx
->def
.chan
->dfs_state
== NL80211_DFS_USABLE
) {
4687 wl1271_debug(DEBUG_MAC80211
, "Start radar detection");
4688 wlcore_hw_set_cac(wl
, wlvif
, true);
4689 wlvif
->radar_enabled
= true;
4693 wl1271_ps_elp_sleep(wl
);
4695 mutex_unlock(&wl
->mutex
);
4698 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw
*hw
,
4699 struct ieee80211_vif
*vif
,
4700 struct ieee80211_chanctx_conf
*ctx
)
4702 struct wl1271
*wl
= hw
->priv
;
4703 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4704 int channel
= ieee80211_frequency_to_channel(
4705 ctx
->def
.chan
->center_freq
);
4708 wl1271_debug(DEBUG_MAC80211
,
4709 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4710 wlvif
->role_id
, channel
,
4711 cfg80211_get_chandef_type(&ctx
->def
),
4712 ctx
->radar_enabled
, ctx
->def
.chan
->dfs_state
);
4714 mutex_lock(&wl
->mutex
);
4716 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4719 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)))
4722 ret
= wl1271_ps_elp_wakeup(wl
);
4726 wlvif
->band
= ctx
->def
.chan
->band
;
4727 wlvif
->channel
= channel
;
4728 wlvif
->channel_type
= cfg80211_get_chandef_type(&ctx
->def
);
4730 /* update default rates according to the band */
4731 wl1271_set_band_rate(wl
, wlvif
);
4733 if (ctx
->radar_enabled
&&
4734 ctx
->def
.chan
->dfs_state
== NL80211_DFS_USABLE
) {
4735 wl1271_debug(DEBUG_MAC80211
, "Start radar detection");
4736 wlcore_hw_set_cac(wl
, wlvif
, true);
4737 wlvif
->radar_enabled
= true;
4740 wl1271_ps_elp_sleep(wl
);
4742 mutex_unlock(&wl
->mutex
);
4747 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw
*hw
,
4748 struct ieee80211_vif
*vif
,
4749 struct ieee80211_chanctx_conf
*ctx
)
4751 struct wl1271
*wl
= hw
->priv
;
4752 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4755 wl1271_debug(DEBUG_MAC80211
,
4756 "mac80211 unassign chanctx (role %d) %d (type %d)",
4758 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4759 cfg80211_get_chandef_type(&ctx
->def
));
4761 wl1271_tx_flush(wl
);
4763 mutex_lock(&wl
->mutex
);
4765 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4768 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)))
4771 ret
= wl1271_ps_elp_wakeup(wl
);
4775 if (wlvif
->radar_enabled
) {
4776 wl1271_debug(DEBUG_MAC80211
, "Stop radar detection");
4777 wlcore_hw_set_cac(wl
, wlvif
, false);
4778 wlvif
->radar_enabled
= false;
4781 wl1271_ps_elp_sleep(wl
);
4783 mutex_unlock(&wl
->mutex
);
4786 static int __wlcore_switch_vif_chan(struct wl1271
*wl
,
4787 struct wl12xx_vif
*wlvif
,
4788 struct ieee80211_chanctx_conf
*new_ctx
)
4790 int channel
= ieee80211_frequency_to_channel(
4791 new_ctx
->def
.chan
->center_freq
);
4793 wl1271_debug(DEBUG_MAC80211
,
4794 "switch vif (role %d) %d -> %d chan_type: %d",
4795 wlvif
->role_id
, wlvif
->channel
, channel
,
4796 cfg80211_get_chandef_type(&new_ctx
->def
));
4798 if (WARN_ON_ONCE(wlvif
->bss_type
!= BSS_TYPE_AP_BSS
))
4801 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED
, &wlvif
->flags
));
4803 if (wlvif
->radar_enabled
) {
4804 wl1271_debug(DEBUG_MAC80211
, "Stop radar detection");
4805 wlcore_hw_set_cac(wl
, wlvif
, false);
4806 wlvif
->radar_enabled
= false;
4809 wlvif
->band
= new_ctx
->def
.chan
->band
;
4810 wlvif
->channel
= channel
;
4811 wlvif
->channel_type
= cfg80211_get_chandef_type(&new_ctx
->def
);
4813 /* start radar if needed */
4814 if (new_ctx
->radar_enabled
) {
4815 wl1271_debug(DEBUG_MAC80211
, "Start radar detection");
4816 wlcore_hw_set_cac(wl
, wlvif
, true);
4817 wlvif
->radar_enabled
= true;
4824 wlcore_op_switch_vif_chanctx(struct ieee80211_hw
*hw
,
4825 struct ieee80211_vif_chanctx_switch
*vifs
,
4827 enum ieee80211_chanctx_switch_mode mode
)
4829 struct wl1271
*wl
= hw
->priv
;
4832 wl1271_debug(DEBUG_MAC80211
,
4833 "mac80211 switch chanctx n_vifs %d mode %d",
4836 mutex_lock(&wl
->mutex
);
4838 ret
= wl1271_ps_elp_wakeup(wl
);
4842 for (i
= 0; i
< n_vifs
; i
++) {
4843 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vifs
[i
].vif
);
4845 ret
= __wlcore_switch_vif_chan(wl
, wlvif
, vifs
[i
].new_ctx
);
4850 wl1271_ps_elp_sleep(wl
);
4852 mutex_unlock(&wl
->mutex
);
4857 static int wl1271_op_conf_tx(struct ieee80211_hw
*hw
,
4858 struct ieee80211_vif
*vif
, u16 queue
,
4859 const struct ieee80211_tx_queue_params
*params
)
4861 struct wl1271
*wl
= hw
->priv
;
4862 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4866 if (wlcore_is_p2p_mgmt(wlvif
))
4869 mutex_lock(&wl
->mutex
);
4871 wl1271_debug(DEBUG_MAC80211
, "mac80211 conf tx %d", queue
);
4874 ps_scheme
= CONF_PS_SCHEME_UPSD_TRIGGER
;
4876 ps_scheme
= CONF_PS_SCHEME_LEGACY
;
4878 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
4881 ret
= wl1271_ps_elp_wakeup(wl
);
4886 * the txop is confed in units of 32us by the mac80211,
4889 ret
= wl1271_acx_ac_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4890 params
->cw_min
, params
->cw_max
,
4891 params
->aifs
, params
->txop
<< 5);
4895 ret
= wl1271_acx_tid_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4896 CONF_CHANNEL_TYPE_EDCF
,
4897 wl1271_tx_get_queue(queue
),
4898 ps_scheme
, CONF_ACK_POLICY_LEGACY
,
4902 wl1271_ps_elp_sleep(wl
);
4905 mutex_unlock(&wl
->mutex
);
4910 static u64
wl1271_op_get_tsf(struct ieee80211_hw
*hw
,
4911 struct ieee80211_vif
*vif
)
4914 struct wl1271
*wl
= hw
->priv
;
4915 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4916 u64 mactime
= ULLONG_MAX
;
4919 wl1271_debug(DEBUG_MAC80211
, "mac80211 get tsf");
4921 mutex_lock(&wl
->mutex
);
4923 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4926 ret
= wl1271_ps_elp_wakeup(wl
);
4930 ret
= wl12xx_acx_tsf_info(wl
, wlvif
, &mactime
);
4935 wl1271_ps_elp_sleep(wl
);
4938 mutex_unlock(&wl
->mutex
);
4942 static int wl1271_op_get_survey(struct ieee80211_hw
*hw
, int idx
,
4943 struct survey_info
*survey
)
4945 struct ieee80211_conf
*conf
= &hw
->conf
;
4950 survey
->channel
= conf
->chandef
.chan
;
4955 static int wl1271_allocate_sta(struct wl1271
*wl
,
4956 struct wl12xx_vif
*wlvif
,
4957 struct ieee80211_sta
*sta
)
4959 struct wl1271_station
*wl_sta
;
4963 if (wl
->active_sta_count
>= wl
->max_ap_stations
) {
4964 wl1271_warning("could not allocate HLID - too much stations");
4968 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4969 ret
= wl12xx_allocate_link(wl
, wlvif
, &wl_sta
->hlid
);
4971 wl1271_warning("could not allocate HLID - too many links");
4975 /* use the previous security seq, if this is a recovery/resume */
4976 wl
->links
[wl_sta
->hlid
].total_freed_pkts
= wl_sta
->total_freed_pkts
;
4978 set_bit(wl_sta
->hlid
, wlvif
->ap
.sta_hlid_map
);
4979 memcpy(wl
->links
[wl_sta
->hlid
].addr
, sta
->addr
, ETH_ALEN
);
4980 wl
->active_sta_count
++;
4984 void wl1271_free_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
, u8 hlid
)
4986 if (!test_bit(hlid
, wlvif
->ap
.sta_hlid_map
))
4989 clear_bit(hlid
, wlvif
->ap
.sta_hlid_map
);
4990 __clear_bit(hlid
, &wl
->ap_ps_map
);
4991 __clear_bit(hlid
, &wl
->ap_fw_ps_map
);
4994 * save the last used PN in the private part of iee80211_sta,
4995 * in case of recovery/suspend
4997 wlcore_save_freed_pkts_addr(wl
, wlvif
, hlid
, wl
->links
[hlid
].addr
);
4999 wl12xx_free_link(wl
, wlvif
, &hlid
);
5000 wl
->active_sta_count
--;
5003 * rearm the tx watchdog when the last STA is freed - give the FW a
5004 * chance to return STA-buffered packets before complaining.
5006 if (wl
->active_sta_count
== 0)
5007 wl12xx_rearm_tx_watchdog_locked(wl
);
5010 static int wl12xx_sta_add(struct wl1271
*wl
,
5011 struct wl12xx_vif
*wlvif
,
5012 struct ieee80211_sta
*sta
)
5014 struct wl1271_station
*wl_sta
;
5018 wl1271_debug(DEBUG_MAC80211
, "mac80211 add sta %d", (int)sta
->aid
);
5020 ret
= wl1271_allocate_sta(wl
, wlvif
, sta
);
5024 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
5025 hlid
= wl_sta
->hlid
;
5027 ret
= wl12xx_cmd_add_peer(wl
, wlvif
, sta
, hlid
);
5029 wl1271_free_sta(wl
, wlvif
, hlid
);
5034 static int wl12xx_sta_remove(struct wl1271
*wl
,
5035 struct wl12xx_vif
*wlvif
,
5036 struct ieee80211_sta
*sta
)
5038 struct wl1271_station
*wl_sta
;
5041 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove sta %d", (int)sta
->aid
);
5043 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
5045 if (WARN_ON(!test_bit(id
, wlvif
->ap
.sta_hlid_map
)))
5048 ret
= wl12xx_cmd_remove_peer(wl
, wlvif
, wl_sta
->hlid
);
5052 wl1271_free_sta(wl
, wlvif
, wl_sta
->hlid
);
5056 static void wlcore_roc_if_possible(struct wl1271
*wl
,
5057 struct wl12xx_vif
*wlvif
)
5059 if (find_first_bit(wl
->roc_map
,
5060 WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
)
5063 if (WARN_ON(wlvif
->role_id
== WL12XX_INVALID_ROLE_ID
))
5066 wl12xx_roc(wl
, wlvif
, wlvif
->role_id
, wlvif
->band
, wlvif
->channel
);
5070 * when wl_sta is NULL, we treat this call as if coming from a
5071 * pending auth reply.
5072 * wl->mutex must be taken and the FW must be awake when the call
5075 void wlcore_update_inconn_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
5076 struct wl1271_station
*wl_sta
, bool in_conn
)
5079 if (WARN_ON(wl_sta
&& wl_sta
->in_connection
))
5082 if (!wlvif
->ap_pending_auth_reply
&&
5083 !wlvif
->inconn_count
)
5084 wlcore_roc_if_possible(wl
, wlvif
);
5087 wl_sta
->in_connection
= true;
5088 wlvif
->inconn_count
++;
5090 wlvif
->ap_pending_auth_reply
= true;
5093 if (wl_sta
&& !wl_sta
->in_connection
)
5096 if (WARN_ON(!wl_sta
&& !wlvif
->ap_pending_auth_reply
))
5099 if (WARN_ON(wl_sta
&& !wlvif
->inconn_count
))
5103 wl_sta
->in_connection
= false;
5104 wlvif
->inconn_count
--;
5106 wlvif
->ap_pending_auth_reply
= false;
5109 if (!wlvif
->inconn_count
&& !wlvif
->ap_pending_auth_reply
&&
5110 test_bit(wlvif
->role_id
, wl
->roc_map
))
5111 wl12xx_croc(wl
, wlvif
->role_id
);
5115 static int wl12xx_update_sta_state(struct wl1271
*wl
,
5116 struct wl12xx_vif
*wlvif
,
5117 struct ieee80211_sta
*sta
,
5118 enum ieee80211_sta_state old_state
,
5119 enum ieee80211_sta_state new_state
)
5121 struct wl1271_station
*wl_sta
;
5122 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
5123 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
5126 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
5128 /* Add station (AP mode) */
5130 old_state
== IEEE80211_STA_NOTEXIST
&&
5131 new_state
== IEEE80211_STA_NONE
) {
5132 ret
= wl12xx_sta_add(wl
, wlvif
, sta
);
5136 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, true);
5139 /* Remove station (AP mode) */
5141 old_state
== IEEE80211_STA_NONE
&&
5142 new_state
== IEEE80211_STA_NOTEXIST
) {
5144 wl12xx_sta_remove(wl
, wlvif
, sta
);
5146 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, false);
5149 /* Authorize station (AP mode) */
5151 new_state
== IEEE80211_STA_AUTHORIZED
) {
5152 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
, wl_sta
->hlid
);
5156 /* reconfigure rates */
5157 ret
= wl12xx_cmd_add_peer(wl
, wlvif
, sta
, wl_sta
->hlid
);
5161 ret
= wl1271_acx_set_ht_capabilities(wl
, &sta
->ht_cap
, true,
5166 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, false);
5169 /* Authorize station */
5171 new_state
== IEEE80211_STA_AUTHORIZED
) {
5172 set_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
5173 ret
= wl12xx_set_authorized(wl
, wlvif
);
5179 old_state
== IEEE80211_STA_AUTHORIZED
&&
5180 new_state
== IEEE80211_STA_ASSOC
) {
5181 clear_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
5182 clear_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
);
5185 /* save seq number on disassoc (suspend) */
5187 old_state
== IEEE80211_STA_ASSOC
&&
5188 new_state
== IEEE80211_STA_AUTH
) {
5189 wlcore_save_freed_pkts(wl
, wlvif
, wlvif
->sta
.hlid
, sta
);
5190 wlvif
->total_freed_pkts
= 0;
5193 /* restore seq number on assoc (resume) */
5195 old_state
== IEEE80211_STA_AUTH
&&
5196 new_state
== IEEE80211_STA_ASSOC
) {
5197 wlvif
->total_freed_pkts
= wl_sta
->total_freed_pkts
;
5200 /* clear ROCs on failure or authorization */
5202 (new_state
== IEEE80211_STA_AUTHORIZED
||
5203 new_state
== IEEE80211_STA_NOTEXIST
)) {
5204 if (test_bit(wlvif
->role_id
, wl
->roc_map
))
5205 wl12xx_croc(wl
, wlvif
->role_id
);
5209 old_state
== IEEE80211_STA_NOTEXIST
&&
5210 new_state
== IEEE80211_STA_NONE
) {
5211 if (find_first_bit(wl
->roc_map
,
5212 WL12XX_MAX_ROLES
) >= WL12XX_MAX_ROLES
) {
5213 WARN_ON(wlvif
->role_id
== WL12XX_INVALID_ROLE_ID
);
5214 wl12xx_roc(wl
, wlvif
, wlvif
->role_id
,
5215 wlvif
->band
, wlvif
->channel
);
5221 static int wl12xx_op_sta_state(struct ieee80211_hw
*hw
,
5222 struct ieee80211_vif
*vif
,
5223 struct ieee80211_sta
*sta
,
5224 enum ieee80211_sta_state old_state
,
5225 enum ieee80211_sta_state new_state
)
5227 struct wl1271
*wl
= hw
->priv
;
5228 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5231 wl1271_debug(DEBUG_MAC80211
, "mac80211 sta %d state=%d->%d",
5232 sta
->aid
, old_state
, new_state
);
5234 mutex_lock(&wl
->mutex
);
5236 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5241 ret
= wl1271_ps_elp_wakeup(wl
);
5245 ret
= wl12xx_update_sta_state(wl
, wlvif
, sta
, old_state
, new_state
);
5247 wl1271_ps_elp_sleep(wl
);
5249 mutex_unlock(&wl
->mutex
);
5250 if (new_state
< old_state
)
5255 static int wl1271_op_ampdu_action(struct ieee80211_hw
*hw
,
5256 struct ieee80211_vif
*vif
,
5257 struct ieee80211_ampdu_params
*params
)
5259 struct wl1271
*wl
= hw
->priv
;
5260 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5262 u8 hlid
, *ba_bitmap
;
5263 struct ieee80211_sta
*sta
= params
->sta
;
5264 enum ieee80211_ampdu_mlme_action action
= params
->action
;
5265 u16 tid
= params
->tid
;
5266 u16
*ssn
= ¶ms
->ssn
;
5268 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu action %d tid %d", action
,
5271 /* sanity check - the fields in FW are only 8bits wide */
5272 if (WARN_ON(tid
> 0xFF))
5275 mutex_lock(&wl
->mutex
);
5277 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5282 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
) {
5283 hlid
= wlvif
->sta
.hlid
;
5284 } else if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
) {
5285 struct wl1271_station
*wl_sta
;
5287 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
5288 hlid
= wl_sta
->hlid
;
5294 ba_bitmap
= &wl
->links
[hlid
].ba_bitmap
;
5296 ret
= wl1271_ps_elp_wakeup(wl
);
5300 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu: Rx tid %d action %d",
5304 case IEEE80211_AMPDU_RX_START
:
5305 if (!wlvif
->ba_support
|| !wlvif
->ba_allowed
) {
5310 if (wl
->ba_rx_session_count
>= wl
->ba_rx_session_count_max
) {
5312 wl1271_error("exceeded max RX BA sessions");
5316 if (*ba_bitmap
& BIT(tid
)) {
5318 wl1271_error("cannot enable RX BA session on active "
5323 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, *ssn
, true,
5328 *ba_bitmap
|= BIT(tid
);
5329 wl
->ba_rx_session_count
++;
5333 case IEEE80211_AMPDU_RX_STOP
:
5334 if (!(*ba_bitmap
& BIT(tid
))) {
5336 * this happens on reconfig - so only output a debug
5337 * message for now, and don't fail the function.
5339 wl1271_debug(DEBUG_MAC80211
,
5340 "no active RX BA session on tid: %d",
5346 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, 0, false,
5349 *ba_bitmap
&= ~BIT(tid
);
5350 wl
->ba_rx_session_count
--;
5355 * The BA initiator session management in FW independently.
5356 * Falling break here on purpose for all TX APDU commands.
5358 case IEEE80211_AMPDU_TX_START
:
5359 case IEEE80211_AMPDU_TX_STOP_CONT
:
5360 case IEEE80211_AMPDU_TX_STOP_FLUSH
:
5361 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT
:
5362 case IEEE80211_AMPDU_TX_OPERATIONAL
:
5367 wl1271_error("Incorrect ampdu action id=%x\n", action
);
5371 wl1271_ps_elp_sleep(wl
);
5374 mutex_unlock(&wl
->mutex
);
5379 static int wl12xx_set_bitrate_mask(struct ieee80211_hw
*hw
,
5380 struct ieee80211_vif
*vif
,
5381 const struct cfg80211_bitrate_mask
*mask
)
5383 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5384 struct wl1271
*wl
= hw
->priv
;
5387 wl1271_debug(DEBUG_MAC80211
, "mac80211 set_bitrate_mask 0x%x 0x%x",
5388 mask
->control
[NL80211_BAND_2GHZ
].legacy
,
5389 mask
->control
[NL80211_BAND_5GHZ
].legacy
);
5391 mutex_lock(&wl
->mutex
);
5393 for (i
= 0; i
< WLCORE_NUM_BANDS
; i
++)
5394 wlvif
->bitrate_masks
[i
] =
5395 wl1271_tx_enabled_rates_get(wl
,
5396 mask
->control
[i
].legacy
,
5399 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5402 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
5403 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
5405 ret
= wl1271_ps_elp_wakeup(wl
);
5409 wl1271_set_band_rate(wl
, wlvif
);
5411 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
5412 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
5414 wl1271_ps_elp_sleep(wl
);
5417 mutex_unlock(&wl
->mutex
);
5422 static void wl12xx_op_channel_switch(struct ieee80211_hw
*hw
,
5423 struct ieee80211_vif
*vif
,
5424 struct ieee80211_channel_switch
*ch_switch
)
5426 struct wl1271
*wl
= hw
->priv
;
5427 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5430 wl1271_debug(DEBUG_MAC80211
, "mac80211 channel switch");
5432 wl1271_tx_flush(wl
);
5434 mutex_lock(&wl
->mutex
);
5436 if (unlikely(wl
->state
== WLCORE_STATE_OFF
)) {
5437 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
5438 ieee80211_chswitch_done(vif
, false);
5440 } else if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5444 ret
= wl1271_ps_elp_wakeup(wl
);
5448 /* TODO: change mac80211 to pass vif as param */
5450 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
5451 unsigned long delay_usec
;
5453 ret
= wl
->ops
->channel_switch(wl
, wlvif
, ch_switch
);
5457 set_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
);
5459 /* indicate failure 5 seconds after channel switch time */
5460 delay_usec
= ieee80211_tu_to_usec(wlvif
->beacon_int
) *
5462 ieee80211_queue_delayed_work(hw
, &wlvif
->channel_switch_work
,
5463 usecs_to_jiffies(delay_usec
) +
5464 msecs_to_jiffies(5000));
5468 wl1271_ps_elp_sleep(wl
);
5471 mutex_unlock(&wl
->mutex
);
5474 static const void *wlcore_get_beacon_ie(struct wl1271
*wl
,
5475 struct wl12xx_vif
*wlvif
,
5478 int ieoffset
= offsetof(struct ieee80211_mgmt
, u
.beacon
.variable
);
5479 struct sk_buff
*beacon
=
5480 ieee80211_beacon_get(wl
->hw
, wl12xx_wlvif_to_vif(wlvif
));
5485 return cfg80211_find_ie(eid
,
5486 beacon
->data
+ ieoffset
,
5487 beacon
->len
- ieoffset
);
5490 static int wlcore_get_csa_count(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
5494 const struct ieee80211_channel_sw_ie
*ie_csa
;
5496 ie
= wlcore_get_beacon_ie(wl
, wlvif
, WLAN_EID_CHANNEL_SWITCH
);
5500 ie_csa
= (struct ieee80211_channel_sw_ie
*)&ie
[2];
5501 *csa_count
= ie_csa
->count
;
5506 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw
*hw
,
5507 struct ieee80211_vif
*vif
,
5508 struct cfg80211_chan_def
*chandef
)
5510 struct wl1271
*wl
= hw
->priv
;
5511 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5512 struct ieee80211_channel_switch ch_switch
= {
5514 .chandef
= *chandef
,
5518 wl1271_debug(DEBUG_MAC80211
,
5519 "mac80211 channel switch beacon (role %d)",
5522 ret
= wlcore_get_csa_count(wl
, wlvif
, &ch_switch
.count
);
5524 wl1271_error("error getting beacon (for CSA counter)");
5528 mutex_lock(&wl
->mutex
);
5530 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5535 ret
= wl1271_ps_elp_wakeup(wl
);
5539 ret
= wl
->ops
->channel_switch(wl
, wlvif
, &ch_switch
);
5543 set_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
);
5546 wl1271_ps_elp_sleep(wl
);
5548 mutex_unlock(&wl
->mutex
);
5551 static void wlcore_op_flush(struct ieee80211_hw
*hw
, struct ieee80211_vif
*vif
,
5552 u32 queues
, bool drop
)
5554 struct wl1271
*wl
= hw
->priv
;
5556 wl1271_tx_flush(wl
);
5559 static int wlcore_op_remain_on_channel(struct ieee80211_hw
*hw
,
5560 struct ieee80211_vif
*vif
,
5561 struct ieee80211_channel
*chan
,
5563 enum ieee80211_roc_type type
)
5565 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5566 struct wl1271
*wl
= hw
->priv
;
5567 int channel
, active_roc
, ret
= 0;
5569 channel
= ieee80211_frequency_to_channel(chan
->center_freq
);
5571 wl1271_debug(DEBUG_MAC80211
, "mac80211 roc %d (%d)",
5572 channel
, wlvif
->role_id
);
5574 mutex_lock(&wl
->mutex
);
5576 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5579 /* return EBUSY if we can't ROC right now */
5580 active_roc
= find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
);
5581 if (wl
->roc_vif
|| active_roc
< WL12XX_MAX_ROLES
) {
5582 wl1271_warning("active roc on role %d", active_roc
);
5587 ret
= wl1271_ps_elp_wakeup(wl
);
5591 ret
= wl12xx_start_dev(wl
, wlvif
, chan
->band
, channel
);
5596 ieee80211_queue_delayed_work(hw
, &wl
->roc_complete_work
,
5597 msecs_to_jiffies(duration
));
5599 wl1271_ps_elp_sleep(wl
);
5601 mutex_unlock(&wl
->mutex
);
5605 static int __wlcore_roc_completed(struct wl1271
*wl
)
5607 struct wl12xx_vif
*wlvif
;
5610 /* already completed */
5611 if (unlikely(!wl
->roc_vif
))
5614 wlvif
= wl12xx_vif_to_data(wl
->roc_vif
);
5616 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
5619 ret
= wl12xx_stop_dev(wl
, wlvif
);
5628 static int wlcore_roc_completed(struct wl1271
*wl
)
5632 wl1271_debug(DEBUG_MAC80211
, "roc complete");
5634 mutex_lock(&wl
->mutex
);
5636 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5641 ret
= wl1271_ps_elp_wakeup(wl
);
5645 ret
= __wlcore_roc_completed(wl
);
5647 wl1271_ps_elp_sleep(wl
);
5649 mutex_unlock(&wl
->mutex
);
5654 static void wlcore_roc_complete_work(struct work_struct
*work
)
5656 struct delayed_work
*dwork
;
5660 dwork
= to_delayed_work(work
);
5661 wl
= container_of(dwork
, struct wl1271
, roc_complete_work
);
5663 ret
= wlcore_roc_completed(wl
);
5665 ieee80211_remain_on_channel_expired(wl
->hw
);
5668 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw
*hw
)
5670 struct wl1271
*wl
= hw
->priv
;
5672 wl1271_debug(DEBUG_MAC80211
, "mac80211 croc");
5675 wl1271_tx_flush(wl
);
5678 * we can't just flush_work here, because it might deadlock
5679 * (as we might get called from the same workqueue)
5681 cancel_delayed_work_sync(&wl
->roc_complete_work
);
5682 wlcore_roc_completed(wl
);
5687 static void wlcore_op_sta_rc_update(struct ieee80211_hw
*hw
,
5688 struct ieee80211_vif
*vif
,
5689 struct ieee80211_sta
*sta
,
5692 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5694 wl1271_debug(DEBUG_MAC80211
, "mac80211 sta_rc_update");
5696 if (!(changed
& IEEE80211_RC_BW_CHANGED
))
5699 /* this callback is atomic, so schedule a new work */
5700 wlvif
->rc_update_bw
= sta
->bandwidth
;
5701 memcpy(&wlvif
->rc_ht_cap
, &sta
->ht_cap
, sizeof(sta
->ht_cap
));
5702 ieee80211_queue_work(hw
, &wlvif
->rc_update_work
);
5705 static void wlcore_op_sta_statistics(struct ieee80211_hw
*hw
,
5706 struct ieee80211_vif
*vif
,
5707 struct ieee80211_sta
*sta
,
5708 struct station_info
*sinfo
)
5710 struct wl1271
*wl
= hw
->priv
;
5711 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5715 wl1271_debug(DEBUG_MAC80211
, "mac80211 get_rssi");
5717 mutex_lock(&wl
->mutex
);
5719 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5722 ret
= wl1271_ps_elp_wakeup(wl
);
5726 ret
= wlcore_acx_average_rssi(wl
, wlvif
, &rssi_dbm
);
5730 sinfo
->filled
|= BIT(NL80211_STA_INFO_SIGNAL
);
5731 sinfo
->signal
= rssi_dbm
;
5734 wl1271_ps_elp_sleep(wl
);
5737 mutex_unlock(&wl
->mutex
);
5740 static u32
wlcore_op_get_expected_throughput(struct ieee80211_hw
*hw
,
5741 struct ieee80211_sta
*sta
)
5743 struct wl1271_station
*wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
5744 struct wl1271
*wl
= hw
->priv
;
5745 u8 hlid
= wl_sta
->hlid
;
5747 /* return in units of Kbps */
5748 return (wl
->links
[hlid
].fw_rate_mbps
* 1000);
5751 static bool wl1271_tx_frames_pending(struct ieee80211_hw
*hw
)
5753 struct wl1271
*wl
= hw
->priv
;
5756 mutex_lock(&wl
->mutex
);
5758 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5761 /* packets are considered pending if in the TX queue or the FW */
5762 ret
= (wl1271_tx_total_queue_count(wl
) > 0) || (wl
->tx_frames_cnt
> 0);
5764 mutex_unlock(&wl
->mutex
);
5769 /* can't be const, mac80211 writes to this */
5770 static struct ieee80211_rate wl1271_rates
[] = {
5772 .hw_value
= CONF_HW_BIT_RATE_1MBPS
,
5773 .hw_value_short
= CONF_HW_BIT_RATE_1MBPS
, },
5775 .hw_value
= CONF_HW_BIT_RATE_2MBPS
,
5776 .hw_value_short
= CONF_HW_BIT_RATE_2MBPS
,
5777 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5779 .hw_value
= CONF_HW_BIT_RATE_5_5MBPS
,
5780 .hw_value_short
= CONF_HW_BIT_RATE_5_5MBPS
,
5781 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5783 .hw_value
= CONF_HW_BIT_RATE_11MBPS
,
5784 .hw_value_short
= CONF_HW_BIT_RATE_11MBPS
,
5785 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5787 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
5788 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
5790 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
5791 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
5793 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
5794 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
5796 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
5797 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
5799 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
5800 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
5802 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
5803 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
5805 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
5806 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
5808 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
5809 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
5812 /* can't be const, mac80211 writes to this */
5813 static struct ieee80211_channel wl1271_channels
[] = {
5814 { .hw_value
= 1, .center_freq
= 2412, .max_power
= WLCORE_MAX_TXPWR
},
5815 { .hw_value
= 2, .center_freq
= 2417, .max_power
= WLCORE_MAX_TXPWR
},
5816 { .hw_value
= 3, .center_freq
= 2422, .max_power
= WLCORE_MAX_TXPWR
},
5817 { .hw_value
= 4, .center_freq
= 2427, .max_power
= WLCORE_MAX_TXPWR
},
5818 { .hw_value
= 5, .center_freq
= 2432, .max_power
= WLCORE_MAX_TXPWR
},
5819 { .hw_value
= 6, .center_freq
= 2437, .max_power
= WLCORE_MAX_TXPWR
},
5820 { .hw_value
= 7, .center_freq
= 2442, .max_power
= WLCORE_MAX_TXPWR
},
5821 { .hw_value
= 8, .center_freq
= 2447, .max_power
= WLCORE_MAX_TXPWR
},
5822 { .hw_value
= 9, .center_freq
= 2452, .max_power
= WLCORE_MAX_TXPWR
},
5823 { .hw_value
= 10, .center_freq
= 2457, .max_power
= WLCORE_MAX_TXPWR
},
5824 { .hw_value
= 11, .center_freq
= 2462, .max_power
= WLCORE_MAX_TXPWR
},
5825 { .hw_value
= 12, .center_freq
= 2467, .max_power
= WLCORE_MAX_TXPWR
},
5826 { .hw_value
= 13, .center_freq
= 2472, .max_power
= WLCORE_MAX_TXPWR
},
5827 { .hw_value
= 14, .center_freq
= 2484, .max_power
= WLCORE_MAX_TXPWR
},
5830 /* can't be const, mac80211 writes to this */
5831 static struct ieee80211_supported_band wl1271_band_2ghz
= {
5832 .channels
= wl1271_channels
,
5833 .n_channels
= ARRAY_SIZE(wl1271_channels
),
5834 .bitrates
= wl1271_rates
,
5835 .n_bitrates
= ARRAY_SIZE(wl1271_rates
),
5838 /* 5 GHz data rates for WL1273 */
5839 static struct ieee80211_rate wl1271_rates_5ghz
[] = {
5841 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
5842 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
5844 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
5845 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
5847 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
5848 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
5850 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
5851 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
5853 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
5854 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
5856 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
5857 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
5859 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
5860 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
5862 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
5863 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
5866 /* 5 GHz band channels for WL1273 */
5867 static struct ieee80211_channel wl1271_channels_5ghz
[] = {
5868 { .hw_value
= 8, .center_freq
= 5040, .max_power
= WLCORE_MAX_TXPWR
},
5869 { .hw_value
= 12, .center_freq
= 5060, .max_power
= WLCORE_MAX_TXPWR
},
5870 { .hw_value
= 16, .center_freq
= 5080, .max_power
= WLCORE_MAX_TXPWR
},
5871 { .hw_value
= 34, .center_freq
= 5170, .max_power
= WLCORE_MAX_TXPWR
},
5872 { .hw_value
= 36, .center_freq
= 5180, .max_power
= WLCORE_MAX_TXPWR
},
5873 { .hw_value
= 38, .center_freq
= 5190, .max_power
= WLCORE_MAX_TXPWR
},
5874 { .hw_value
= 40, .center_freq
= 5200, .max_power
= WLCORE_MAX_TXPWR
},
5875 { .hw_value
= 42, .center_freq
= 5210, .max_power
= WLCORE_MAX_TXPWR
},
5876 { .hw_value
= 44, .center_freq
= 5220, .max_power
= WLCORE_MAX_TXPWR
},
5877 { .hw_value
= 46, .center_freq
= 5230, .max_power
= WLCORE_MAX_TXPWR
},
5878 { .hw_value
= 48, .center_freq
= 5240, .max_power
= WLCORE_MAX_TXPWR
},
5879 { .hw_value
= 52, .center_freq
= 5260, .max_power
= WLCORE_MAX_TXPWR
},
5880 { .hw_value
= 56, .center_freq
= 5280, .max_power
= WLCORE_MAX_TXPWR
},
5881 { .hw_value
= 60, .center_freq
= 5300, .max_power
= WLCORE_MAX_TXPWR
},
5882 { .hw_value
= 64, .center_freq
= 5320, .max_power
= WLCORE_MAX_TXPWR
},
5883 { .hw_value
= 100, .center_freq
= 5500, .max_power
= WLCORE_MAX_TXPWR
},
5884 { .hw_value
= 104, .center_freq
= 5520, .max_power
= WLCORE_MAX_TXPWR
},
5885 { .hw_value
= 108, .center_freq
= 5540, .max_power
= WLCORE_MAX_TXPWR
},
5886 { .hw_value
= 112, .center_freq
= 5560, .max_power
= WLCORE_MAX_TXPWR
},
5887 { .hw_value
= 116, .center_freq
= 5580, .max_power
= WLCORE_MAX_TXPWR
},
5888 { .hw_value
= 120, .center_freq
= 5600, .max_power
= WLCORE_MAX_TXPWR
},
5889 { .hw_value
= 124, .center_freq
= 5620, .max_power
= WLCORE_MAX_TXPWR
},
5890 { .hw_value
= 128, .center_freq
= 5640, .max_power
= WLCORE_MAX_TXPWR
},
5891 { .hw_value
= 132, .center_freq
= 5660, .max_power
= WLCORE_MAX_TXPWR
},
5892 { .hw_value
= 136, .center_freq
= 5680, .max_power
= WLCORE_MAX_TXPWR
},
5893 { .hw_value
= 140, .center_freq
= 5700, .max_power
= WLCORE_MAX_TXPWR
},
5894 { .hw_value
= 149, .center_freq
= 5745, .max_power
= WLCORE_MAX_TXPWR
},
5895 { .hw_value
= 153, .center_freq
= 5765, .max_power
= WLCORE_MAX_TXPWR
},
5896 { .hw_value
= 157, .center_freq
= 5785, .max_power
= WLCORE_MAX_TXPWR
},
5897 { .hw_value
= 161, .center_freq
= 5805, .max_power
= WLCORE_MAX_TXPWR
},
5898 { .hw_value
= 165, .center_freq
= 5825, .max_power
= WLCORE_MAX_TXPWR
},
5901 static struct ieee80211_supported_band wl1271_band_5ghz
= {
5902 .channels
= wl1271_channels_5ghz
,
5903 .n_channels
= ARRAY_SIZE(wl1271_channels_5ghz
),
5904 .bitrates
= wl1271_rates_5ghz
,
5905 .n_bitrates
= ARRAY_SIZE(wl1271_rates_5ghz
),
5908 static const struct ieee80211_ops wl1271_ops
= {
5909 .start
= wl1271_op_start
,
5910 .stop
= wlcore_op_stop
,
5911 .add_interface
= wl1271_op_add_interface
,
5912 .remove_interface
= wl1271_op_remove_interface
,
5913 .change_interface
= wl12xx_op_change_interface
,
5915 .suspend
= wl1271_op_suspend
,
5916 .resume
= wl1271_op_resume
,
5918 .config
= wl1271_op_config
,
5919 .prepare_multicast
= wl1271_op_prepare_multicast
,
5920 .configure_filter
= wl1271_op_configure_filter
,
5922 .set_key
= wlcore_op_set_key
,
5923 .hw_scan
= wl1271_op_hw_scan
,
5924 .cancel_hw_scan
= wl1271_op_cancel_hw_scan
,
5925 .sched_scan_start
= wl1271_op_sched_scan_start
,
5926 .sched_scan_stop
= wl1271_op_sched_scan_stop
,
5927 .bss_info_changed
= wl1271_op_bss_info_changed
,
5928 .set_frag_threshold
= wl1271_op_set_frag_threshold
,
5929 .set_rts_threshold
= wl1271_op_set_rts_threshold
,
5930 .conf_tx
= wl1271_op_conf_tx
,
5931 .get_tsf
= wl1271_op_get_tsf
,
5932 .get_survey
= wl1271_op_get_survey
,
5933 .sta_state
= wl12xx_op_sta_state
,
5934 .ampdu_action
= wl1271_op_ampdu_action
,
5935 .tx_frames_pending
= wl1271_tx_frames_pending
,
5936 .set_bitrate_mask
= wl12xx_set_bitrate_mask
,
5937 .set_default_unicast_key
= wl1271_op_set_default_key_idx
,
5938 .channel_switch
= wl12xx_op_channel_switch
,
5939 .channel_switch_beacon
= wlcore_op_channel_switch_beacon
,
5940 .flush
= wlcore_op_flush
,
5941 .remain_on_channel
= wlcore_op_remain_on_channel
,
5942 .cancel_remain_on_channel
= wlcore_op_cancel_remain_on_channel
,
5943 .add_chanctx
= wlcore_op_add_chanctx
,
5944 .remove_chanctx
= wlcore_op_remove_chanctx
,
5945 .change_chanctx
= wlcore_op_change_chanctx
,
5946 .assign_vif_chanctx
= wlcore_op_assign_vif_chanctx
,
5947 .unassign_vif_chanctx
= wlcore_op_unassign_vif_chanctx
,
5948 .switch_vif_chanctx
= wlcore_op_switch_vif_chanctx
,
5949 .sta_rc_update
= wlcore_op_sta_rc_update
,
5950 .sta_statistics
= wlcore_op_sta_statistics
,
5951 .get_expected_throughput
= wlcore_op_get_expected_throughput
,
5952 CFG80211_TESTMODE_CMD(wl1271_tm_cmd
)
5956 u8
wlcore_rate_to_idx(struct wl1271
*wl
, u8 rate
, enum nl80211_band band
)
5962 if (unlikely(rate
>= wl
->hw_tx_rate_tbl_size
)) {
5963 wl1271_error("Illegal RX rate from HW: %d", rate
);
5967 idx
= wl
->band_rate_to_idx
[band
][rate
];
5968 if (unlikely(idx
== CONF_HW_RXTX_RATE_UNSUPPORTED
)) {
5969 wl1271_error("Unsupported RX rate from HW: %d", rate
);
5976 static void wl12xx_derive_mac_addresses(struct wl1271
*wl
, u32 oui
, u32 nic
)
5980 wl1271_debug(DEBUG_PROBE
, "base address: oui %06x nic %06x",
5983 if (nic
+ WLCORE_NUM_MAC_ADDRESSES
- wl
->num_mac_addr
> 0xffffff)
5984 wl1271_warning("NIC part of the MAC address wraps around!");
5986 for (i
= 0; i
< wl
->num_mac_addr
; i
++) {
5987 wl
->addresses
[i
].addr
[0] = (u8
)(oui
>> 16);
5988 wl
->addresses
[i
].addr
[1] = (u8
)(oui
>> 8);
5989 wl
->addresses
[i
].addr
[2] = (u8
) oui
;
5990 wl
->addresses
[i
].addr
[3] = (u8
)(nic
>> 16);
5991 wl
->addresses
[i
].addr
[4] = (u8
)(nic
>> 8);
5992 wl
->addresses
[i
].addr
[5] = (u8
) nic
;
5996 /* we may be one address short at the most */
5997 WARN_ON(wl
->num_mac_addr
+ 1 < WLCORE_NUM_MAC_ADDRESSES
);
6000 * turn on the LAA bit in the first address and use it as
6003 if (wl
->num_mac_addr
< WLCORE_NUM_MAC_ADDRESSES
) {
6004 int idx
= WLCORE_NUM_MAC_ADDRESSES
- 1;
6005 memcpy(&wl
->addresses
[idx
], &wl
->addresses
[0],
6006 sizeof(wl
->addresses
[0]));
6008 wl
->addresses
[idx
].addr
[0] |= BIT(1);
6011 wl
->hw
->wiphy
->n_addresses
= WLCORE_NUM_MAC_ADDRESSES
;
6012 wl
->hw
->wiphy
->addresses
= wl
->addresses
;
6015 static int wl12xx_get_hw_info(struct wl1271
*wl
)
6019 ret
= wlcore_read_reg(wl
, REG_CHIP_ID_B
, &wl
->chip
.id
);
6023 wl
->fuse_oui_addr
= 0;
6024 wl
->fuse_nic_addr
= 0;
6026 ret
= wl
->ops
->get_pg_ver(wl
, &wl
->hw_pg_ver
);
6030 if (wl
->ops
->get_mac
)
6031 ret
= wl
->ops
->get_mac(wl
);
6037 static int wl1271_register_hw(struct wl1271
*wl
)
6040 u32 oui_addr
= 0, nic_addr
= 0;
6041 struct platform_device
*pdev
= wl
->pdev
;
6042 struct wlcore_platdev_data
*pdev_data
= dev_get_platdata(&pdev
->dev
);
6044 if (wl
->mac80211_registered
)
6047 if (wl
->nvs_len
>= 12) {
6048 /* NOTE: The wl->nvs->nvs element must be first, in
6049 * order to simplify the casting, we assume it is at
6050 * the beginning of the wl->nvs structure.
6052 u8
*nvs_ptr
= (u8
*)wl
->nvs
;
6055 (nvs_ptr
[11] << 16) + (nvs_ptr
[10] << 8) + nvs_ptr
[6];
6057 (nvs_ptr
[5] << 16) + (nvs_ptr
[4] << 8) + nvs_ptr
[3];
6060 /* if the MAC address is zeroed in the NVS derive from fuse */
6061 if (oui_addr
== 0 && nic_addr
== 0) {
6062 oui_addr
= wl
->fuse_oui_addr
;
6063 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6064 nic_addr
= wl
->fuse_nic_addr
+ 1;
6067 if (oui_addr
== 0xdeadbe && nic_addr
== 0xef0000) {
6068 wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.\n");
6069 if (!strcmp(pdev_data
->family
->name
, "wl18xx")) {
6070 wl1271_warning("This default nvs file can be removed from the file system\n");
6072 wl1271_warning("Your device performance is not optimized.\n");
6073 wl1271_warning("Please use the calibrator tool to configure your device.\n");
6076 if (wl
->fuse_oui_addr
== 0 && wl
->fuse_nic_addr
== 0) {
6077 wl1271_warning("Fuse mac address is zero. using random mac\n");
6078 /* Use TI oui and a random nic */
6079 oui_addr
= WLCORE_TI_OUI_ADDRESS
;
6080 nic_addr
= get_random_int();
6082 oui_addr
= wl
->fuse_oui_addr
;
6083 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6084 nic_addr
= wl
->fuse_nic_addr
+ 1;
6088 wl12xx_derive_mac_addresses(wl
, oui_addr
, nic_addr
);
6090 ret
= ieee80211_register_hw(wl
->hw
);
6092 wl1271_error("unable to register mac80211 hw: %d", ret
);
6096 wl
->mac80211_registered
= true;
6098 wl1271_debugfs_init(wl
);
6100 wl1271_notice("loaded");
6106 static void wl1271_unregister_hw(struct wl1271
*wl
)
6109 wl1271_plt_stop(wl
);
6111 ieee80211_unregister_hw(wl
->hw
);
6112 wl
->mac80211_registered
= false;
6116 static int wl1271_init_ieee80211(struct wl1271
*wl
)
6119 static const u32 cipher_suites
[] = {
6120 WLAN_CIPHER_SUITE_WEP40
,
6121 WLAN_CIPHER_SUITE_WEP104
,
6122 WLAN_CIPHER_SUITE_TKIP
,
6123 WLAN_CIPHER_SUITE_CCMP
,
6124 WL1271_CIPHER_SUITE_GEM
,
6127 /* The tx descriptor buffer */
6128 wl
->hw
->extra_tx_headroom
= sizeof(struct wl1271_tx_hw_descr
);
6130 if (wl
->quirks
& WLCORE_QUIRK_TKIP_HEADER_SPACE
)
6131 wl
->hw
->extra_tx_headroom
+= WL1271_EXTRA_SPACE_TKIP
;
6134 /* FIXME: find a proper value */
6135 wl
->hw
->max_listen_interval
= wl
->conf
.conn
.max_listen_interval
;
6137 ieee80211_hw_set(wl
->hw
, SUPPORT_FAST_XMIT
);
6138 ieee80211_hw_set(wl
->hw
, CHANCTX_STA_CSA
);
6139 ieee80211_hw_set(wl
->hw
, QUEUE_CONTROL
);
6140 ieee80211_hw_set(wl
->hw
, TX_AMPDU_SETUP_IN_HW
);
6141 ieee80211_hw_set(wl
->hw
, AMPDU_AGGREGATION
);
6142 ieee80211_hw_set(wl
->hw
, AP_LINK_PS
);
6143 ieee80211_hw_set(wl
->hw
, SPECTRUM_MGMT
);
6144 ieee80211_hw_set(wl
->hw
, REPORTS_TX_ACK_STATUS
);
6145 ieee80211_hw_set(wl
->hw
, CONNECTION_MONITOR
);
6146 ieee80211_hw_set(wl
->hw
, HAS_RATE_CONTROL
);
6147 ieee80211_hw_set(wl
->hw
, SUPPORTS_DYNAMIC_PS
);
6148 ieee80211_hw_set(wl
->hw
, SIGNAL_DBM
);
6149 ieee80211_hw_set(wl
->hw
, SUPPORTS_PS
);
6150 ieee80211_hw_set(wl
->hw
, SUPPORTS_TX_FRAG
);
6152 wl
->hw
->wiphy
->cipher_suites
= cipher_suites
;
6153 wl
->hw
->wiphy
->n_cipher_suites
= ARRAY_SIZE(cipher_suites
);
6155 wl
->hw
->wiphy
->interface_modes
= BIT(NL80211_IFTYPE_STATION
) |
6156 BIT(NL80211_IFTYPE_AP
) |
6157 BIT(NL80211_IFTYPE_P2P_DEVICE
) |
6158 BIT(NL80211_IFTYPE_P2P_CLIENT
) |
6159 #ifdef CONFIG_MAC80211_MESH
6160 BIT(NL80211_IFTYPE_MESH_POINT
) |
6162 BIT(NL80211_IFTYPE_P2P_GO
);
6164 wl
->hw
->wiphy
->max_scan_ssids
= 1;
6165 wl
->hw
->wiphy
->max_sched_scan_ssids
= 16;
6166 wl
->hw
->wiphy
->max_match_sets
= 16;
6168 * Maximum length of elements in scanning probe request templates
6169 * should be the maximum length possible for a template, without
6170 * the IEEE80211 header of the template
6172 wl
->hw
->wiphy
->max_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
6173 sizeof(struct ieee80211_header
);
6175 wl
->hw
->wiphy
->max_sched_scan_reqs
= 1;
6176 wl
->hw
->wiphy
->max_sched_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
6177 sizeof(struct ieee80211_header
);
6179 wl
->hw
->wiphy
->max_remain_on_channel_duration
= 30000;
6181 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_UAPSD
|
6182 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL
|
6183 WIPHY_FLAG_HAS_CHANNEL_SWITCH
;
6185 wl
->hw
->wiphy
->features
|= NL80211_FEATURE_AP_SCAN
;
6187 /* make sure all our channels fit in the scanned_ch bitmask */
6188 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels
) +
6189 ARRAY_SIZE(wl1271_channels_5ghz
) >
6190 WL1271_MAX_CHANNELS
);
6192 * clear channel flags from the previous usage
6193 * and restore max_power & max_antenna_gain values.
6195 for (i
= 0; i
< ARRAY_SIZE(wl1271_channels
); i
++) {
6196 wl1271_band_2ghz
.channels
[i
].flags
= 0;
6197 wl1271_band_2ghz
.channels
[i
].max_power
= WLCORE_MAX_TXPWR
;
6198 wl1271_band_2ghz
.channels
[i
].max_antenna_gain
= 0;
6201 for (i
= 0; i
< ARRAY_SIZE(wl1271_channels_5ghz
); i
++) {
6202 wl1271_band_5ghz
.channels
[i
].flags
= 0;
6203 wl1271_band_5ghz
.channels
[i
].max_power
= WLCORE_MAX_TXPWR
;
6204 wl1271_band_5ghz
.channels
[i
].max_antenna_gain
= 0;
6208 * We keep local copies of the band structs because we need to
6209 * modify them on a per-device basis.
6211 memcpy(&wl
->bands
[NL80211_BAND_2GHZ
], &wl1271_band_2ghz
,
6212 sizeof(wl1271_band_2ghz
));
6213 memcpy(&wl
->bands
[NL80211_BAND_2GHZ
].ht_cap
,
6214 &wl
->ht_cap
[NL80211_BAND_2GHZ
],
6215 sizeof(*wl
->ht_cap
));
6216 memcpy(&wl
->bands
[NL80211_BAND_5GHZ
], &wl1271_band_5ghz
,
6217 sizeof(wl1271_band_5ghz
));
6218 memcpy(&wl
->bands
[NL80211_BAND_5GHZ
].ht_cap
,
6219 &wl
->ht_cap
[NL80211_BAND_5GHZ
],
6220 sizeof(*wl
->ht_cap
));
6222 wl
->hw
->wiphy
->bands
[NL80211_BAND_2GHZ
] =
6223 &wl
->bands
[NL80211_BAND_2GHZ
];
6224 wl
->hw
->wiphy
->bands
[NL80211_BAND_5GHZ
] =
6225 &wl
->bands
[NL80211_BAND_5GHZ
];
6228 * allow 4 queues per mac address we support +
6229 * 1 cab queue per mac + one global offchannel Tx queue
6231 wl
->hw
->queues
= (NUM_TX_QUEUES
+ 1) * WLCORE_NUM_MAC_ADDRESSES
+ 1;
6233 /* the last queue is the offchannel queue */
6234 wl
->hw
->offchannel_tx_hw_queue
= wl
->hw
->queues
- 1;
6235 wl
->hw
->max_rates
= 1;
6237 wl
->hw
->wiphy
->reg_notifier
= wl1271_reg_notify
;
6239 /* the FW answers probe-requests in AP-mode */
6240 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD
;
6241 wl
->hw
->wiphy
->probe_resp_offload
=
6242 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS
|
6243 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2
|
6244 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P
;
6246 /* allowed interface combinations */
6247 wl
->hw
->wiphy
->iface_combinations
= wl
->iface_combinations
;
6248 wl
->hw
->wiphy
->n_iface_combinations
= wl
->n_iface_combinations
;
6250 /* register vendor commands */
6251 wlcore_set_vendor_commands(wl
->hw
->wiphy
);
6253 SET_IEEE80211_DEV(wl
->hw
, wl
->dev
);
6255 wl
->hw
->sta_data_size
= sizeof(struct wl1271_station
);
6256 wl
->hw
->vif_data_size
= sizeof(struct wl12xx_vif
);
6258 wl
->hw
->max_rx_aggregation_subframes
= wl
->conf
.ht
.rx_ba_win_size
;
6263 struct ieee80211_hw
*wlcore_alloc_hw(size_t priv_size
, u32 aggr_buf_size
,
6266 struct ieee80211_hw
*hw
;
6271 hw
= ieee80211_alloc_hw(sizeof(*wl
), &wl1271_ops
);
6273 wl1271_error("could not alloc ieee80211_hw");
6279 memset(wl
, 0, sizeof(*wl
));
6281 wl
->priv
= kzalloc(priv_size
, GFP_KERNEL
);
6283 wl1271_error("could not alloc wl priv");
6285 goto err_priv_alloc
;
6288 INIT_LIST_HEAD(&wl
->wlvif_list
);
6293 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6294 * we don't allocate any additional resource here, so that's fine.
6296 for (i
= 0; i
< NUM_TX_QUEUES
; i
++)
6297 for (j
= 0; j
< WLCORE_MAX_LINKS
; j
++)
6298 skb_queue_head_init(&wl
->links
[j
].tx_queue
[i
]);
6300 skb_queue_head_init(&wl
->deferred_rx_queue
);
6301 skb_queue_head_init(&wl
->deferred_tx_queue
);
6303 INIT_DELAYED_WORK(&wl
->elp_work
, wl1271_elp_work
);
6304 INIT_WORK(&wl
->netstack_work
, wl1271_netstack_work
);
6305 INIT_WORK(&wl
->tx_work
, wl1271_tx_work
);
6306 INIT_WORK(&wl
->recovery_work
, wl1271_recovery_work
);
6307 INIT_DELAYED_WORK(&wl
->scan_complete_work
, wl1271_scan_complete_work
);
6308 INIT_DELAYED_WORK(&wl
->roc_complete_work
, wlcore_roc_complete_work
);
6309 INIT_DELAYED_WORK(&wl
->tx_watchdog_work
, wl12xx_tx_watchdog_work
);
6311 wl
->freezable_wq
= create_freezable_workqueue("wl12xx_wq");
6312 if (!wl
->freezable_wq
) {
6319 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
6320 wl
->band
= NL80211_BAND_2GHZ
;
6321 wl
->channel_type
= NL80211_CHAN_NO_HT
;
6323 wl
->sg_enabled
= true;
6324 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
6325 wl
->recovery_count
= 0;
6328 wl
->ap_fw_ps_map
= 0;
6330 wl
->system_hlid
= WL12XX_SYSTEM_HLID
;
6331 wl
->active_sta_count
= 0;
6332 wl
->active_link_count
= 0;
6335 /* The system link is always allocated */
6336 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
6338 memset(wl
->tx_frames_map
, 0, sizeof(wl
->tx_frames_map
));
6339 for (i
= 0; i
< wl
->num_tx_desc
; i
++)
6340 wl
->tx_frames
[i
] = NULL
;
6342 spin_lock_init(&wl
->wl_lock
);
6344 wl
->state
= WLCORE_STATE_OFF
;
6345 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
6346 mutex_init(&wl
->mutex
);
6347 mutex_init(&wl
->flush_mutex
);
6348 init_completion(&wl
->nvs_loading_complete
);
6350 order
= get_order(aggr_buf_size
);
6351 wl
->aggr_buf
= (u8
*)__get_free_pages(GFP_KERNEL
, order
);
6352 if (!wl
->aggr_buf
) {
6356 wl
->aggr_buf_size
= aggr_buf_size
;
6358 wl
->dummy_packet
= wl12xx_alloc_dummy_packet(wl
);
6359 if (!wl
->dummy_packet
) {
6364 /* Allocate one page for the FW log */
6365 wl
->fwlog
= (u8
*)get_zeroed_page(GFP_KERNEL
);
6368 goto err_dummy_packet
;
6371 wl
->mbox_size
= mbox_size
;
6372 wl
->mbox
= kmalloc(wl
->mbox_size
, GFP_KERNEL
| GFP_DMA
);
6378 wl
->buffer_32
= kmalloc(sizeof(*wl
->buffer_32
), GFP_KERNEL
);
6379 if (!wl
->buffer_32
) {
6390 free_page((unsigned long)wl
->fwlog
);
6393 dev_kfree_skb(wl
->dummy_packet
);
6396 free_pages((unsigned long)wl
->aggr_buf
, order
);
6399 destroy_workqueue(wl
->freezable_wq
);
6402 wl1271_debugfs_exit(wl
);
6406 ieee80211_free_hw(hw
);
6410 return ERR_PTR(ret
);
6412 EXPORT_SYMBOL_GPL(wlcore_alloc_hw
);
6414 int wlcore_free_hw(struct wl1271
*wl
)
6416 /* Unblock any fwlog readers */
6417 mutex_lock(&wl
->mutex
);
6418 wl
->fwlog_size
= -1;
6419 mutex_unlock(&wl
->mutex
);
6421 wlcore_sysfs_free(wl
);
6423 kfree(wl
->buffer_32
);
6425 free_page((unsigned long)wl
->fwlog
);
6426 dev_kfree_skb(wl
->dummy_packet
);
6427 free_pages((unsigned long)wl
->aggr_buf
, get_order(wl
->aggr_buf_size
));
6429 wl1271_debugfs_exit(wl
);
6433 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
6437 kfree(wl
->raw_fw_status
);
6438 kfree(wl
->fw_status
);
6439 kfree(wl
->tx_res_if
);
6440 destroy_workqueue(wl
->freezable_wq
);
6443 ieee80211_free_hw(wl
->hw
);
6447 EXPORT_SYMBOL_GPL(wlcore_free_hw
);
6450 static const struct wiphy_wowlan_support wlcore_wowlan_support
= {
6451 .flags
= WIPHY_WOWLAN_ANY
,
6452 .n_patterns
= WL1271_MAX_RX_FILTERS
,
6453 .pattern_min_len
= 1,
6454 .pattern_max_len
= WL1271_RX_FILTER_MAX_PATTERN_SIZE
,
6458 static irqreturn_t
wlcore_hardirq(int irq
, void *cookie
)
6460 return IRQ_WAKE_THREAD
;
6463 static void wlcore_nvs_cb(const struct firmware
*fw
, void *context
)
6465 struct wl1271
*wl
= context
;
6466 struct platform_device
*pdev
= wl
->pdev
;
6467 struct wlcore_platdev_data
*pdev_data
= dev_get_platdata(&pdev
->dev
);
6468 struct resource
*res
;
6471 irq_handler_t hardirq_fn
= NULL
;
6474 wl
->nvs
= kmemdup(fw
->data
, fw
->size
, GFP_KERNEL
);
6476 wl1271_error("Could not allocate nvs data");
6479 wl
->nvs_len
= fw
->size
;
6480 } else if (pdev_data
->family
->nvs_name
) {
6481 wl1271_debug(DEBUG_BOOT
, "Could not get nvs file %s",
6482 pdev_data
->family
->nvs_name
);
6490 ret
= wl
->ops
->setup(wl
);
6494 BUG_ON(wl
->num_tx_desc
> WLCORE_MAX_TX_DESCRIPTORS
);
6496 /* adjust some runtime configuration parameters */
6497 wlcore_adjust_conf(wl
);
6499 res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, 0);
6501 wl1271_error("Could not get IRQ resource");
6505 wl
->irq
= res
->start
;
6506 wl
->irq_flags
= res
->flags
& IRQF_TRIGGER_MASK
;
6507 wl
->if_ops
= pdev_data
->if_ops
;
6509 if (wl
->irq_flags
& (IRQF_TRIGGER_RISING
| IRQF_TRIGGER_FALLING
))
6510 hardirq_fn
= wlcore_hardirq
;
6512 wl
->irq_flags
|= IRQF_ONESHOT
;
6514 ret
= wl12xx_set_power_on(wl
);
6518 ret
= wl12xx_get_hw_info(wl
);
6520 wl1271_error("couldn't get hw info");
6521 wl1271_power_off(wl
);
6525 ret
= request_threaded_irq(wl
->irq
, hardirq_fn
, wlcore_irq
,
6526 wl
->irq_flags
, pdev
->name
, wl
);
6528 wl1271_error("interrupt configuration failed");
6529 wl1271_power_off(wl
);
6534 ret
= enable_irq_wake(wl
->irq
);
6536 wl
->irq_wake_enabled
= true;
6537 device_init_wakeup(wl
->dev
, 1);
6538 if (pdev_data
->pwr_in_suspend
)
6539 wl
->hw
->wiphy
->wowlan
= &wlcore_wowlan_support
;
6542 disable_irq(wl
->irq
);
6543 wl1271_power_off(wl
);
6545 ret
= wl
->ops
->identify_chip(wl
);
6549 ret
= wl1271_init_ieee80211(wl
);
6553 ret
= wl1271_register_hw(wl
);
6557 ret
= wlcore_sysfs_init(wl
);
6561 wl
->initialized
= true;
6565 wl1271_unregister_hw(wl
);
6568 free_irq(wl
->irq
, wl
);
6574 release_firmware(fw
);
6575 complete_all(&wl
->nvs_loading_complete
);
6578 int wlcore_probe(struct wl1271
*wl
, struct platform_device
*pdev
)
6580 struct wlcore_platdev_data
*pdev_data
= dev_get_platdata(&pdev
->dev
);
6581 const char *nvs_name
;
6584 if (!wl
->ops
|| !wl
->ptable
|| !pdev_data
)
6587 wl
->dev
= &pdev
->dev
;
6589 platform_set_drvdata(pdev
, wl
);
6591 if (pdev_data
->family
&& pdev_data
->family
->nvs_name
) {
6592 nvs_name
= pdev_data
->family
->nvs_name
;
6593 ret
= request_firmware_nowait(THIS_MODULE
, FW_ACTION_HOTPLUG
,
6594 nvs_name
, &pdev
->dev
, GFP_KERNEL
,
6597 wl1271_error("request_firmware_nowait failed for %s: %d",
6599 complete_all(&wl
->nvs_loading_complete
);
6602 wlcore_nvs_cb(NULL
, wl
);
6607 EXPORT_SYMBOL_GPL(wlcore_probe
);
6609 int wlcore_remove(struct platform_device
*pdev
)
6611 struct wlcore_platdev_data
*pdev_data
= dev_get_platdata(&pdev
->dev
);
6612 struct wl1271
*wl
= platform_get_drvdata(pdev
);
6614 if (pdev_data
->family
&& pdev_data
->family
->nvs_name
)
6615 wait_for_completion(&wl
->nvs_loading_complete
);
6616 if (!wl
->initialized
)
6619 if (wl
->irq_wake_enabled
) {
6620 device_init_wakeup(wl
->dev
, 0);
6621 disable_irq_wake(wl
->irq
);
6623 wl1271_unregister_hw(wl
);
6624 free_irq(wl
->irq
, wl
);
6629 EXPORT_SYMBOL_GPL(wlcore_remove
);
6631 u32 wl12xx_debug_level
= DEBUG_NONE
;
6632 EXPORT_SYMBOL_GPL(wl12xx_debug_level
);
6633 module_param_named(debug_level
, wl12xx_debug_level
, uint
, S_IRUSR
| S_IWUSR
);
6634 MODULE_PARM_DESC(debug_level
, "wl12xx debugging level");
6636 module_param_named(fwlog
, fwlog_param
, charp
, 0);
6637 MODULE_PARM_DESC(fwlog
,
6638 "FW logger options: continuous, dbgpins or disable");
6640 module_param(fwlog_mem_blocks
, int, S_IRUSR
| S_IWUSR
);
6641 MODULE_PARM_DESC(fwlog_mem_blocks
, "fwlog mem_blocks");
6643 module_param(bug_on_recovery
, int, S_IRUSR
| S_IWUSR
);
6644 MODULE_PARM_DESC(bug_on_recovery
, "BUG() on fw recovery");
6646 module_param(no_recovery
, int, S_IRUSR
| S_IWUSR
);
6647 MODULE_PARM_DESC(no_recovery
, "Prevent HW recovery. FW will remain stuck.");
6649 MODULE_LICENSE("GPL");
6650 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6651 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");