3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
54 #define WL1271_BOOT_RETRIES 3
56 #define WL1271_BOOT_RETRIES 3
58 static char *fwlog_param
;
59 static bool bug_on_recovery
;
60 static bool no_recovery
;
62 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
63 struct ieee80211_vif
*vif
,
64 bool reset_tx_queues
);
65 static void wlcore_op_stop_locked(struct wl1271
*wl
);
66 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
);
68 static int wl12xx_set_authorized(struct wl1271
*wl
,
69 struct wl12xx_vif
*wlvif
)
73 if (WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
))
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
))
82 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
->sta
.hlid
);
86 wl12xx_croc(wl
, wlvif
->role_id
);
88 wl1271_info("Association completed.");
92 static int wl1271_reg_notify(struct wiphy
*wiphy
,
93 struct regulatory_request
*request
)
95 struct ieee80211_supported_band
*band
;
96 struct ieee80211_channel
*ch
;
99 band
= wiphy
->bands
[IEEE80211_BAND_5GHZ
];
100 for (i
= 0; i
< band
->n_channels
; i
++) {
101 ch
= &band
->channels
[i
];
102 if (ch
->flags
& IEEE80211_CHAN_DISABLED
)
105 if (ch
->flags
& IEEE80211_CHAN_RADAR
)
106 ch
->flags
|= IEEE80211_CHAN_NO_IBSS
|
107 IEEE80211_CHAN_PASSIVE_SCAN
;
114 static int wl1271_set_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
119 /* we should hold wl->mutex */
120 ret
= wl1271_acx_ps_rx_streaming(wl
, wlvif
, enable
);
125 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
127 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
133 * this function is being called when the rx_streaming interval
134 * has been changed or rx_streaming should be disabled
136 int wl1271_recalc_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
139 int period
= wl
->conf
.rx_streaming
.interval
;
141 /* don't reconfigure if rx_streaming is disabled */
142 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
145 /* reconfigure/disable according to new streaming_period */
147 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
148 (wl
->conf
.rx_streaming
.always
||
149 test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
150 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
152 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
153 /* don't cancel_work_sync since we might deadlock */
154 del_timer_sync(&wlvif
->rx_streaming_timer
);
160 static void wl1271_rx_streaming_enable_work(struct work_struct
*work
)
163 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
164 rx_streaming_enable_work
);
165 struct wl1271
*wl
= wlvif
->wl
;
167 mutex_lock(&wl
->mutex
);
169 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
) ||
170 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
171 (!wl
->conf
.rx_streaming
.always
&&
172 !test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
175 if (!wl
->conf
.rx_streaming
.interval
)
178 ret
= wl1271_ps_elp_wakeup(wl
);
182 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
186 /* stop it after some time of inactivity */
187 mod_timer(&wlvif
->rx_streaming_timer
,
188 jiffies
+ msecs_to_jiffies(wl
->conf
.rx_streaming
.duration
));
191 wl1271_ps_elp_sleep(wl
);
193 mutex_unlock(&wl
->mutex
);
196 static void wl1271_rx_streaming_disable_work(struct work_struct
*work
)
199 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
200 rx_streaming_disable_work
);
201 struct wl1271
*wl
= wlvif
->wl
;
203 mutex_lock(&wl
->mutex
);
205 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
208 ret
= wl1271_ps_elp_wakeup(wl
);
212 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
217 wl1271_ps_elp_sleep(wl
);
219 mutex_unlock(&wl
->mutex
);
222 static void wl1271_rx_streaming_timer(unsigned long data
)
224 struct wl12xx_vif
*wlvif
= (struct wl12xx_vif
*)data
;
225 struct wl1271
*wl
= wlvif
->wl
;
226 ieee80211_queue_work(wl
->hw
, &wlvif
->rx_streaming_disable_work
);
229 /* wl->mutex must be taken */
230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271
*wl
)
232 /* if the watchdog is not armed, don't do anything */
233 if (wl
->tx_allocated_blocks
== 0)
236 cancel_delayed_work(&wl
->tx_watchdog_work
);
237 ieee80211_queue_delayed_work(wl
->hw
, &wl
->tx_watchdog_work
,
238 msecs_to_jiffies(wl
->conf
.tx
.tx_watchdog_timeout
));
241 static void wl12xx_tx_watchdog_work(struct work_struct
*work
)
243 struct delayed_work
*dwork
;
246 dwork
= container_of(work
, struct delayed_work
, work
);
247 wl
= container_of(dwork
, struct wl1271
, tx_watchdog_work
);
249 mutex_lock(&wl
->mutex
);
251 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
254 /* Tx went out in the meantime - everything is ok */
255 if (unlikely(wl
->tx_allocated_blocks
== 0))
259 * if a ROC is in progress, we might not have any Tx for a long
260 * time (e.g. pending Tx on the non-ROC channels)
262 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
263 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to ROC",
264 wl
->conf
.tx
.tx_watchdog_timeout
);
265 wl12xx_rearm_tx_watchdog_locked(wl
);
270 * if a scan is in progress, we might not have any Tx for a long
273 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
) {
274 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to scan",
275 wl
->conf
.tx
.tx_watchdog_timeout
);
276 wl12xx_rearm_tx_watchdog_locked(wl
);
281 * AP might cache a frame for a long time for a sleeping station,
282 * so rearm the timer if there's an AP interface with stations. If
283 * Tx is genuinely stuck we will most hopefully discover it when all
284 * stations are removed due to inactivity.
286 if (wl
->active_sta_count
) {
287 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms. AP has "
289 wl
->conf
.tx
.tx_watchdog_timeout
,
290 wl
->active_sta_count
);
291 wl12xx_rearm_tx_watchdog_locked(wl
);
295 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
296 wl
->conf
.tx
.tx_watchdog_timeout
);
297 wl12xx_queue_recovery_work(wl
);
300 mutex_unlock(&wl
->mutex
);
303 static void wlcore_adjust_conf(struct wl1271
*wl
)
305 /* Adjust settings according to optional module parameters */
307 if (!strcmp(fwlog_param
, "continuous")) {
308 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
309 } else if (!strcmp(fwlog_param
, "ondemand")) {
310 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_ON_DEMAND
;
311 } else if (!strcmp(fwlog_param
, "dbgpins")) {
312 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
313 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_DBG_PINS
;
314 } else if (!strcmp(fwlog_param
, "disable")) {
315 wl
->conf
.fwlog
.mem_blocks
= 0;
316 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_NONE
;
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param
);
323 static void wl12xx_irq_ps_regulate_link(struct wl1271
*wl
,
324 struct wl12xx_vif
*wlvif
,
327 bool fw_ps
, single_sta
;
329 fw_ps
= test_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
330 single_sta
= (wl
->active_sta_count
== 1);
333 * Wake up from high level PS if the STA is asleep with too little
334 * packets in FW or if the STA is awake.
336 if (!fw_ps
|| tx_pkts
< WL1271_PS_STA_MAX_PACKETS
)
337 wl12xx_ps_link_end(wl
, wlvif
, hlid
);
340 * Start high-level PS if the STA is asleep with enough blocks in FW.
341 * Make an exception if this is the only connected station. In this
342 * case FW-memory congestion is not a problem.
344 else if (!single_sta
&& fw_ps
&& tx_pkts
>= WL1271_PS_STA_MAX_PACKETS
)
345 wl12xx_ps_link_start(wl
, wlvif
, hlid
, true);
348 static void wl12xx_irq_update_links_status(struct wl1271
*wl
,
349 struct wl12xx_vif
*wlvif
,
350 struct wl_fw_status_2
*status
)
352 struct wl1271_link
*lnk
;
356 /* TODO: also use link_fast_bitmap here */
358 cur_fw_ps_map
= le32_to_cpu(status
->link_ps_bitmap
);
359 if (wl
->ap_fw_ps_map
!= cur_fw_ps_map
) {
360 wl1271_debug(DEBUG_PSM
,
361 "link ps prev 0x%x cur 0x%x changed 0x%x",
362 wl
->ap_fw_ps_map
, cur_fw_ps_map
,
363 wl
->ap_fw_ps_map
^ cur_fw_ps_map
);
365 wl
->ap_fw_ps_map
= cur_fw_ps_map
;
368 for_each_set_bit(hlid
, wlvif
->ap
.sta_hlid_map
, WL12XX_MAX_LINKS
) {
369 lnk
= &wl
->links
[hlid
];
370 cnt
= status
->counters
.tx_lnk_free_pkts
[hlid
] -
371 lnk
->prev_freed_pkts
;
373 lnk
->prev_freed_pkts
= status
->counters
.tx_lnk_free_pkts
[hlid
];
374 lnk
->allocated_pkts
-= cnt
;
376 wl12xx_irq_ps_regulate_link(wl
, wlvif
, hlid
,
377 lnk
->allocated_pkts
);
381 static int wlcore_fw_status(struct wl1271
*wl
,
382 struct wl_fw_status_1
*status_1
,
383 struct wl_fw_status_2
*status_2
)
385 struct wl12xx_vif
*wlvif
;
387 u32 old_tx_blk_count
= wl
->tx_blocks_available
;
388 int avail
, freed_blocks
;
393 status_len
= WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
394 sizeof(*status_2
) + wl
->fw_status_priv_len
;
396 ret
= wlcore_raw_read_data(wl
, REG_RAW_FW_STATUS_ADDR
, status_1
,
401 wl1271_debug(DEBUG_IRQ
, "intr: 0x%x (fw_rx_counter = %d, "
402 "drv_rx_counter = %d, tx_results_counter = %d)",
404 status_1
->fw_rx_counter
,
405 status_1
->drv_rx_counter
,
406 status_1
->tx_results_counter
);
408 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
409 /* prevent wrap-around in freed-packets counter */
410 wl
->tx_allocated_pkts
[i
] -=
411 (status_2
->counters
.tx_released_pkts
[i
] -
412 wl
->tx_pkts_freed
[i
]) & 0xff;
414 wl
->tx_pkts_freed
[i
] = status_2
->counters
.tx_released_pkts
[i
];
417 /* prevent wrap-around in total blocks counter */
418 if (likely(wl
->tx_blocks_freed
<=
419 le32_to_cpu(status_2
->total_released_blks
)))
420 freed_blocks
= le32_to_cpu(status_2
->total_released_blks
) -
423 freed_blocks
= 0x100000000LL
- wl
->tx_blocks_freed
+
424 le32_to_cpu(status_2
->total_released_blks
);
426 wl
->tx_blocks_freed
= le32_to_cpu(status_2
->total_released_blks
);
428 wl
->tx_allocated_blocks
-= freed_blocks
;
431 * If the FW freed some blocks:
432 * If we still have allocated blocks - re-arm the timer, Tx is
433 * not stuck. Otherwise, cancel the timer (no Tx currently).
436 if (wl
->tx_allocated_blocks
)
437 wl12xx_rearm_tx_watchdog_locked(wl
);
439 cancel_delayed_work(&wl
->tx_watchdog_work
);
442 avail
= le32_to_cpu(status_2
->tx_total
) - wl
->tx_allocated_blocks
;
445 * The FW might change the total number of TX memblocks before
446 * we get a notification about blocks being released. Thus, the
447 * available blocks calculation might yield a temporary result
448 * which is lower than the actual available blocks. Keeping in
449 * mind that only blocks that were allocated can be moved from
450 * TX to RX, tx_blocks_available should never decrease here.
452 wl
->tx_blocks_available
= max((int)wl
->tx_blocks_available
,
455 /* if more blocks are available now, tx work can be scheduled */
456 if (wl
->tx_blocks_available
> old_tx_blk_count
)
457 clear_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
);
459 /* for AP update num of allocated TX blocks per link and ps status */
460 wl12xx_for_each_wlvif_ap(wl
, wlvif
) {
461 wl12xx_irq_update_links_status(wl
, wlvif
, status_2
);
464 /* update the host-chipset time offset */
466 wl
->time_offset
= (timespec_to_ns(&ts
) >> 10) -
467 (s64
)le32_to_cpu(status_2
->fw_localtime
);
472 static void wl1271_flush_deferred_work(struct wl1271
*wl
)
476 /* Pass all received frames to the network stack */
477 while ((skb
= skb_dequeue(&wl
->deferred_rx_queue
)))
478 ieee80211_rx_ni(wl
->hw
, skb
);
480 /* Return sent skbs to the network stack */
481 while ((skb
= skb_dequeue(&wl
->deferred_tx_queue
)))
482 ieee80211_tx_status_ni(wl
->hw
, skb
);
485 static void wl1271_netstack_work(struct work_struct
*work
)
488 container_of(work
, struct wl1271
, netstack_work
);
491 wl1271_flush_deferred_work(wl
);
492 } while (skb_queue_len(&wl
->deferred_rx_queue
));
495 #define WL1271_IRQ_MAX_LOOPS 256
497 static int wlcore_irq_locked(struct wl1271
*wl
)
501 int loopcount
= WL1271_IRQ_MAX_LOOPS
;
503 unsigned int defer_count
;
507 * In case edge triggered interrupt must be used, we cannot iterate
508 * more than once without introducing race conditions with the hardirq.
510 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
513 wl1271_debug(DEBUG_IRQ
, "IRQ work");
515 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
518 ret
= wl1271_ps_elp_wakeup(wl
);
522 while (!done
&& loopcount
--) {
524 * In order to avoid a race with the hardirq, clear the flag
525 * before acknowledging the chip. Since the mutex is held,
526 * wl1271_ps_elp_wakeup cannot be called concurrently.
528 clear_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
529 smp_mb__after_clear_bit();
531 ret
= wlcore_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
535 wlcore_hw_tx_immediate_compl(wl
);
537 intr
= le32_to_cpu(wl
->fw_status_1
->intr
);
538 intr
&= WLCORE_ALL_INTR_MASK
;
544 if (unlikely(intr
& WL1271_ACX_INTR_WATCHDOG
)) {
545 wl1271_error("HW watchdog interrupt received! starting recovery.");
546 wl
->watchdog_recovery
= true;
549 /* restarting the chip. ignore any other interrupt. */
553 if (unlikely(intr
& WL1271_ACX_SW_INTR_WATCHDOG
)) {
554 wl1271_error("SW watchdog interrupt received! "
555 "starting recovery.");
556 wl
->watchdog_recovery
= true;
559 /* restarting the chip. ignore any other interrupt. */
563 if (likely(intr
& WL1271_ACX_INTR_DATA
)) {
564 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_DATA");
566 ret
= wlcore_rx(wl
, wl
->fw_status_1
);
570 /* Check if any tx blocks were freed */
571 spin_lock_irqsave(&wl
->wl_lock
, flags
);
572 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
573 wl1271_tx_total_queue_count(wl
) > 0) {
574 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
576 * In order to avoid starvation of the TX path,
577 * call the work function directly.
579 ret
= wlcore_tx_work_locked(wl
);
583 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
586 /* check for tx results */
587 ret
= wlcore_hw_tx_delayed_compl(wl
);
591 /* Make sure the deferred queues don't get too long */
592 defer_count
= skb_queue_len(&wl
->deferred_tx_queue
) +
593 skb_queue_len(&wl
->deferred_rx_queue
);
594 if (defer_count
> WL1271_DEFERRED_QUEUE_LIMIT
)
595 wl1271_flush_deferred_work(wl
);
598 if (intr
& WL1271_ACX_INTR_EVENT_A
) {
599 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_A");
600 ret
= wl1271_event_handle(wl
, 0);
605 if (intr
& WL1271_ACX_INTR_EVENT_B
) {
606 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_B");
607 ret
= wl1271_event_handle(wl
, 1);
612 if (intr
& WL1271_ACX_INTR_INIT_COMPLETE
)
613 wl1271_debug(DEBUG_IRQ
,
614 "WL1271_ACX_INTR_INIT_COMPLETE");
616 if (intr
& WL1271_ACX_INTR_HW_AVAILABLE
)
617 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_HW_AVAILABLE");
620 wl1271_ps_elp_sleep(wl
);
626 static irqreturn_t
wlcore_irq(int irq
, void *cookie
)
630 struct wl1271
*wl
= cookie
;
632 /* TX might be handled here, avoid redundant work */
633 set_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
634 cancel_work_sync(&wl
->tx_work
);
636 mutex_lock(&wl
->mutex
);
638 ret
= wlcore_irq_locked(wl
);
640 wl12xx_queue_recovery_work(wl
);
642 spin_lock_irqsave(&wl
->wl_lock
, flags
);
643 /* In case TX was not handled here, queue TX work */
644 clear_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
645 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
646 wl1271_tx_total_queue_count(wl
) > 0)
647 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
648 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
650 mutex_unlock(&wl
->mutex
);
655 struct vif_counter_data
{
658 struct ieee80211_vif
*cur_vif
;
659 bool cur_vif_running
;
662 static void wl12xx_vif_count_iter(void *data
, u8
*mac
,
663 struct ieee80211_vif
*vif
)
665 struct vif_counter_data
*counter
= data
;
668 if (counter
->cur_vif
== vif
)
669 counter
->cur_vif_running
= true;
672 /* caller must not hold wl->mutex, as it might deadlock */
673 static void wl12xx_get_vif_count(struct ieee80211_hw
*hw
,
674 struct ieee80211_vif
*cur_vif
,
675 struct vif_counter_data
*data
)
677 memset(data
, 0, sizeof(*data
));
678 data
->cur_vif
= cur_vif
;
680 ieee80211_iterate_active_interfaces(hw
,
681 wl12xx_vif_count_iter
, data
);
684 static int wl12xx_fetch_firmware(struct wl1271
*wl
, bool plt
)
686 const struct firmware
*fw
;
688 enum wl12xx_fw_type fw_type
;
692 fw_type
= WL12XX_FW_TYPE_PLT
;
693 fw_name
= wl
->plt_fw_name
;
696 * we can't call wl12xx_get_vif_count() here because
697 * wl->mutex is taken, so use the cached last_vif_count value
699 if (wl
->last_vif_count
> 1) {
700 fw_type
= WL12XX_FW_TYPE_MULTI
;
701 fw_name
= wl
->mr_fw_name
;
703 fw_type
= WL12XX_FW_TYPE_NORMAL
;
704 fw_name
= wl
->sr_fw_name
;
708 if (wl
->fw_type
== fw_type
)
711 wl1271_debug(DEBUG_BOOT
, "booting firmware %s", fw_name
);
713 ret
= request_firmware(&fw
, fw_name
, wl
->dev
);
716 wl1271_error("could not get firmware %s: %d", fw_name
, ret
);
721 wl1271_error("firmware size is not multiple of 32 bits: %zu",
728 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
729 wl
->fw_len
= fw
->size
;
730 wl
->fw
= vmalloc(wl
->fw_len
);
733 wl1271_error("could not allocate memory for the firmware");
738 memcpy(wl
->fw
, fw
->data
, wl
->fw_len
);
740 wl
->fw_type
= fw_type
;
742 release_firmware(fw
);
747 static void wl1271_fetch_nvs(struct wl1271
*wl
)
749 const struct firmware
*fw
;
752 ret
= request_firmware(&fw
, WL12XX_NVS_NAME
, wl
->dev
);
755 wl1271_debug(DEBUG_BOOT
, "could not get nvs file %s: %d",
756 WL12XX_NVS_NAME
, ret
);
760 wl
->nvs
= kmemdup(fw
->data
, fw
->size
, GFP_KERNEL
);
763 wl1271_error("could not allocate memory for the nvs file");
767 wl
->nvs_len
= fw
->size
;
770 release_firmware(fw
);
773 void wl12xx_queue_recovery_work(struct wl1271
*wl
)
775 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
777 /* Avoid a recursive recovery */
778 if (!test_and_set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
779 wlcore_disable_interrupts_nosync(wl
);
780 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
784 size_t wl12xx_copy_fwlog(struct wl1271
*wl
, u8
*memblock
, size_t maxlen
)
788 /* The FW log is a length-value list, find where the log end */
789 while (len
< maxlen
) {
790 if (memblock
[len
] == 0)
792 if (len
+ memblock
[len
] + 1 > maxlen
)
794 len
+= memblock
[len
] + 1;
797 /* Make sure we have enough room */
798 len
= min(len
, (size_t)(PAGE_SIZE
- wl
->fwlog_size
));
800 /* Fill the FW log file, consumed by the sysfs fwlog entry */
801 memcpy(wl
->fwlog
+ wl
->fwlog_size
, memblock
, len
);
802 wl
->fwlog_size
+= len
;
807 #define WLCORE_FW_LOG_END 0x2000000
809 static void wl12xx_read_fwlog_panic(struct wl1271
*wl
)
817 if ((wl
->quirks
& WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED
) ||
818 (wl
->conf
.fwlog
.mem_blocks
== 0))
821 wl1271_info("Reading FW panic log");
823 block
= kmalloc(WL12XX_HW_BLOCK_SIZE
, GFP_KERNEL
);
828 * Make sure the chip is awake and the logger isn't active.
829 * Do not send a stop fwlog command if the fw is hanged.
831 if (wl1271_ps_elp_wakeup(wl
))
833 if (!wl
->watchdog_recovery
)
834 wl12xx_cmd_stop_fwlog(wl
);
836 /* Read the first memory block address */
837 ret
= wlcore_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
841 addr
= le32_to_cpu(wl
->fw_status_2
->log_start_addr
);
845 if (wl
->conf
.fwlog
.mode
== WL12XX_FWLOG_CONTINUOUS
) {
846 offset
= sizeof(addr
) + sizeof(struct wl1271_rx_descriptor
);
847 end_of_log
= WLCORE_FW_LOG_END
;
849 offset
= sizeof(addr
);
853 /* Traverse the memory blocks linked list */
855 memset(block
, 0, WL12XX_HW_BLOCK_SIZE
);
856 ret
= wlcore_read_hwaddr(wl
, addr
, block
, WL12XX_HW_BLOCK_SIZE
,
862 * Memory blocks are linked to one another. The first 4 bytes
863 * of each memory block hold the hardware address of the next
864 * one. The last memory block points to the first one in
865 * on demand mode and is equal to 0x2000000 in continuous mode.
867 addr
= le32_to_cpup((__le32
*)block
);
868 if (!wl12xx_copy_fwlog(wl
, block
+ offset
,
869 WL12XX_HW_BLOCK_SIZE
- offset
))
871 } while (addr
&& (addr
!= end_of_log
));
873 wake_up_interruptible(&wl
->fwlog_waitq
);
879 static void wlcore_print_recovery(struct wl1271
*wl
)
885 wl1271_info("Hardware recovery in progress. FW ver: %s",
886 wl
->chip
.fw_ver_str
);
888 /* change partitions momentarily so we can read the FW pc */
889 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
893 ret
= wlcore_read_reg(wl
, REG_PC_ON_RECOVERY
, &pc
);
897 ret
= wlcore_read_reg(wl
, REG_INTERRUPT_NO_CLEAR
, &hint_sts
);
901 wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc
, hint_sts
);
903 wlcore_set_partition(wl
, &wl
->ptable
[PART_WORK
]);
907 static void wl1271_recovery_work(struct work_struct
*work
)
910 container_of(work
, struct wl1271
, recovery_work
);
911 struct wl12xx_vif
*wlvif
;
912 struct ieee80211_vif
*vif
;
914 mutex_lock(&wl
->mutex
);
916 if (wl
->state
!= WL1271_STATE_ON
|| wl
->plt
)
919 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
)) {
920 wl12xx_read_fwlog_panic(wl
);
921 wlcore_print_recovery(wl
);
924 BUG_ON(bug_on_recovery
&&
925 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
928 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
933 * Advance security sequence number to overcome potential progress
934 * in the firmware during recovery. This doesn't hurt if the network is
937 wl12xx_for_each_wlvif(wl
, wlvif
) {
938 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
939 test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
940 wlvif
->tx_security_seq
+=
941 WL1271_TX_SQN_POST_RECOVERY_PADDING
;
944 /* Prevent spurious TX during FW restart */
945 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
947 if (wl
->sched_scanning
) {
948 ieee80211_sched_scan_stopped(wl
->hw
);
949 wl
->sched_scanning
= false;
952 /* reboot the chipset */
953 while (!list_empty(&wl
->wlvif_list
)) {
954 wlvif
= list_first_entry(&wl
->wlvif_list
,
955 struct wl12xx_vif
, list
);
956 vif
= wl12xx_wlvif_to_vif(wlvif
);
957 __wl1271_op_remove_interface(wl
, vif
, false);
960 wlcore_op_stop_locked(wl
);
962 ieee80211_restart_hw(wl
->hw
);
965 * It's safe to enable TX now - the queues are stopped after a request
968 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
971 wl
->watchdog_recovery
= false;
972 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
973 mutex_unlock(&wl
->mutex
);
976 static int wlcore_fw_wakeup(struct wl1271
*wl
)
978 return wlcore_raw_write32(wl
, HW_ACCESS_ELP_CTRL_REG
, ELPCTRL_WAKE_UP
);
981 static int wl1271_setup(struct wl1271
*wl
)
983 wl
->fw_status_1
= kmalloc(WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
984 sizeof(*wl
->fw_status_2
) +
985 wl
->fw_status_priv_len
, GFP_KERNEL
);
986 if (!wl
->fw_status_1
)
989 wl
->fw_status_2
= (struct wl_fw_status_2
*)
990 (((u8
*) wl
->fw_status_1
) +
991 WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
));
993 wl
->tx_res_if
= kmalloc(sizeof(*wl
->tx_res_if
), GFP_KERNEL
);
994 if (!wl
->tx_res_if
) {
995 kfree(wl
->fw_status_1
);
1002 static int wl12xx_set_power_on(struct wl1271
*wl
)
1006 msleep(WL1271_PRE_POWER_ON_SLEEP
);
1007 ret
= wl1271_power_on(wl
);
1010 msleep(WL1271_POWER_ON_SLEEP
);
1011 wl1271_io_reset(wl
);
1014 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
1018 /* ELP module wake up */
1019 ret
= wlcore_fw_wakeup(wl
);
1027 wl1271_power_off(wl
);
1031 static int wl12xx_chip_wakeup(struct wl1271
*wl
, bool plt
)
1035 ret
= wl12xx_set_power_on(wl
);
1040 * For wl127x based devices we could use the default block
1041 * size (512 bytes), but due to a bug in the sdio driver, we
1042 * need to set it explicitly after the chip is powered on. To
1043 * simplify the code and since the performance impact is
1044 * negligible, we use the same block size for all different
1047 * Check if the bus supports blocksize alignment and, if it
1048 * doesn't, make sure we don't have the quirk.
1050 if (!wl1271_set_block_size(wl
))
1051 wl
->quirks
&= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN
;
1053 /* TODO: make sure the lower driver has set things up correctly */
1055 ret
= wl1271_setup(wl
);
1059 ret
= wl12xx_fetch_firmware(wl
, plt
);
1067 int wl1271_plt_start(struct wl1271
*wl
, const enum plt_mode plt_mode
)
1069 int retries
= WL1271_BOOT_RETRIES
;
1070 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
1072 static const char* const PLT_MODE
[] = {
1080 mutex_lock(&wl
->mutex
);
1082 wl1271_notice("power up");
1084 if (wl
->state
!= WL1271_STATE_OFF
) {
1085 wl1271_error("cannot go into PLT state because not "
1086 "in off state: %d", wl
->state
);
1091 /* Indicate to lower levels that we are now in PLT mode */
1093 wl
->plt_mode
= plt_mode
;
1097 ret
= wl12xx_chip_wakeup(wl
, true);
1101 ret
= wl
->ops
->plt_init(wl
);
1105 wl
->state
= WL1271_STATE_ON
;
1106 wl1271_notice("firmware booted in PLT mode %s (%s)",
1108 wl
->chip
.fw_ver_str
);
1110 /* update hw/fw version info in wiphy struct */
1111 wiphy
->hw_version
= wl
->chip
.id
;
1112 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
1113 sizeof(wiphy
->fw_version
));
1118 wl1271_power_off(wl
);
1122 wl
->plt_mode
= PLT_OFF
;
1124 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1125 WL1271_BOOT_RETRIES
);
1127 mutex_unlock(&wl
->mutex
);
1132 int wl1271_plt_stop(struct wl1271
*wl
)
1136 wl1271_notice("power down");
1139 * Interrupts must be disabled before setting the state to OFF.
1140 * Otherwise, the interrupt handler might be called and exit without
1141 * reading the interrupt status.
1143 wlcore_disable_interrupts(wl
);
1144 mutex_lock(&wl
->mutex
);
1146 mutex_unlock(&wl
->mutex
);
1149 * This will not necessarily enable interrupts as interrupts
1150 * may have been disabled when op_stop was called. It will,
1151 * however, balance the above call to disable_interrupts().
1153 wlcore_enable_interrupts(wl
);
1155 wl1271_error("cannot power down because not in PLT "
1156 "state: %d", wl
->state
);
1161 mutex_unlock(&wl
->mutex
);
1163 wl1271_flush_deferred_work(wl
);
1164 cancel_work_sync(&wl
->netstack_work
);
1165 cancel_work_sync(&wl
->recovery_work
);
1166 cancel_delayed_work_sync(&wl
->elp_work
);
1167 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1168 cancel_delayed_work_sync(&wl
->connection_loss_work
);
1170 mutex_lock(&wl
->mutex
);
1171 wl1271_power_off(wl
);
1173 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1174 wl
->state
= WL1271_STATE_OFF
;
1176 wl
->plt_mode
= PLT_OFF
;
1178 mutex_unlock(&wl
->mutex
);
1184 static void wl1271_op_tx(struct ieee80211_hw
*hw
, struct sk_buff
*skb
)
1186 struct wl1271
*wl
= hw
->priv
;
1187 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
1188 struct ieee80211_vif
*vif
= info
->control
.vif
;
1189 struct wl12xx_vif
*wlvif
= NULL
;
1190 unsigned long flags
;
1195 wlvif
= wl12xx_vif_to_data(vif
);
1197 mapping
= skb_get_queue_mapping(skb
);
1198 q
= wl1271_tx_get_queue(mapping
);
1200 hlid
= wl12xx_tx_get_hlid(wl
, wlvif
, skb
);
1202 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1205 * drop the packet if the link is invalid or the queue is stopped
1206 * for any reason but watermark. Watermark is a "soft"-stop so we
1207 * allow these packets through.
1209 if (hlid
== WL12XX_INVALID_LINK_ID
||
1210 (wlvif
&& !test_bit(hlid
, wlvif
->links_map
)) ||
1211 (wlcore_is_queue_stopped(wl
, q
) &&
1212 !wlcore_is_queue_stopped_by_reason(wl
, q
,
1213 WLCORE_QUEUE_STOP_REASON_WATERMARK
))) {
1214 wl1271_debug(DEBUG_TX
, "DROP skb hlid %d q %d", hlid
, q
);
1215 ieee80211_free_txskb(hw
, skb
);
1219 wl1271_debug(DEBUG_TX
, "queue skb hlid %d q %d len %d",
1221 skb_queue_tail(&wl
->links
[hlid
].tx_queue
[q
], skb
);
1223 wl
->tx_queue_count
[q
]++;
1226 * The workqueue is slow to process the tx_queue and we need stop
1227 * the queue here, otherwise the queue will get too long.
1229 if (wl
->tx_queue_count
[q
] >= WL1271_TX_QUEUE_HIGH_WATERMARK
&&
1230 !wlcore_is_queue_stopped_by_reason(wl
, q
,
1231 WLCORE_QUEUE_STOP_REASON_WATERMARK
)) {
1232 wl1271_debug(DEBUG_TX
, "op_tx: stopping queues for q %d", q
);
1233 wlcore_stop_queue_locked(wl
, q
,
1234 WLCORE_QUEUE_STOP_REASON_WATERMARK
);
1238 * The chip specific setup must run before the first TX packet -
1239 * before that, the tx_work will not be initialized!
1242 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
1243 !test_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
))
1244 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
1247 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1250 int wl1271_tx_dummy_packet(struct wl1271
*wl
)
1252 unsigned long flags
;
1255 /* no need to queue a new dummy packet if one is already pending */
1256 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
))
1259 q
= wl1271_tx_get_queue(skb_get_queue_mapping(wl
->dummy_packet
));
1261 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1262 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
);
1263 wl
->tx_queue_count
[q
]++;
1264 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1266 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1267 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
))
1268 return wlcore_tx_work_locked(wl
);
1271 * If the FW TX is busy, TX work will be scheduled by the threaded
1272 * interrupt handler function
1278 * The size of the dummy packet should be at least 1400 bytes. However, in
1279 * order to minimize the number of bus transactions, aligning it to 512 bytes
1280 * boundaries could be beneficial, performance wise
1282 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1284 static struct sk_buff
*wl12xx_alloc_dummy_packet(struct wl1271
*wl
)
1286 struct sk_buff
*skb
;
1287 struct ieee80211_hdr_3addr
*hdr
;
1288 unsigned int dummy_packet_size
;
1290 dummy_packet_size
= TOTAL_TX_DUMMY_PACKET_SIZE
-
1291 sizeof(struct wl1271_tx_hw_descr
) - sizeof(*hdr
);
1293 skb
= dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE
);
1295 wl1271_warning("Failed to allocate a dummy packet skb");
1299 skb_reserve(skb
, sizeof(struct wl1271_tx_hw_descr
));
1301 hdr
= (struct ieee80211_hdr_3addr
*) skb_put(skb
, sizeof(*hdr
));
1302 memset(hdr
, 0, sizeof(*hdr
));
1303 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_DATA
|
1304 IEEE80211_STYPE_NULLFUNC
|
1305 IEEE80211_FCTL_TODS
);
1307 memset(skb_put(skb
, dummy_packet_size
), 0, dummy_packet_size
);
1309 /* Dummy packets require the TID to be management */
1310 skb
->priority
= WL1271_TID_MGMT
;
1312 /* Initialize all fields that might be used */
1313 skb_set_queue_mapping(skb
, 0);
1314 memset(IEEE80211_SKB_CB(skb
), 0, sizeof(struct ieee80211_tx_info
));
1322 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern
*p
)
1324 int num_fields
= 0, in_field
= 0, fields_size
= 0;
1325 int i
, pattern_len
= 0;
1328 wl1271_warning("No mask in WoWLAN pattern");
1333 * The pattern is broken up into segments of bytes at different offsets
1334 * that need to be checked by the FW filter. Each segment is called
1335 * a field in the FW API. We verify that the total number of fields
1336 * required for this pattern won't exceed FW limits (8)
1337 * as well as the total fields buffer won't exceed the FW limit.
1338 * Note that if there's a pattern which crosses Ethernet/IP header
1339 * boundary a new field is required.
1341 for (i
= 0; i
< p
->pattern_len
; i
++) {
1342 if (test_bit(i
, (unsigned long *)p
->mask
)) {
1347 if (i
== WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1349 fields_size
+= pattern_len
+
1350 RX_FILTER_FIELD_OVERHEAD
;
1358 fields_size
+= pattern_len
+
1359 RX_FILTER_FIELD_OVERHEAD
;
1366 fields_size
+= pattern_len
+ RX_FILTER_FIELD_OVERHEAD
;
1370 if (num_fields
> WL1271_RX_FILTER_MAX_FIELDS
) {
1371 wl1271_warning("RX Filter too complex. Too many segments");
1375 if (fields_size
> WL1271_RX_FILTER_MAX_FIELDS_SIZE
) {
1376 wl1271_warning("RX filter pattern is too big");
1383 struct wl12xx_rx_filter
*wl1271_rx_filter_alloc(void)
1385 return kzalloc(sizeof(struct wl12xx_rx_filter
), GFP_KERNEL
);
1388 void wl1271_rx_filter_free(struct wl12xx_rx_filter
*filter
)
1395 for (i
= 0; i
< filter
->num_fields
; i
++)
1396 kfree(filter
->fields
[i
].pattern
);
1401 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter
*filter
,
1402 u16 offset
, u8 flags
,
1403 u8
*pattern
, u8 len
)
1405 struct wl12xx_rx_filter_field
*field
;
1407 if (filter
->num_fields
== WL1271_RX_FILTER_MAX_FIELDS
) {
1408 wl1271_warning("Max fields per RX filter. can't alloc another");
1412 field
= &filter
->fields
[filter
->num_fields
];
1414 field
->pattern
= kzalloc(len
, GFP_KERNEL
);
1415 if (!field
->pattern
) {
1416 wl1271_warning("Failed to allocate RX filter pattern");
1420 filter
->num_fields
++;
1422 field
->offset
= cpu_to_le16(offset
);
1423 field
->flags
= flags
;
1425 memcpy(field
->pattern
, pattern
, len
);
1430 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter
*filter
)
1432 int i
, fields_size
= 0;
1434 for (i
= 0; i
< filter
->num_fields
; i
++)
1435 fields_size
+= filter
->fields
[i
].len
+
1436 sizeof(struct wl12xx_rx_filter_field
) -
1442 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter
*filter
,
1446 struct wl12xx_rx_filter_field
*field
;
1448 for (i
= 0; i
< filter
->num_fields
; i
++) {
1449 field
= (struct wl12xx_rx_filter_field
*)buf
;
1451 field
->offset
= filter
->fields
[i
].offset
;
1452 field
->flags
= filter
->fields
[i
].flags
;
1453 field
->len
= filter
->fields
[i
].len
;
1455 memcpy(&field
->pattern
, filter
->fields
[i
].pattern
, field
->len
);
1456 buf
+= sizeof(struct wl12xx_rx_filter_field
) -
1457 sizeof(u8
*) + field
->len
;
1462 * Allocates an RX filter returned through f
1463 * which needs to be freed using rx_filter_free()
1465 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1466 struct cfg80211_wowlan_trig_pkt_pattern
*p
,
1467 struct wl12xx_rx_filter
**f
)
1470 struct wl12xx_rx_filter
*filter
;
1474 filter
= wl1271_rx_filter_alloc();
1476 wl1271_warning("Failed to alloc rx filter");
1482 while (i
< p
->pattern_len
) {
1483 if (!test_bit(i
, (unsigned long *)p
->mask
)) {
1488 for (j
= i
; j
< p
->pattern_len
; j
++) {
1489 if (!test_bit(j
, (unsigned long *)p
->mask
))
1492 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
&&
1493 j
>= WL1271_RX_FILTER_ETH_HEADER_SIZE
)
1497 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1499 flags
= WL1271_RX_FILTER_FLAG_ETHERNET_HEADER
;
1501 offset
= i
- WL1271_RX_FILTER_ETH_HEADER_SIZE
;
1502 flags
= WL1271_RX_FILTER_FLAG_IP_HEADER
;
1507 ret
= wl1271_rx_filter_alloc_field(filter
,
1510 &p
->pattern
[i
], len
);
1517 filter
->action
= FILTER_SIGNAL
;
1523 wl1271_rx_filter_free(filter
);
1529 static int wl1271_configure_wowlan(struct wl1271
*wl
,
1530 struct cfg80211_wowlan
*wow
)
1534 if (!wow
|| wow
->any
|| !wow
->n_patterns
) {
1535 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0,
1540 ret
= wl1271_rx_filter_clear_all(wl
);
1547 if (WARN_ON(wow
->n_patterns
> WL1271_MAX_RX_FILTERS
))
1550 /* Validate all incoming patterns before clearing current FW state */
1551 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1552 ret
= wl1271_validate_wowlan_pattern(&wow
->patterns
[i
]);
1554 wl1271_warning("Bad wowlan pattern %d", i
);
1559 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0, FILTER_SIGNAL
);
1563 ret
= wl1271_rx_filter_clear_all(wl
);
1567 /* Translate WoWLAN patterns into filters */
1568 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1569 struct cfg80211_wowlan_trig_pkt_pattern
*p
;
1570 struct wl12xx_rx_filter
*filter
= NULL
;
1572 p
= &wow
->patterns
[i
];
1574 ret
= wl1271_convert_wowlan_pattern_to_rx_filter(p
, &filter
);
1576 wl1271_warning("Failed to create an RX filter from "
1577 "wowlan pattern %d", i
);
1581 ret
= wl1271_rx_filter_enable(wl
, i
, 1, filter
);
1583 wl1271_rx_filter_free(filter
);
1588 ret
= wl1271_acx_default_rx_filter_enable(wl
, 1, FILTER_DROP
);
1594 static int wl1271_configure_suspend_sta(struct wl1271
*wl
,
1595 struct wl12xx_vif
*wlvif
,
1596 struct cfg80211_wowlan
*wow
)
1600 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1603 if ((wl
->conf
.conn
.suspend_wake_up_event
==
1604 wl
->conf
.conn
.wake_up_event
) &&
1605 (wl
->conf
.conn
.suspend_listen_interval
==
1606 wl
->conf
.conn
.listen_interval
))
1609 ret
= wl1271_ps_elp_wakeup(wl
);
1613 ret
= wl1271_configure_wowlan(wl
, wow
);
1617 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1618 wl
->conf
.conn
.suspend_wake_up_event
,
1619 wl
->conf
.conn
.suspend_listen_interval
);
1622 wl1271_error("suspend: set wake up conditions failed: %d", ret
);
1625 wl1271_ps_elp_sleep(wl
);
1631 static int wl1271_configure_suspend_ap(struct wl1271
*wl
,
1632 struct wl12xx_vif
*wlvif
)
1636 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
1639 ret
= wl1271_ps_elp_wakeup(wl
);
1643 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, true);
1645 wl1271_ps_elp_sleep(wl
);
1651 static int wl1271_configure_suspend(struct wl1271
*wl
,
1652 struct wl12xx_vif
*wlvif
,
1653 struct cfg80211_wowlan
*wow
)
1655 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
)
1656 return wl1271_configure_suspend_sta(wl
, wlvif
, wow
);
1657 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
1658 return wl1271_configure_suspend_ap(wl
, wlvif
);
1662 static void wl1271_configure_resume(struct wl1271
*wl
,
1663 struct wl12xx_vif
*wlvif
)
1666 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
1667 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
1669 if ((!is_ap
) && (!is_sta
))
1673 ((wl
->conf
.conn
.suspend_wake_up_event
==
1674 wl
->conf
.conn
.wake_up_event
) &&
1675 (wl
->conf
.conn
.suspend_listen_interval
==
1676 wl
->conf
.conn
.listen_interval
)))
1679 ret
= wl1271_ps_elp_wakeup(wl
);
1684 wl1271_configure_wowlan(wl
, NULL
);
1686 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1687 wl
->conf
.conn
.wake_up_event
,
1688 wl
->conf
.conn
.listen_interval
);
1691 wl1271_error("resume: wake up conditions failed: %d",
1695 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, false);
1698 wl1271_ps_elp_sleep(wl
);
1701 static int wl1271_op_suspend(struct ieee80211_hw
*hw
,
1702 struct cfg80211_wowlan
*wow
)
1704 struct wl1271
*wl
= hw
->priv
;
1705 struct wl12xx_vif
*wlvif
;
1708 wl1271_debug(DEBUG_MAC80211
, "mac80211 suspend wow=%d", !!wow
);
1711 /* we want to perform the recovery before suspending */
1712 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
1713 wl1271_warning("postponing suspend to perform recovery");
1717 wl1271_tx_flush(wl
);
1719 mutex_lock(&wl
->mutex
);
1720 wl
->wow_enabled
= true;
1721 wl12xx_for_each_wlvif(wl
, wlvif
) {
1722 ret
= wl1271_configure_suspend(wl
, wlvif
, wow
);
1724 mutex_unlock(&wl
->mutex
);
1725 wl1271_warning("couldn't prepare device to suspend");
1729 mutex_unlock(&wl
->mutex
);
1730 /* flush any remaining work */
1731 wl1271_debug(DEBUG_MAC80211
, "flushing remaining works");
1734 * disable and re-enable interrupts in order to flush
1737 wlcore_disable_interrupts(wl
);
1740 * set suspended flag to avoid triggering a new threaded_irq
1741 * work. no need for spinlock as interrupts are disabled.
1743 set_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1745 wlcore_enable_interrupts(wl
);
1746 flush_work(&wl
->tx_work
);
1747 flush_delayed_work(&wl
->elp_work
);
1752 static int wl1271_op_resume(struct ieee80211_hw
*hw
)
1754 struct wl1271
*wl
= hw
->priv
;
1755 struct wl12xx_vif
*wlvif
;
1756 unsigned long flags
;
1757 bool run_irq_work
= false, pending_recovery
;
1760 wl1271_debug(DEBUG_MAC80211
, "mac80211 resume wow=%d",
1762 WARN_ON(!wl
->wow_enabled
);
1765 * re-enable irq_work enqueuing, and call irq_work directly if
1766 * there is a pending work.
1768 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1769 clear_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1770 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
))
1771 run_irq_work
= true;
1772 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1774 mutex_lock(&wl
->mutex
);
1776 /* test the recovery flag before calling any SDIO functions */
1777 pending_recovery
= test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1781 wl1271_debug(DEBUG_MAC80211
,
1782 "run postponed irq_work directly");
1784 /* don't talk to the HW if recovery is pending */
1785 if (!pending_recovery
) {
1786 ret
= wlcore_irq_locked(wl
);
1788 wl12xx_queue_recovery_work(wl
);
1791 wlcore_enable_interrupts(wl
);
1794 if (pending_recovery
) {
1795 wl1271_warning("queuing forgotten recovery on resume");
1796 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
1800 wl12xx_for_each_wlvif(wl
, wlvif
) {
1801 wl1271_configure_resume(wl
, wlvif
);
1805 wl
->wow_enabled
= false;
1806 mutex_unlock(&wl
->mutex
);
1812 static int wl1271_op_start(struct ieee80211_hw
*hw
)
1814 wl1271_debug(DEBUG_MAC80211
, "mac80211 start");
1817 * We have to delay the booting of the hardware because
1818 * we need to know the local MAC address before downloading and
1819 * initializing the firmware. The MAC address cannot be changed
1820 * after boot, and without the proper MAC address, the firmware
1821 * will not function properly.
1823 * The MAC address is first known when the corresponding interface
1824 * is added. That is where we will initialize the hardware.
1830 static void wlcore_op_stop_locked(struct wl1271
*wl
)
1834 if (wl
->state
== WL1271_STATE_OFF
) {
1835 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1837 wlcore_enable_interrupts(wl
);
1843 * this must be before the cancel_work calls below, so that the work
1844 * functions don't perform further work.
1846 wl
->state
= WL1271_STATE_OFF
;
1849 * Use the nosync variant to disable interrupts, so the mutex could be
1850 * held while doing so without deadlocking.
1852 wlcore_disable_interrupts_nosync(wl
);
1854 mutex_unlock(&wl
->mutex
);
1856 wlcore_synchronize_interrupts(wl
);
1857 wl1271_flush_deferred_work(wl
);
1858 cancel_delayed_work_sync(&wl
->scan_complete_work
);
1859 cancel_work_sync(&wl
->netstack_work
);
1860 cancel_work_sync(&wl
->tx_work
);
1861 cancel_delayed_work_sync(&wl
->elp_work
);
1862 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1863 cancel_delayed_work_sync(&wl
->connection_loss_work
);
1865 /* let's notify MAC80211 about the remaining pending TX frames */
1866 wl12xx_tx_reset(wl
);
1867 mutex_lock(&wl
->mutex
);
1869 wl1271_power_off(wl
);
1871 * In case a recovery was scheduled, interrupts were disabled to avoid
1872 * an interrupt storm. Now that the power is down, it is safe to
1873 * re-enable interrupts to balance the disable depth
1875 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
1876 wlcore_enable_interrupts(wl
);
1878 wl
->band
= IEEE80211_BAND_2GHZ
;
1881 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
1882 wl
->channel_type
= NL80211_CHAN_NO_HT
;
1883 wl
->tx_blocks_available
= 0;
1884 wl
->tx_allocated_blocks
= 0;
1885 wl
->tx_results_count
= 0;
1886 wl
->tx_packets_count
= 0;
1887 wl
->time_offset
= 0;
1888 wl
->ap_fw_ps_map
= 0;
1890 wl
->sched_scanning
= false;
1891 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1892 memset(wl
->roles_map
, 0, sizeof(wl
->roles_map
));
1893 memset(wl
->links_map
, 0, sizeof(wl
->links_map
));
1894 memset(wl
->roc_map
, 0, sizeof(wl
->roc_map
));
1895 wl
->active_sta_count
= 0;
1897 /* The system link is always allocated */
1898 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
1901 * this is performed after the cancel_work calls and the associated
1902 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1903 * get executed before all these vars have been reset.
1907 wl
->tx_blocks_freed
= 0;
1909 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
1910 wl
->tx_pkts_freed
[i
] = 0;
1911 wl
->tx_allocated_pkts
[i
] = 0;
1914 wl1271_debugfs_reset(wl
);
1916 kfree(wl
->fw_status_1
);
1917 wl
->fw_status_1
= NULL
;
1918 wl
->fw_status_2
= NULL
;
1919 kfree(wl
->tx_res_if
);
1920 wl
->tx_res_if
= NULL
;
1921 kfree(wl
->target_mem_map
);
1922 wl
->target_mem_map
= NULL
;
1925 static void wlcore_op_stop(struct ieee80211_hw
*hw
)
1927 struct wl1271
*wl
= hw
->priv
;
1929 wl1271_debug(DEBUG_MAC80211
, "mac80211 stop");
1931 mutex_lock(&wl
->mutex
);
1933 wlcore_op_stop_locked(wl
);
1935 mutex_unlock(&wl
->mutex
);
1938 static int wl12xx_allocate_rate_policy(struct wl1271
*wl
, u8
*idx
)
1940 u8 policy
= find_first_zero_bit(wl
->rate_policies_map
,
1941 WL12XX_MAX_RATE_POLICIES
);
1942 if (policy
>= WL12XX_MAX_RATE_POLICIES
)
1945 __set_bit(policy
, wl
->rate_policies_map
);
1950 static void wl12xx_free_rate_policy(struct wl1271
*wl
, u8
*idx
)
1952 if (WARN_ON(*idx
>= WL12XX_MAX_RATE_POLICIES
))
1955 __clear_bit(*idx
, wl
->rate_policies_map
);
1956 *idx
= WL12XX_MAX_RATE_POLICIES
;
1959 static u8
wl12xx_get_role_type(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
1961 switch (wlvif
->bss_type
) {
1962 case BSS_TYPE_AP_BSS
:
1964 return WL1271_ROLE_P2P_GO
;
1966 return WL1271_ROLE_AP
;
1968 case BSS_TYPE_STA_BSS
:
1970 return WL1271_ROLE_P2P_CL
;
1972 return WL1271_ROLE_STA
;
1975 return WL1271_ROLE_IBSS
;
1978 wl1271_error("invalid bss_type: %d", wlvif
->bss_type
);
1980 return WL12XX_INVALID_ROLE_TYPE
;
1983 static int wl12xx_init_vif_data(struct wl1271
*wl
, struct ieee80211_vif
*vif
)
1985 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
1988 /* clear everything but the persistent data */
1989 memset(wlvif
, 0, offsetof(struct wl12xx_vif
, persistent
));
1991 switch (ieee80211_vif_type_p2p(vif
)) {
1992 case NL80211_IFTYPE_P2P_CLIENT
:
1995 case NL80211_IFTYPE_STATION
:
1996 wlvif
->bss_type
= BSS_TYPE_STA_BSS
;
1998 case NL80211_IFTYPE_ADHOC
:
1999 wlvif
->bss_type
= BSS_TYPE_IBSS
;
2001 case NL80211_IFTYPE_P2P_GO
:
2004 case NL80211_IFTYPE_AP
:
2005 wlvif
->bss_type
= BSS_TYPE_AP_BSS
;
2008 wlvif
->bss_type
= MAX_BSS_TYPE
;
2012 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2013 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2014 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2016 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2017 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2018 /* init sta/ibss data */
2019 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2020 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2021 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2022 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2023 wlvif
->basic_rate_set
= CONF_TX_RATE_MASK_BASIC
;
2024 wlvif
->basic_rate
= CONF_TX_RATE_MASK_BASIC
;
2025 wlvif
->rate_set
= CONF_TX_RATE_MASK_BASIC
;
2028 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2029 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2030 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2031 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2032 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2033 wl12xx_allocate_rate_policy(wl
,
2034 &wlvif
->ap
.ucast_rate_idx
[i
]);
2035 wlvif
->basic_rate_set
= CONF_TX_AP_ENABLED_RATES
;
2037 * TODO: check if basic_rate shouldn't be
2038 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2039 * instead (the same thing for STA above).
2041 wlvif
->basic_rate
= CONF_TX_AP_ENABLED_RATES
;
2042 /* TODO: this seems to be used only for STA, check it */
2043 wlvif
->rate_set
= CONF_TX_AP_ENABLED_RATES
;
2046 wlvif
->bitrate_masks
[IEEE80211_BAND_2GHZ
] = wl
->conf
.tx
.basic_rate
;
2047 wlvif
->bitrate_masks
[IEEE80211_BAND_5GHZ
] = wl
->conf
.tx
.basic_rate_5
;
2048 wlvif
->beacon_int
= WL1271_DEFAULT_BEACON_INT
;
2051 * mac80211 configures some values globally, while we treat them
2052 * per-interface. thus, on init, we have to copy them from wl
2054 wlvif
->band
= wl
->band
;
2055 wlvif
->channel
= wl
->channel
;
2056 wlvif
->power_level
= wl
->power_level
;
2057 wlvif
->channel_type
= wl
->channel_type
;
2059 INIT_WORK(&wlvif
->rx_streaming_enable_work
,
2060 wl1271_rx_streaming_enable_work
);
2061 INIT_WORK(&wlvif
->rx_streaming_disable_work
,
2062 wl1271_rx_streaming_disable_work
);
2063 INIT_LIST_HEAD(&wlvif
->list
);
2065 setup_timer(&wlvif
->rx_streaming_timer
, wl1271_rx_streaming_timer
,
2066 (unsigned long) wlvif
);
2070 static bool wl12xx_init_fw(struct wl1271
*wl
)
2072 int retries
= WL1271_BOOT_RETRIES
;
2073 bool booted
= false;
2074 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
2079 ret
= wl12xx_chip_wakeup(wl
, false);
2083 ret
= wl
->ops
->boot(wl
);
2087 ret
= wl1271_hw_init(wl
);
2095 mutex_unlock(&wl
->mutex
);
2096 /* Unlocking the mutex in the middle of handling is
2097 inherently unsafe. In this case we deem it safe to do,
2098 because we need to let any possibly pending IRQ out of
2099 the system (and while we are WL1271_STATE_OFF the IRQ
2100 work function will not do anything.) Also, any other
2101 possible concurrent operations will fail due to the
2102 current state, hence the wl1271 struct should be safe. */
2103 wlcore_disable_interrupts(wl
);
2104 wl1271_flush_deferred_work(wl
);
2105 cancel_work_sync(&wl
->netstack_work
);
2106 mutex_lock(&wl
->mutex
);
2108 wl1271_power_off(wl
);
2112 wl1271_error("firmware boot failed despite %d retries",
2113 WL1271_BOOT_RETRIES
);
2117 wl1271_info("firmware booted (%s)", wl
->chip
.fw_ver_str
);
2119 /* update hw/fw version info in wiphy struct */
2120 wiphy
->hw_version
= wl
->chip
.id
;
2121 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
2122 sizeof(wiphy
->fw_version
));
2125 * Now we know if 11a is supported (info from the NVS), so disable
2126 * 11a channels if not supported
2128 if (!wl
->enable_11a
)
2129 wiphy
->bands
[IEEE80211_BAND_5GHZ
]->n_channels
= 0;
2131 wl1271_debug(DEBUG_MAC80211
, "11a is %ssupported",
2132 wl
->enable_11a
? "" : "not ");
2134 wl
->state
= WL1271_STATE_ON
;
2139 static bool wl12xx_dev_role_started(struct wl12xx_vif
*wlvif
)
2141 return wlvif
->dev_hlid
!= WL12XX_INVALID_LINK_ID
;
2145 * Check whether a fw switch (i.e. moving from one loaded
2146 * fw to another) is needed. This function is also responsible
2147 * for updating wl->last_vif_count, so it must be called before
2148 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2151 static bool wl12xx_need_fw_change(struct wl1271
*wl
,
2152 struct vif_counter_data vif_counter_data
,
2155 enum wl12xx_fw_type current_fw
= wl
->fw_type
;
2156 u8 vif_count
= vif_counter_data
.counter
;
2158 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
))
2161 /* increase the vif count if this is a new vif */
2162 if (add
&& !vif_counter_data
.cur_vif_running
)
2165 wl
->last_vif_count
= vif_count
;
2167 /* no need for fw change if the device is OFF */
2168 if (wl
->state
== WL1271_STATE_OFF
)
2171 if (vif_count
> 1 && current_fw
== WL12XX_FW_TYPE_NORMAL
)
2173 if (vif_count
<= 1 && current_fw
== WL12XX_FW_TYPE_MULTI
)
2180 * Enter "forced psm". Make sure the sta is in psm against the ap,
2181 * to make the fw switch a bit more disconnection-persistent.
2183 static void wl12xx_force_active_psm(struct wl1271
*wl
)
2185 struct wl12xx_vif
*wlvif
;
2187 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
2188 wl1271_ps_set_mode(wl
, wlvif
, STATION_POWER_SAVE_MODE
);
2192 static int wl1271_op_add_interface(struct ieee80211_hw
*hw
,
2193 struct ieee80211_vif
*vif
)
2195 struct wl1271
*wl
= hw
->priv
;
2196 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2197 struct vif_counter_data vif_count
;
2200 bool booted
= false;
2202 vif
->driver_flags
|= IEEE80211_VIF_BEACON_FILTER
|
2203 IEEE80211_VIF_SUPPORTS_CQM_RSSI
;
2205 wl1271_debug(DEBUG_MAC80211
, "mac80211 add interface type %d mac %pM",
2206 ieee80211_vif_type_p2p(vif
), vif
->addr
);
2208 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2210 mutex_lock(&wl
->mutex
);
2211 ret
= wl1271_ps_elp_wakeup(wl
);
2216 * in some very corner case HW recovery scenarios its possible to
2217 * get here before __wl1271_op_remove_interface is complete, so
2218 * opt out if that is the case.
2220 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) ||
2221 test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)) {
2227 ret
= wl12xx_init_vif_data(wl
, vif
);
2232 role_type
= wl12xx_get_role_type(wl
, wlvif
);
2233 if (role_type
== WL12XX_INVALID_ROLE_TYPE
) {
2238 if (wl12xx_need_fw_change(wl
, vif_count
, true)) {
2239 wl12xx_force_active_psm(wl
);
2240 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2241 mutex_unlock(&wl
->mutex
);
2242 wl1271_recovery_work(&wl
->recovery_work
);
2247 * TODO: after the nvs issue will be solved, move this block
2248 * to start(), and make sure here the driver is ON.
2250 if (wl
->state
== WL1271_STATE_OFF
) {
2252 * we still need this in order to configure the fw
2253 * while uploading the nvs
2255 memcpy(wl
->addresses
[0].addr
, vif
->addr
, ETH_ALEN
);
2257 booted
= wl12xx_init_fw(wl
);
2264 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2265 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2267 * The device role is a special role used for
2268 * rx and tx frames prior to association (as
2269 * the STA role can get packets only from
2270 * its associated bssid)
2272 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2274 &wlvif
->dev_role_id
);
2279 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2280 role_type
, &wlvif
->role_id
);
2284 ret
= wl1271_init_vif_specific(wl
, vif
);
2288 list_add(&wlvif
->list
, &wl
->wlvif_list
);
2289 set_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
);
2291 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2296 wl1271_ps_elp_sleep(wl
);
2298 mutex_unlock(&wl
->mutex
);
2303 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
2304 struct ieee80211_vif
*vif
,
2305 bool reset_tx_queues
)
2307 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2309 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2311 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove interface");
2313 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2316 /* because of hardware recovery, we may get here twice */
2317 if (wl
->state
!= WL1271_STATE_ON
)
2320 wl1271_info("down");
2322 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
&&
2323 wl
->scan_vif
== vif
) {
2325 * Rearm the tx watchdog just before idling scan. This
2326 * prevents just-finished scans from triggering the watchdog
2328 wl12xx_rearm_tx_watchdog_locked(wl
);
2330 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
2331 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
2332 wl
->scan_vif
= NULL
;
2333 wl
->scan
.req
= NULL
;
2334 ieee80211_scan_completed(wl
->hw
, true);
2337 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
2338 /* disable active roles */
2339 ret
= wl1271_ps_elp_wakeup(wl
);
2343 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2344 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2345 if (wl12xx_dev_role_started(wlvif
))
2346 wl12xx_stop_dev(wl
, wlvif
);
2348 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->dev_role_id
);
2353 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->role_id
);
2357 wl1271_ps_elp_sleep(wl
);
2360 /* clear all hlids (except system_hlid) */
2361 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2363 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2364 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2365 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2366 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2367 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2368 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2370 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2371 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2372 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2373 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2374 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2375 wl12xx_free_rate_policy(wl
,
2376 &wlvif
->ap
.ucast_rate_idx
[i
]);
2377 wl1271_free_ap_keys(wl
, wlvif
);
2380 dev_kfree_skb(wlvif
->probereq
);
2381 wlvif
->probereq
= NULL
;
2382 wl12xx_tx_reset_wlvif(wl
, wlvif
);
2383 if (wl
->last_wlvif
== wlvif
)
2384 wl
->last_wlvif
= NULL
;
2385 list_del(&wlvif
->list
);
2386 memset(wlvif
->ap
.sta_hlid_map
, 0, sizeof(wlvif
->ap
.sta_hlid_map
));
2387 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2388 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2396 * Last AP, have more stations. Configure sleep auth according to STA.
2397 * Don't do thin on unintended recovery.
2399 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) &&
2400 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
))
2403 if (wl
->ap_count
== 0 && is_ap
&& wl
->sta_count
) {
2404 u8 sta_auth
= wl
->conf
.conn
.sta_sleep_auth
;
2405 /* Configure for power according to debugfs */
2406 if (sta_auth
!= WL1271_PSM_ILLEGAL
)
2407 wl1271_acx_sleep_auth(wl
, sta_auth
);
2408 /* Configure for power always on */
2409 else if (wl
->quirks
& WLCORE_QUIRK_NO_ELP
)
2410 wl1271_acx_sleep_auth(wl
, WL1271_PSM_CAM
);
2411 /* Configure for ELP power saving */
2413 wl1271_acx_sleep_auth(wl
, WL1271_PSM_ELP
);
2417 mutex_unlock(&wl
->mutex
);
2419 del_timer_sync(&wlvif
->rx_streaming_timer
);
2420 cancel_work_sync(&wlvif
->rx_streaming_enable_work
);
2421 cancel_work_sync(&wlvif
->rx_streaming_disable_work
);
2423 mutex_lock(&wl
->mutex
);
2426 static void wl1271_op_remove_interface(struct ieee80211_hw
*hw
,
2427 struct ieee80211_vif
*vif
)
2429 struct wl1271
*wl
= hw
->priv
;
2430 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2431 struct wl12xx_vif
*iter
;
2432 struct vif_counter_data vif_count
;
2433 bool cancel_recovery
= true;
2435 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2436 mutex_lock(&wl
->mutex
);
2438 if (wl
->state
== WL1271_STATE_OFF
||
2439 !test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2443 * wl->vif can be null here if someone shuts down the interface
2444 * just when hardware recovery has been started.
2446 wl12xx_for_each_wlvif(wl
, iter
) {
2450 __wl1271_op_remove_interface(wl
, vif
, true);
2453 WARN_ON(iter
!= wlvif
);
2454 if (wl12xx_need_fw_change(wl
, vif_count
, false)) {
2455 wl12xx_force_active_psm(wl
);
2456 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2457 wl12xx_queue_recovery_work(wl
);
2458 cancel_recovery
= false;
2461 mutex_unlock(&wl
->mutex
);
2462 if (cancel_recovery
)
2463 cancel_work_sync(&wl
->recovery_work
);
2466 static int wl12xx_op_change_interface(struct ieee80211_hw
*hw
,
2467 struct ieee80211_vif
*vif
,
2468 enum nl80211_iftype new_type
, bool p2p
)
2470 struct wl1271
*wl
= hw
->priv
;
2473 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2474 wl1271_op_remove_interface(hw
, vif
);
2476 vif
->type
= new_type
;
2478 ret
= wl1271_op_add_interface(hw
, vif
);
2480 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2484 static int wl1271_join(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2488 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
2491 * One of the side effects of the JOIN command is that is clears
2492 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2493 * to a WPA/WPA2 access point will therefore kill the data-path.
2494 * Currently the only valid scenario for JOIN during association
2495 * is on roaming, in which case we will also be given new keys.
2496 * Keep the below message for now, unless it starts bothering
2497 * users who really like to roam a lot :)
2499 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2500 wl1271_info("JOIN while associated.");
2502 /* clear encryption type */
2503 wlvif
->encryption_type
= KEY_NONE
;
2506 set_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
);
2509 ret
= wl12xx_cmd_role_start_ibss(wl
, wlvif
);
2511 ret
= wl12xx_cmd_role_start_sta(wl
, wlvif
);
2515 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2519 * The join command disable the keep-alive mode, shut down its process,
2520 * and also clear the template config, so we need to reset it all after
2521 * the join. The acx_aid starts the keep-alive process, and the order
2522 * of the commands below is relevant.
2524 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, true);
2528 ret
= wl1271_acx_aid(wl
, wlvif
, wlvif
->aid
);
2532 ret
= wl12xx_cmd_build_klv_null_data(wl
, wlvif
);
2536 ret
= wl1271_acx_keep_alive_config(wl
, wlvif
,
2537 CMD_TEMPL_KLV_IDX_NULL_DATA
,
2538 ACX_KEEP_ALIVE_TPL_VALID
);
2546 static int wl1271_unjoin(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2550 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
)) {
2551 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2553 wl12xx_cmd_stop_channel_switch(wl
);
2554 ieee80211_chswitch_done(vif
, false);
2557 /* to stop listening to a channel, we disconnect */
2558 ret
= wl12xx_cmd_role_stop_sta(wl
, wlvif
);
2562 /* reset TX security counters on a clean disconnect */
2563 wlvif
->tx_security_last_seq_lsb
= 0;
2564 wlvif
->tx_security_seq
= 0;
2570 static void wl1271_set_band_rate(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2572 wlvif
->basic_rate_set
= wlvif
->bitrate_masks
[wlvif
->band
];
2573 wlvif
->rate_set
= wlvif
->basic_rate_set
;
2576 static int wl1271_sta_handle_idle(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2580 bool cur_idle
= !test_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2582 if (idle
== cur_idle
)
2586 /* no need to croc if we weren't busy (e.g. during boot) */
2587 if (wl12xx_dev_role_started(wlvif
)) {
2588 ret
= wl12xx_stop_dev(wl
, wlvif
);
2593 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
2594 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
2597 ret
= wl1271_acx_keep_alive_config(
2598 wl
, wlvif
, CMD_TEMPL_KLV_IDX_NULL_DATA
,
2599 ACX_KEEP_ALIVE_TPL_INVALID
);
2602 clear_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2604 /* The current firmware only supports sched_scan in idle */
2605 if (wl
->sched_scanning
) {
2606 wl1271_scan_sched_scan_stop(wl
, wlvif
);
2607 ieee80211_sched_scan_stopped(wl
->hw
);
2610 ret
= wl12xx_start_dev(wl
, wlvif
);
2613 set_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2620 static int wl12xx_config_vif(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2621 struct ieee80211_conf
*conf
, u32 changed
)
2623 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2626 channel
= ieee80211_frequency_to_channel(conf
->channel
->center_freq
);
2628 /* if the channel changes while joined, join again */
2629 if (changed
& IEEE80211_CONF_CHANGE_CHANNEL
&&
2630 ((wlvif
->band
!= conf
->channel
->band
) ||
2631 (wlvif
->channel
!= channel
) ||
2632 (wlvif
->channel_type
!= conf
->channel_type
))) {
2633 /* send all pending packets */
2634 ret
= wlcore_tx_work_locked(wl
);
2638 wlvif
->band
= conf
->channel
->band
;
2639 wlvif
->channel
= channel
;
2640 wlvif
->channel_type
= conf
->channel_type
;
2643 wl1271_set_band_rate(wl
, wlvif
);
2644 ret
= wl1271_init_ap_rates(wl
, wlvif
);
2646 wl1271_error("AP rate policy change failed %d",
2650 * FIXME: the mac80211 should really provide a fixed
2651 * rate to use here. for now, just use the smallest
2652 * possible rate for the band as a fixed rate for
2653 * association frames and other control messages.
2655 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2656 wl1271_set_band_rate(wl
, wlvif
);
2659 wl1271_tx_min_rate_get(wl
,
2660 wlvif
->basic_rate_set
);
2661 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
2663 wl1271_warning("rate policy for channel "
2667 * change the ROC channel. do it only if we are
2668 * not idle. otherwise, CROC will be called
2671 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
,
2673 wl12xx_dev_role_started(wlvif
) &&
2674 !(conf
->flags
& IEEE80211_CONF_IDLE
)) {
2675 ret
= wl12xx_stop_dev(wl
, wlvif
);
2679 ret
= wl12xx_start_dev(wl
, wlvif
);
2686 if ((changed
& IEEE80211_CONF_CHANGE_PS
) && !is_ap
) {
2688 if ((conf
->flags
& IEEE80211_CONF_PS
) &&
2689 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
2690 !test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
2695 if (wl
->conf
.conn
.forced_ps
) {
2696 ps_mode
= STATION_POWER_SAVE_MODE
;
2697 ps_mode_str
= "forced";
2699 ps_mode
= STATION_AUTO_PS_MODE
;
2700 ps_mode_str
= "auto";
2703 wl1271_debug(DEBUG_PSM
, "%s ps enabled", ps_mode_str
);
2705 ret
= wl1271_ps_set_mode(wl
, wlvif
, ps_mode
);
2708 wl1271_warning("enter %s ps failed %d",
2711 } else if (!(conf
->flags
& IEEE80211_CONF_PS
) &&
2712 test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
2714 wl1271_debug(DEBUG_PSM
, "auto ps disabled");
2716 ret
= wl1271_ps_set_mode(wl
, wlvif
,
2717 STATION_ACTIVE_MODE
);
2719 wl1271_warning("exit auto ps failed %d", ret
);
2723 if (conf
->power_level
!= wlvif
->power_level
) {
2724 ret
= wl1271_acx_tx_power(wl
, wlvif
, conf
->power_level
);
2728 wlvif
->power_level
= conf
->power_level
;
2734 static int wl1271_op_config(struct ieee80211_hw
*hw
, u32 changed
)
2736 struct wl1271
*wl
= hw
->priv
;
2737 struct wl12xx_vif
*wlvif
;
2738 struct ieee80211_conf
*conf
= &hw
->conf
;
2739 int channel
, ret
= 0;
2741 channel
= ieee80211_frequency_to_channel(conf
->channel
->center_freq
);
2743 wl1271_debug(DEBUG_MAC80211
, "mac80211 config ch %d psm %s power %d %s"
2746 conf
->flags
& IEEE80211_CONF_PS
? "on" : "off",
2748 conf
->flags
& IEEE80211_CONF_IDLE
? "idle" : "in use",
2752 * mac80211 will go to idle nearly immediately after transmitting some
2753 * frames, such as the deauth. To make sure those frames reach the air,
2754 * wait here until the TX queue is fully flushed.
2756 if ((changed
& IEEE80211_CONF_CHANGE_CHANNEL
) ||
2757 ((changed
& IEEE80211_CONF_CHANGE_IDLE
) &&
2758 (conf
->flags
& IEEE80211_CONF_IDLE
)))
2759 wl1271_tx_flush(wl
);
2761 mutex_lock(&wl
->mutex
);
2763 /* we support configuring the channel and band even while off */
2764 if (changed
& IEEE80211_CONF_CHANGE_CHANNEL
) {
2765 wl
->band
= conf
->channel
->band
;
2766 wl
->channel
= channel
;
2767 wl
->channel_type
= conf
->channel_type
;
2770 if (changed
& IEEE80211_CONF_CHANGE_POWER
)
2771 wl
->power_level
= conf
->power_level
;
2773 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
2776 ret
= wl1271_ps_elp_wakeup(wl
);
2780 /* configure each interface */
2781 wl12xx_for_each_wlvif(wl
, wlvif
) {
2782 ret
= wl12xx_config_vif(wl
, wlvif
, conf
, changed
);
2788 wl1271_ps_elp_sleep(wl
);
2791 mutex_unlock(&wl
->mutex
);
2796 struct wl1271_filter_params
{
2799 u8 mc_list
[ACX_MC_ADDRESS_GROUP_MAX
][ETH_ALEN
];
2802 static u64
wl1271_op_prepare_multicast(struct ieee80211_hw
*hw
,
2803 struct netdev_hw_addr_list
*mc_list
)
2805 struct wl1271_filter_params
*fp
;
2806 struct netdev_hw_addr
*ha
;
2807 struct wl1271
*wl
= hw
->priv
;
2809 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
2812 fp
= kzalloc(sizeof(*fp
), GFP_ATOMIC
);
2814 wl1271_error("Out of memory setting filters.");
2818 /* update multicast filtering parameters */
2819 fp
->mc_list_length
= 0;
2820 if (netdev_hw_addr_list_count(mc_list
) > ACX_MC_ADDRESS_GROUP_MAX
) {
2821 fp
->enabled
= false;
2824 netdev_hw_addr_list_for_each(ha
, mc_list
) {
2825 memcpy(fp
->mc_list
[fp
->mc_list_length
],
2826 ha
->addr
, ETH_ALEN
);
2827 fp
->mc_list_length
++;
2831 return (u64
)(unsigned long)fp
;
2834 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2837 FIF_BCN_PRBRESP_PROMISC | \
2841 static void wl1271_op_configure_filter(struct ieee80211_hw
*hw
,
2842 unsigned int changed
,
2843 unsigned int *total
, u64 multicast
)
2845 struct wl1271_filter_params
*fp
= (void *)(unsigned long)multicast
;
2846 struct wl1271
*wl
= hw
->priv
;
2847 struct wl12xx_vif
*wlvif
;
2851 wl1271_debug(DEBUG_MAC80211
, "mac80211 configure filter changed %x"
2852 " total %x", changed
, *total
);
2854 mutex_lock(&wl
->mutex
);
2856 *total
&= WL1271_SUPPORTED_FILTERS
;
2857 changed
&= WL1271_SUPPORTED_FILTERS
;
2859 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
2862 ret
= wl1271_ps_elp_wakeup(wl
);
2866 wl12xx_for_each_wlvif(wl
, wlvif
) {
2867 if (wlvif
->bss_type
!= BSS_TYPE_AP_BSS
) {
2868 if (*total
& FIF_ALLMULTI
)
2869 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
2873 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
2876 fp
->mc_list_length
);
2883 * the fw doesn't provide an api to configure the filters. instead,
2884 * the filters configuration is based on the active roles / ROC
2889 wl1271_ps_elp_sleep(wl
);
2892 mutex_unlock(&wl
->mutex
);
2896 static int wl1271_record_ap_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2897 u8 id
, u8 key_type
, u8 key_size
,
2898 const u8
*key
, u8 hlid
, u32 tx_seq_32
,
2901 struct wl1271_ap_key
*ap_key
;
2904 wl1271_debug(DEBUG_CRYPT
, "record ap key id %d", (int)id
);
2906 if (key_size
> MAX_KEY_SIZE
)
2910 * Find next free entry in ap_keys. Also check we are not replacing
2913 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2914 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
2917 if (wlvif
->ap
.recorded_keys
[i
]->id
== id
) {
2918 wl1271_warning("trying to record key replacement");
2923 if (i
== MAX_NUM_KEYS
)
2926 ap_key
= kzalloc(sizeof(*ap_key
), GFP_KERNEL
);
2931 ap_key
->key_type
= key_type
;
2932 ap_key
->key_size
= key_size
;
2933 memcpy(ap_key
->key
, key
, key_size
);
2934 ap_key
->hlid
= hlid
;
2935 ap_key
->tx_seq_32
= tx_seq_32
;
2936 ap_key
->tx_seq_16
= tx_seq_16
;
2938 wlvif
->ap
.recorded_keys
[i
] = ap_key
;
2942 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2946 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2947 kfree(wlvif
->ap
.recorded_keys
[i
]);
2948 wlvif
->ap
.recorded_keys
[i
] = NULL
;
2952 static int wl1271_ap_init_hwenc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2955 struct wl1271_ap_key
*key
;
2956 bool wep_key_added
= false;
2958 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2960 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
2963 key
= wlvif
->ap
.recorded_keys
[i
];
2965 if (hlid
== WL12XX_INVALID_LINK_ID
)
2966 hlid
= wlvif
->ap
.bcast_hlid
;
2968 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
2969 key
->id
, key
->key_type
,
2970 key
->key_size
, key
->key
,
2971 hlid
, key
->tx_seq_32
,
2976 if (key
->key_type
== KEY_WEP
)
2977 wep_key_added
= true;
2980 if (wep_key_added
) {
2981 ret
= wl12xx_cmd_set_default_wep_key(wl
, wlvif
->default_key
,
2982 wlvif
->ap
.bcast_hlid
);
2988 wl1271_free_ap_keys(wl
, wlvif
);
2992 static int wl1271_set_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2993 u16 action
, u8 id
, u8 key_type
,
2994 u8 key_size
, const u8
*key
, u32 tx_seq_32
,
2995 u16 tx_seq_16
, struct ieee80211_sta
*sta
)
2998 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3001 struct wl1271_station
*wl_sta
;
3005 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
3006 hlid
= wl_sta
->hlid
;
3008 hlid
= wlvif
->ap
.bcast_hlid
;
3011 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3013 * We do not support removing keys after AP shutdown.
3014 * Pretend we do to make mac80211 happy.
3016 if (action
!= KEY_ADD_OR_REPLACE
)
3019 ret
= wl1271_record_ap_key(wl
, wlvif
, id
,
3021 key
, hlid
, tx_seq_32
,
3024 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, action
,
3025 id
, key_type
, key_size
,
3026 key
, hlid
, tx_seq_32
,
3034 static const u8 bcast_addr
[ETH_ALEN
] = {
3035 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3038 addr
= sta
? sta
->addr
: bcast_addr
;
3040 if (is_zero_ether_addr(addr
)) {
3041 /* We dont support TX only encryption */
3045 /* The wl1271 does not allow to remove unicast keys - they
3046 will be cleared automatically on next CMD_JOIN. Ignore the
3047 request silently, as we dont want the mac80211 to emit
3048 an error message. */
3049 if (action
== KEY_REMOVE
&& !is_broadcast_ether_addr(addr
))
3052 /* don't remove key if hlid was already deleted */
3053 if (action
== KEY_REMOVE
&&
3054 wlvif
->sta
.hlid
== WL12XX_INVALID_LINK_ID
)
3057 ret
= wl1271_cmd_set_sta_key(wl
, wlvif
, action
,
3058 id
, key_type
, key_size
,
3059 key
, addr
, tx_seq_32
,
3064 /* the default WEP key needs to be configured at least once */
3065 if (key_type
== KEY_WEP
) {
3066 ret
= wl12xx_cmd_set_default_wep_key(wl
,
3077 static int wlcore_op_set_key(struct ieee80211_hw
*hw
, enum set_key_cmd cmd
,
3078 struct ieee80211_vif
*vif
,
3079 struct ieee80211_sta
*sta
,
3080 struct ieee80211_key_conf
*key_conf
)
3082 struct wl1271
*wl
= hw
->priv
;
3084 return wlcore_hw_set_key(wl
, cmd
, vif
, sta
, key_conf
);
3087 int wlcore_set_key(struct wl1271
*wl
, enum set_key_cmd cmd
,
3088 struct ieee80211_vif
*vif
,
3089 struct ieee80211_sta
*sta
,
3090 struct ieee80211_key_conf
*key_conf
)
3092 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3098 wl1271_debug(DEBUG_MAC80211
, "mac80211 set key");
3100 wl1271_debug(DEBUG_CRYPT
, "CMD: 0x%x sta: %p", cmd
, sta
);
3101 wl1271_debug(DEBUG_CRYPT
, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3102 key_conf
->cipher
, key_conf
->keyidx
,
3103 key_conf
->keylen
, key_conf
->flags
);
3104 wl1271_dump(DEBUG_CRYPT
, "KEY: ", key_conf
->key
, key_conf
->keylen
);
3106 mutex_lock(&wl
->mutex
);
3108 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
3113 ret
= wl1271_ps_elp_wakeup(wl
);
3117 switch (key_conf
->cipher
) {
3118 case WLAN_CIPHER_SUITE_WEP40
:
3119 case WLAN_CIPHER_SUITE_WEP104
:
3122 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3124 case WLAN_CIPHER_SUITE_TKIP
:
3125 key_type
= KEY_TKIP
;
3127 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3128 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
3129 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
3131 case WLAN_CIPHER_SUITE_CCMP
:
3134 key_conf
->flags
|= IEEE80211_KEY_FLAG_PUT_IV_SPACE
;
3135 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
3136 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
3138 case WL1271_CIPHER_SUITE_GEM
:
3140 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
3141 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
3144 wl1271_error("Unknown key algo 0x%x", key_conf
->cipher
);
3152 ret
= wl1271_set_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
3153 key_conf
->keyidx
, key_type
,
3154 key_conf
->keylen
, key_conf
->key
,
3155 tx_seq_32
, tx_seq_16
, sta
);
3157 wl1271_error("Could not add or replace key");
3162 * reconfiguring arp response if the unicast (or common)
3163 * encryption key type was changed
3165 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
3166 (sta
|| key_type
== KEY_WEP
) &&
3167 wlvif
->encryption_type
!= key_type
) {
3168 wlvif
->encryption_type
= key_type
;
3169 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
3171 wl1271_warning("build arp rsp failed: %d", ret
);
3178 ret
= wl1271_set_key(wl
, wlvif
, KEY_REMOVE
,
3179 key_conf
->keyidx
, key_type
,
3180 key_conf
->keylen
, key_conf
->key
,
3183 wl1271_error("Could not remove key");
3189 wl1271_error("Unsupported key cmd 0x%x", cmd
);
3195 wl1271_ps_elp_sleep(wl
);
3198 mutex_unlock(&wl
->mutex
);
3202 EXPORT_SYMBOL_GPL(wlcore_set_key
);
3204 static int wl1271_op_hw_scan(struct ieee80211_hw
*hw
,
3205 struct ieee80211_vif
*vif
,
3206 struct cfg80211_scan_request
*req
)
3208 struct wl1271
*wl
= hw
->priv
;
3213 wl1271_debug(DEBUG_MAC80211
, "mac80211 hw scan");
3216 ssid
= req
->ssids
[0].ssid
;
3217 len
= req
->ssids
[0].ssid_len
;
3220 mutex_lock(&wl
->mutex
);
3222 if (wl
->state
== WL1271_STATE_OFF
) {
3224 * We cannot return -EBUSY here because cfg80211 will expect
3225 * a call to ieee80211_scan_completed if we do - in this case
3226 * there won't be any call.
3232 ret
= wl1271_ps_elp_wakeup(wl
);
3236 /* fail if there is any role in ROC */
3237 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
3238 /* don't allow scanning right now */
3243 ret
= wl1271_scan(hw
->priv
, vif
, ssid
, len
, req
);
3245 wl1271_ps_elp_sleep(wl
);
3247 mutex_unlock(&wl
->mutex
);
3252 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw
*hw
,
3253 struct ieee80211_vif
*vif
)
3255 struct wl1271
*wl
= hw
->priv
;
3258 wl1271_debug(DEBUG_MAC80211
, "mac80211 cancel hw scan");
3260 mutex_lock(&wl
->mutex
);
3262 if (wl
->state
== WL1271_STATE_OFF
)
3265 if (wl
->scan
.state
== WL1271_SCAN_STATE_IDLE
)
3268 ret
= wl1271_ps_elp_wakeup(wl
);
3272 if (wl
->scan
.state
!= WL1271_SCAN_STATE_DONE
) {
3273 ret
= wl1271_scan_stop(wl
);
3279 * Rearm the tx watchdog just before idling scan. This
3280 * prevents just-finished scans from triggering the watchdog
3282 wl12xx_rearm_tx_watchdog_locked(wl
);
3284 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
3285 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
3286 wl
->scan_vif
= NULL
;
3287 wl
->scan
.req
= NULL
;
3288 ieee80211_scan_completed(wl
->hw
, true);
3291 wl1271_ps_elp_sleep(wl
);
3293 mutex_unlock(&wl
->mutex
);
3295 cancel_delayed_work_sync(&wl
->scan_complete_work
);
3298 static int wl1271_op_sched_scan_start(struct ieee80211_hw
*hw
,
3299 struct ieee80211_vif
*vif
,
3300 struct cfg80211_sched_scan_request
*req
,
3301 struct ieee80211_sched_scan_ies
*ies
)
3303 struct wl1271
*wl
= hw
->priv
;
3304 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3307 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_start");
3309 mutex_lock(&wl
->mutex
);
3311 if (wl
->state
== WL1271_STATE_OFF
) {
3316 ret
= wl1271_ps_elp_wakeup(wl
);
3320 ret
= wl1271_scan_sched_scan_config(wl
, wlvif
, req
, ies
);
3324 ret
= wl1271_scan_sched_scan_start(wl
, wlvif
);
3328 wl
->sched_scanning
= true;
3331 wl1271_ps_elp_sleep(wl
);
3333 mutex_unlock(&wl
->mutex
);
3337 static void wl1271_op_sched_scan_stop(struct ieee80211_hw
*hw
,
3338 struct ieee80211_vif
*vif
)
3340 struct wl1271
*wl
= hw
->priv
;
3341 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3344 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_stop");
3346 mutex_lock(&wl
->mutex
);
3348 if (wl
->state
== WL1271_STATE_OFF
)
3351 ret
= wl1271_ps_elp_wakeup(wl
);
3355 wl1271_scan_sched_scan_stop(wl
, wlvif
);
3357 wl1271_ps_elp_sleep(wl
);
3359 mutex_unlock(&wl
->mutex
);
3362 static int wl1271_op_set_frag_threshold(struct ieee80211_hw
*hw
, u32 value
)
3364 struct wl1271
*wl
= hw
->priv
;
3367 mutex_lock(&wl
->mutex
);
3369 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
3374 ret
= wl1271_ps_elp_wakeup(wl
);
3378 ret
= wl1271_acx_frag_threshold(wl
, value
);
3380 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret
);
3382 wl1271_ps_elp_sleep(wl
);
3385 mutex_unlock(&wl
->mutex
);
3390 static int wl1271_op_set_rts_threshold(struct ieee80211_hw
*hw
, u32 value
)
3392 struct wl1271
*wl
= hw
->priv
;
3393 struct wl12xx_vif
*wlvif
;
3396 mutex_lock(&wl
->mutex
);
3398 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
3403 ret
= wl1271_ps_elp_wakeup(wl
);
3407 wl12xx_for_each_wlvif(wl
, wlvif
) {
3408 ret
= wl1271_acx_rts_threshold(wl
, wlvif
, value
);
3410 wl1271_warning("set rts threshold failed: %d", ret
);
3412 wl1271_ps_elp_sleep(wl
);
3415 mutex_unlock(&wl
->mutex
);
3420 static int wl1271_ssid_set(struct ieee80211_vif
*vif
, struct sk_buff
*skb
,
3423 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3425 const u8
*ptr
= cfg80211_find_ie(WLAN_EID_SSID
, skb
->data
+ offset
,
3429 wl1271_error("No SSID in IEs!");
3434 if (ssid_len
> IEEE80211_MAX_SSID_LEN
) {
3435 wl1271_error("SSID is too long!");
3439 wlvif
->ssid_len
= ssid_len
;
3440 memcpy(wlvif
->ssid
, ptr
+2, ssid_len
);
3444 static void wl12xx_remove_ie(struct sk_buff
*skb
, u8 eid
, int ieoffset
)
3447 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3448 u8
*ie
= (u8
*)cfg80211_find_ie(eid
, skb
->data
+ ieoffset
,
3449 skb
->len
- ieoffset
);
3454 memmove(ie
, next
, end
- next
);
3455 skb_trim(skb
, skb
->len
- len
);
3458 static void wl12xx_remove_vendor_ie(struct sk_buff
*skb
,
3459 unsigned int oui
, u8 oui_type
,
3463 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3464 u8
*ie
= (u8
*)cfg80211_find_vendor_ie(oui
, oui_type
,
3465 skb
->data
+ ieoffset
,
3466 skb
->len
- ieoffset
);
3471 memmove(ie
, next
, end
- next
);
3472 skb_trim(skb
, skb
->len
- len
);
3475 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271
*wl
, u32 rates
,
3476 struct ieee80211_vif
*vif
)
3478 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3479 struct sk_buff
*skb
;
3482 skb
= ieee80211_proberesp_get(wl
->hw
, vif
);
3486 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3487 CMD_TEMPL_AP_PROBE_RESPONSE
,
3496 wl1271_debug(DEBUG_AP
, "probe response updated");
3497 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
);
3503 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271
*wl
,
3504 struct ieee80211_vif
*vif
,
3506 size_t probe_rsp_len
,
3509 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3510 struct ieee80211_bss_conf
*bss_conf
= &vif
->bss_conf
;
3511 u8 probe_rsp_templ
[WL1271_CMD_TEMPL_MAX_SIZE
];
3512 int ssid_ie_offset
, ie_offset
, templ_len
;
3515 /* no need to change probe response if the SSID is set correctly */
3516 if (wlvif
->ssid_len
> 0)
3517 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3518 CMD_TEMPL_AP_PROBE_RESPONSE
,
3523 if (probe_rsp_len
+ bss_conf
->ssid_len
> WL1271_CMD_TEMPL_MAX_SIZE
) {
3524 wl1271_error("probe_rsp template too big");
3528 /* start searching from IE offset */
3529 ie_offset
= offsetof(struct ieee80211_mgmt
, u
.probe_resp
.variable
);
3531 ptr
= cfg80211_find_ie(WLAN_EID_SSID
, probe_rsp_data
+ ie_offset
,
3532 probe_rsp_len
- ie_offset
);
3534 wl1271_error("No SSID in beacon!");
3538 ssid_ie_offset
= ptr
- probe_rsp_data
;
3539 ptr
+= (ptr
[1] + 2);
3541 memcpy(probe_rsp_templ
, probe_rsp_data
, ssid_ie_offset
);
3543 /* insert SSID from bss_conf */
3544 probe_rsp_templ
[ssid_ie_offset
] = WLAN_EID_SSID
;
3545 probe_rsp_templ
[ssid_ie_offset
+ 1] = bss_conf
->ssid_len
;
3546 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2,
3547 bss_conf
->ssid
, bss_conf
->ssid_len
);
3548 templ_len
= ssid_ie_offset
+ 2 + bss_conf
->ssid_len
;
3550 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2 + bss_conf
->ssid_len
,
3551 ptr
, probe_rsp_len
- (ptr
- probe_rsp_data
));
3552 templ_len
+= probe_rsp_len
- (ptr
- probe_rsp_data
);
3554 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3555 CMD_TEMPL_AP_PROBE_RESPONSE
,
3561 static int wl1271_bss_erp_info_changed(struct wl1271
*wl
,
3562 struct ieee80211_vif
*vif
,
3563 struct ieee80211_bss_conf
*bss_conf
,
3566 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3569 if (changed
& BSS_CHANGED_ERP_SLOT
) {
3570 if (bss_conf
->use_short_slot
)
3571 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_SHORT
);
3573 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_LONG
);
3575 wl1271_warning("Set slot time failed %d", ret
);
3580 if (changed
& BSS_CHANGED_ERP_PREAMBLE
) {
3581 if (bss_conf
->use_short_preamble
)
3582 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_SHORT
);
3584 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_LONG
);
3587 if (changed
& BSS_CHANGED_ERP_CTS_PROT
) {
3588 if (bss_conf
->use_cts_prot
)
3589 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3592 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3593 CTSPROTECT_DISABLE
);
3595 wl1271_warning("Set ctsprotect failed %d", ret
);
3604 static int wlcore_set_beacon_template(struct wl1271
*wl
,
3605 struct ieee80211_vif
*vif
,
3608 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3609 struct ieee80211_hdr
*hdr
;
3612 int ieoffset
= offsetof(struct ieee80211_mgmt
,
3614 struct sk_buff
*beacon
= ieee80211_beacon_get(wl
->hw
, vif
);
3622 wl1271_debug(DEBUG_MASTER
, "beacon updated");
3624 ret
= wl1271_ssid_set(vif
, beacon
, ieoffset
);
3626 dev_kfree_skb(beacon
);
3629 min_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3630 tmpl_id
= is_ap
? CMD_TEMPL_AP_BEACON
:
3632 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
, tmpl_id
,
3637 dev_kfree_skb(beacon
);
3642 * In case we already have a probe-resp beacon set explicitly
3643 * by usermode, don't use the beacon data.
3645 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
))
3648 /* remove TIM ie from probe response */
3649 wl12xx_remove_ie(beacon
, WLAN_EID_TIM
, ieoffset
);
3652 * remove p2p ie from probe response.
3653 * the fw reponds to probe requests that don't include
3654 * the p2p ie. probe requests with p2p ie will be passed,
3655 * and will be responded by the supplicant (the spec
3656 * forbids including the p2p ie when responding to probe
3657 * requests that didn't include it).
3659 wl12xx_remove_vendor_ie(beacon
, WLAN_OUI_WFA
,
3660 WLAN_OUI_TYPE_WFA_P2P
, ieoffset
);
3662 hdr
= (struct ieee80211_hdr
*) beacon
->data
;
3663 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_MGMT
|
3664 IEEE80211_STYPE_PROBE_RESP
);
3666 ret
= wl1271_ap_set_probe_resp_tmpl_legacy(wl
, vif
,
3671 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3672 CMD_TEMPL_PROBE_RESPONSE
,
3677 dev_kfree_skb(beacon
);
3685 static int wl1271_bss_beacon_info_changed(struct wl1271
*wl
,
3686 struct ieee80211_vif
*vif
,
3687 struct ieee80211_bss_conf
*bss_conf
,
3690 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3691 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3694 if ((changed
& BSS_CHANGED_BEACON_INT
)) {
3695 wl1271_debug(DEBUG_MASTER
, "beacon interval updated: %d",
3696 bss_conf
->beacon_int
);
3698 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3701 if ((changed
& BSS_CHANGED_AP_PROBE_RESP
) && is_ap
) {
3702 u32 rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3704 wl1271_ap_set_probe_resp_tmpl(wl
, rate
, vif
);
3707 if ((changed
& BSS_CHANGED_BEACON
)) {
3708 ret
= wlcore_set_beacon_template(wl
, vif
, is_ap
);
3715 wl1271_error("beacon info change failed: %d", ret
);
3719 /* AP mode changes */
3720 static void wl1271_bss_info_changed_ap(struct wl1271
*wl
,
3721 struct ieee80211_vif
*vif
,
3722 struct ieee80211_bss_conf
*bss_conf
,
3725 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3728 if ((changed
& BSS_CHANGED_BASIC_RATES
)) {
3729 u32 rates
= bss_conf
->basic_rates
;
3731 wlvif
->basic_rate_set
= wl1271_tx_enabled_rates_get(wl
, rates
,
3733 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
,
3734 wlvif
->basic_rate_set
);
3736 ret
= wl1271_init_ap_rates(wl
, wlvif
);
3738 wl1271_error("AP rate policy change failed %d", ret
);
3742 ret
= wl1271_ap_init_templates(wl
, vif
);
3746 ret
= wl1271_ap_set_probe_resp_tmpl(wl
, wlvif
->basic_rate
, vif
);
3750 ret
= wlcore_set_beacon_template(wl
, vif
, true);
3755 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
, changed
);
3759 if ((changed
& BSS_CHANGED_BEACON_ENABLED
)) {
3760 if (bss_conf
->enable_beacon
) {
3761 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3762 ret
= wl12xx_cmd_role_start_ap(wl
, wlvif
);
3766 ret
= wl1271_ap_init_hwenc(wl
, wlvif
);
3770 set_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
3771 wl1271_debug(DEBUG_AP
, "started AP");
3774 if (test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3775 ret
= wl12xx_cmd_role_stop_ap(wl
, wlvif
);
3779 clear_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
3780 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
,
3782 wl1271_debug(DEBUG_AP
, "stopped AP");
3787 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
3791 /* Handle HT information change */
3792 if ((changed
& BSS_CHANGED_HT
) &&
3793 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
3794 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
3795 bss_conf
->ht_operation_mode
);
3797 wl1271_warning("Set ht information failed %d", ret
);
3806 /* STA/IBSS mode changes */
3807 static void wl1271_bss_info_changed_sta(struct wl1271
*wl
,
3808 struct ieee80211_vif
*vif
,
3809 struct ieee80211_bss_conf
*bss_conf
,
3812 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3813 bool do_join
= false, set_assoc
= false;
3814 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
3815 bool ibss_joined
= false;
3816 u32 sta_rate_set
= 0;
3818 struct ieee80211_sta
*sta
;
3819 bool sta_exists
= false;
3820 struct ieee80211_sta_ht_cap sta_ht_cap
;
3823 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
,
3829 if (changed
& BSS_CHANGED_IBSS
) {
3830 if (bss_conf
->ibss_joined
) {
3831 set_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
);
3834 if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED
,
3836 wl1271_unjoin(wl
, wlvif
);
3840 if ((changed
& BSS_CHANGED_BEACON_INT
) && ibss_joined
)
3843 /* Need to update the SSID (for filtering etc) */
3844 if ((changed
& BSS_CHANGED_BEACON
) && ibss_joined
)
3847 if ((changed
& BSS_CHANGED_BEACON_ENABLED
) && ibss_joined
) {
3848 wl1271_debug(DEBUG_ADHOC
, "ad-hoc beaconing: %s",
3849 bss_conf
->enable_beacon
? "enabled" : "disabled");
3854 if (changed
& BSS_CHANGED_IDLE
&& !is_ibss
) {
3855 ret
= wl1271_sta_handle_idle(wl
, wlvif
, bss_conf
->idle
);
3857 wl1271_warning("idle mode change failed %d", ret
);
3860 if ((changed
& BSS_CHANGED_CQM
)) {
3861 bool enable
= false;
3862 if (bss_conf
->cqm_rssi_thold
)
3864 ret
= wl1271_acx_rssi_snr_trigger(wl
, wlvif
, enable
,
3865 bss_conf
->cqm_rssi_thold
,
3866 bss_conf
->cqm_rssi_hyst
);
3869 wlvif
->rssi_thold
= bss_conf
->cqm_rssi_thold
;
3872 if (changed
& BSS_CHANGED_BSSID
)
3873 if (!is_zero_ether_addr(bss_conf
->bssid
)) {
3874 ret
= wl12xx_cmd_build_null_data(wl
, wlvif
);
3878 ret
= wl1271_build_qos_null_data(wl
, vif
);
3883 if (changed
& (BSS_CHANGED_ASSOC
| BSS_CHANGED_HT
)) {
3885 sta
= ieee80211_find_sta(vif
, bss_conf
->bssid
);
3889 /* save the supp_rates of the ap */
3890 sta_rate_set
= sta
->supp_rates
[wl
->hw
->conf
.channel
->band
];
3891 if (sta
->ht_cap
.ht_supported
)
3893 (sta
->ht_cap
.mcs
.rx_mask
[0] << HW_HT_RATES_OFFSET
) |
3894 (sta
->ht_cap
.mcs
.rx_mask
[1] << HW_MIMO_RATES_OFFSET
);
3895 sta_ht_cap
= sta
->ht_cap
;
3902 if ((changed
& BSS_CHANGED_ASSOC
)) {
3903 if (bss_conf
->assoc
) {
3906 wlvif
->aid
= bss_conf
->aid
;
3907 wlvif
->channel_type
= bss_conf
->channel_type
;
3908 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3913 * use basic rates from AP, and determine lowest rate
3914 * to use with control frames.
3916 rates
= bss_conf
->basic_rates
;
3917 wlvif
->basic_rate_set
=
3918 wl1271_tx_enabled_rates_get(wl
, rates
,
3921 wl1271_tx_min_rate_get(wl
,
3922 wlvif
->basic_rate_set
);
3925 wl1271_tx_enabled_rates_get(wl
,
3928 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3933 * with wl1271, we don't need to update the
3934 * beacon_int and dtim_period, because the firmware
3935 * updates it by itself when the first beacon is
3936 * received after a join.
3938 ret
= wl1271_cmd_build_ps_poll(wl
, wlvif
, wlvif
->aid
);
3943 * Get a template for hardware connection maintenance
3945 dev_kfree_skb(wlvif
->probereq
);
3946 wlvif
->probereq
= wl1271_cmd_build_ap_probe_req(wl
,
3949 ieoffset
= offsetof(struct ieee80211_mgmt
,
3950 u
.probe_req
.variable
);
3951 wl1271_ssid_set(vif
, wlvif
->probereq
, ieoffset
);
3953 /* enable the connection monitoring feature */
3954 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, true);
3958 /* use defaults when not associated */
3960 !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED
,
3963 !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT
,
3967 /* free probe-request template */
3968 dev_kfree_skb(wlvif
->probereq
);
3969 wlvif
->probereq
= NULL
;
3971 /* revert back to minimum rates for the current band */
3972 wl1271_set_band_rate(wl
, wlvif
);
3974 wl1271_tx_min_rate_get(wl
,
3975 wlvif
->basic_rate_set
);
3976 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3980 /* disable connection monitor features */
3981 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, false);
3983 /* Disable the keep-alive feature */
3984 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, false);
3988 /* restore the bssid filter and go to dummy bssid */
3991 * we might have to disable roc, if there was
3992 * no IF_OPER_UP notification.
3995 ret
= wl12xx_croc(wl
, wlvif
->role_id
);
4000 * (we also need to disable roc in case of
4001 * roaming on the same channel. until we will
4002 * have a better flow...)
4004 if (test_bit(wlvif
->dev_role_id
, wl
->roc_map
)) {
4005 ret
= wl12xx_croc(wl
,
4006 wlvif
->dev_role_id
);
4011 wl1271_unjoin(wl
, wlvif
);
4012 if (!bss_conf
->idle
)
4013 wl12xx_start_dev(wl
, wlvif
);
4018 if (changed
& BSS_CHANGED_IBSS
) {
4019 wl1271_debug(DEBUG_ADHOC
, "ibss_joined: %d",
4020 bss_conf
->ibss_joined
);
4022 if (bss_conf
->ibss_joined
) {
4023 u32 rates
= bss_conf
->basic_rates
;
4024 wlvif
->basic_rate_set
=
4025 wl1271_tx_enabled_rates_get(wl
, rates
,
4028 wl1271_tx_min_rate_get(wl
,
4029 wlvif
->basic_rate_set
);
4031 /* by default, use 11b + OFDM rates */
4032 wlvif
->rate_set
= CONF_TX_IBSS_DEFAULT_RATES
;
4033 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4039 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
4044 ret
= wl1271_join(wl
, wlvif
, set_assoc
);
4046 wl1271_warning("cmd join failed %d", ret
);
4050 /* ROC until connected (after EAPOL exchange) */
4052 ret
= wl12xx_roc(wl
, wlvif
, wlvif
->role_id
);
4056 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
))
4057 wl12xx_set_authorized(wl
, wlvif
);
4060 * stop device role if started (we might already be in
4063 if (wl12xx_dev_role_started(wlvif
)) {
4064 ret
= wl12xx_stop_dev(wl
, wlvif
);
4070 /* Handle new association with HT. Do this after join. */
4072 if ((changed
& BSS_CHANGED_HT
) &&
4073 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
4074 ret
= wl1271_acx_set_ht_capabilities(wl
,
4079 wl1271_warning("Set ht cap true failed %d",
4084 /* handle new association without HT and disassociation */
4085 else if (changed
& BSS_CHANGED_ASSOC
) {
4086 ret
= wl1271_acx_set_ht_capabilities(wl
,
4091 wl1271_warning("Set ht cap false failed %d",
4098 /* Handle HT information change. Done after join. */
4099 if ((changed
& BSS_CHANGED_HT
) &&
4100 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
4101 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
4102 bss_conf
->ht_operation_mode
);
4104 wl1271_warning("Set ht information failed %d", ret
);
4109 /* Handle arp filtering. Done after join. */
4110 if ((changed
& BSS_CHANGED_ARP_FILTER
) ||
4111 (!is_ibss
&& (changed
& BSS_CHANGED_QOS
))) {
4112 __be32 addr
= bss_conf
->arp_addr_list
[0];
4113 wlvif
->sta
.qos
= bss_conf
->qos
;
4114 WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
);
4116 if (bss_conf
->arp_addr_cnt
== 1 &&
4117 bss_conf
->arp_filter_enabled
) {
4118 wlvif
->ip_addr
= addr
;
4120 * The template should have been configured only upon
4121 * association. however, it seems that the correct ip
4122 * isn't being set (when sending), so we have to
4123 * reconfigure the template upon every ip change.
4125 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
4127 wl1271_warning("build arp rsp failed: %d", ret
);
4131 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
,
4132 (ACX_ARP_FILTER_ARP_FILTERING
|
4133 ACX_ARP_FILTER_AUTO_ARP
),
4137 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
, 0, addr
);
4148 static void wl1271_op_bss_info_changed(struct ieee80211_hw
*hw
,
4149 struct ieee80211_vif
*vif
,
4150 struct ieee80211_bss_conf
*bss_conf
,
4153 struct wl1271
*wl
= hw
->priv
;
4154 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4155 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
4158 wl1271_debug(DEBUG_MAC80211
, "mac80211 bss info changed 0x%x",
4162 * make sure to cancel pending disconnections if our association
4165 if (!is_ap
&& (changed
& BSS_CHANGED_ASSOC
))
4166 cancel_delayed_work_sync(&wl
->connection_loss_work
);
4168 if (is_ap
&& (changed
& BSS_CHANGED_BEACON_ENABLED
) &&
4169 !bss_conf
->enable_beacon
)
4170 wl1271_tx_flush(wl
);
4172 mutex_lock(&wl
->mutex
);
4174 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4177 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)))
4180 ret
= wl1271_ps_elp_wakeup(wl
);
4185 wl1271_bss_info_changed_ap(wl
, vif
, bss_conf
, changed
);
4187 wl1271_bss_info_changed_sta(wl
, vif
, bss_conf
, changed
);
4189 wl1271_ps_elp_sleep(wl
);
4192 mutex_unlock(&wl
->mutex
);
4195 static int wl1271_op_conf_tx(struct ieee80211_hw
*hw
,
4196 struct ieee80211_vif
*vif
, u16 queue
,
4197 const struct ieee80211_tx_queue_params
*params
)
4199 struct wl1271
*wl
= hw
->priv
;
4200 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4204 mutex_lock(&wl
->mutex
);
4206 wl1271_debug(DEBUG_MAC80211
, "mac80211 conf tx %d", queue
);
4209 ps_scheme
= CONF_PS_SCHEME_UPSD_TRIGGER
;
4211 ps_scheme
= CONF_PS_SCHEME_LEGACY
;
4213 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
4216 ret
= wl1271_ps_elp_wakeup(wl
);
4221 * the txop is confed in units of 32us by the mac80211,
4224 ret
= wl1271_acx_ac_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4225 params
->cw_min
, params
->cw_max
,
4226 params
->aifs
, params
->txop
<< 5);
4230 ret
= wl1271_acx_tid_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4231 CONF_CHANNEL_TYPE_EDCF
,
4232 wl1271_tx_get_queue(queue
),
4233 ps_scheme
, CONF_ACK_POLICY_LEGACY
,
4237 wl1271_ps_elp_sleep(wl
);
4240 mutex_unlock(&wl
->mutex
);
4245 static u64
wl1271_op_get_tsf(struct ieee80211_hw
*hw
,
4246 struct ieee80211_vif
*vif
)
4249 struct wl1271
*wl
= hw
->priv
;
4250 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4251 u64 mactime
= ULLONG_MAX
;
4254 wl1271_debug(DEBUG_MAC80211
, "mac80211 get tsf");
4256 mutex_lock(&wl
->mutex
);
4258 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4261 ret
= wl1271_ps_elp_wakeup(wl
);
4265 ret
= wl12xx_acx_tsf_info(wl
, wlvif
, &mactime
);
4270 wl1271_ps_elp_sleep(wl
);
4273 mutex_unlock(&wl
->mutex
);
4277 static int wl1271_op_get_survey(struct ieee80211_hw
*hw
, int idx
,
4278 struct survey_info
*survey
)
4280 struct ieee80211_conf
*conf
= &hw
->conf
;
4285 survey
->channel
= conf
->channel
;
4290 static int wl1271_allocate_sta(struct wl1271
*wl
,
4291 struct wl12xx_vif
*wlvif
,
4292 struct ieee80211_sta
*sta
)
4294 struct wl1271_station
*wl_sta
;
4298 if (wl
->active_sta_count
>= AP_MAX_STATIONS
) {
4299 wl1271_warning("could not allocate HLID - too much stations");
4303 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4304 ret
= wl12xx_allocate_link(wl
, wlvif
, &wl_sta
->hlid
);
4306 wl1271_warning("could not allocate HLID - too many links");
4310 set_bit(wl_sta
->hlid
, wlvif
->ap
.sta_hlid_map
);
4311 memcpy(wl
->links
[wl_sta
->hlid
].addr
, sta
->addr
, ETH_ALEN
);
4312 wl
->active_sta_count
++;
4316 void wl1271_free_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
, u8 hlid
)
4318 if (!test_bit(hlid
, wlvif
->ap
.sta_hlid_map
))
4321 clear_bit(hlid
, wlvif
->ap
.sta_hlid_map
);
4322 memset(wl
->links
[hlid
].addr
, 0, ETH_ALEN
);
4323 wl
->links
[hlid
].ba_bitmap
= 0;
4324 __clear_bit(hlid
, &wl
->ap_ps_map
);
4325 __clear_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
4326 wl12xx_free_link(wl
, wlvif
, &hlid
);
4327 wl
->active_sta_count
--;
4330 * rearm the tx watchdog when the last STA is freed - give the FW a
4331 * chance to return STA-buffered packets before complaining.
4333 if (wl
->active_sta_count
== 0)
4334 wl12xx_rearm_tx_watchdog_locked(wl
);
4337 static int wl12xx_sta_add(struct wl1271
*wl
,
4338 struct wl12xx_vif
*wlvif
,
4339 struct ieee80211_sta
*sta
)
4341 struct wl1271_station
*wl_sta
;
4345 wl1271_debug(DEBUG_MAC80211
, "mac80211 add sta %d", (int)sta
->aid
);
4347 ret
= wl1271_allocate_sta(wl
, wlvif
, sta
);
4351 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4352 hlid
= wl_sta
->hlid
;
4354 ret
= wl12xx_cmd_add_peer(wl
, wlvif
, sta
, hlid
);
4356 wl1271_free_sta(wl
, wlvif
, hlid
);
4361 static int wl12xx_sta_remove(struct wl1271
*wl
,
4362 struct wl12xx_vif
*wlvif
,
4363 struct ieee80211_sta
*sta
)
4365 struct wl1271_station
*wl_sta
;
4368 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove sta %d", (int)sta
->aid
);
4370 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4372 if (WARN_ON(!test_bit(id
, wlvif
->ap
.sta_hlid_map
)))
4375 ret
= wl12xx_cmd_remove_peer(wl
, wl_sta
->hlid
);
4379 wl1271_free_sta(wl
, wlvif
, wl_sta
->hlid
);
4383 static int wl12xx_update_sta_state(struct wl1271
*wl
,
4384 struct wl12xx_vif
*wlvif
,
4385 struct ieee80211_sta
*sta
,
4386 enum ieee80211_sta_state old_state
,
4387 enum ieee80211_sta_state new_state
)
4389 struct wl1271_station
*wl_sta
;
4391 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
4392 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
4395 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4396 hlid
= wl_sta
->hlid
;
4398 /* Add station (AP mode) */
4400 old_state
== IEEE80211_STA_NOTEXIST
&&
4401 new_state
== IEEE80211_STA_NONE
)
4402 return wl12xx_sta_add(wl
, wlvif
, sta
);
4404 /* Remove station (AP mode) */
4406 old_state
== IEEE80211_STA_NONE
&&
4407 new_state
== IEEE80211_STA_NOTEXIST
) {
4409 wl12xx_sta_remove(wl
, wlvif
, sta
);
4413 /* Authorize station (AP mode) */
4415 new_state
== IEEE80211_STA_AUTHORIZED
) {
4416 ret
= wl12xx_cmd_set_peer_state(wl
, hlid
);
4420 ret
= wl1271_acx_set_ht_capabilities(wl
, &sta
->ht_cap
, true,
4425 /* Authorize station */
4427 new_state
== IEEE80211_STA_AUTHORIZED
) {
4428 set_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4429 return wl12xx_set_authorized(wl
, wlvif
);
4433 old_state
== IEEE80211_STA_AUTHORIZED
&&
4434 new_state
== IEEE80211_STA_ASSOC
) {
4435 clear_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4442 static int wl12xx_op_sta_state(struct ieee80211_hw
*hw
,
4443 struct ieee80211_vif
*vif
,
4444 struct ieee80211_sta
*sta
,
4445 enum ieee80211_sta_state old_state
,
4446 enum ieee80211_sta_state new_state
)
4448 struct wl1271
*wl
= hw
->priv
;
4449 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4452 wl1271_debug(DEBUG_MAC80211
, "mac80211 sta %d state=%d->%d",
4453 sta
->aid
, old_state
, new_state
);
4455 mutex_lock(&wl
->mutex
);
4457 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
4462 ret
= wl1271_ps_elp_wakeup(wl
);
4466 ret
= wl12xx_update_sta_state(wl
, wlvif
, sta
, old_state
, new_state
);
4468 wl1271_ps_elp_sleep(wl
);
4470 mutex_unlock(&wl
->mutex
);
4471 if (new_state
< old_state
)
4476 static int wl1271_op_ampdu_action(struct ieee80211_hw
*hw
,
4477 struct ieee80211_vif
*vif
,
4478 enum ieee80211_ampdu_mlme_action action
,
4479 struct ieee80211_sta
*sta
, u16 tid
, u16
*ssn
,
4482 struct wl1271
*wl
= hw
->priv
;
4483 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4485 u8 hlid
, *ba_bitmap
;
4487 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu action %d tid %d", action
,
4490 /* sanity check - the fields in FW are only 8bits wide */
4491 if (WARN_ON(tid
> 0xFF))
4494 mutex_lock(&wl
->mutex
);
4496 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
4501 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
) {
4502 hlid
= wlvif
->sta
.hlid
;
4503 ba_bitmap
= &wlvif
->sta
.ba_rx_bitmap
;
4504 } else if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
) {
4505 struct wl1271_station
*wl_sta
;
4507 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4508 hlid
= wl_sta
->hlid
;
4509 ba_bitmap
= &wl
->links
[hlid
].ba_bitmap
;
4515 ret
= wl1271_ps_elp_wakeup(wl
);
4519 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu: Rx tid %d action %d",
4523 case IEEE80211_AMPDU_RX_START
:
4524 if (!wlvif
->ba_support
|| !wlvif
->ba_allowed
) {
4529 if (wl
->ba_rx_session_count
>= RX_BA_MAX_SESSIONS
) {
4531 wl1271_error("exceeded max RX BA sessions");
4535 if (*ba_bitmap
& BIT(tid
)) {
4537 wl1271_error("cannot enable RX BA session on active "
4542 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, *ssn
, true,
4545 *ba_bitmap
|= BIT(tid
);
4546 wl
->ba_rx_session_count
++;
4550 case IEEE80211_AMPDU_RX_STOP
:
4551 if (!(*ba_bitmap
& BIT(tid
))) {
4553 * this happens on reconfig - so only output a debug
4554 * message for now, and don't fail the function.
4556 wl1271_debug(DEBUG_MAC80211
,
4557 "no active RX BA session on tid: %d",
4563 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, 0, false,
4566 *ba_bitmap
&= ~BIT(tid
);
4567 wl
->ba_rx_session_count
--;
4572 * The BA initiator session management in FW independently.
4573 * Falling break here on purpose for all TX APDU commands.
4575 case IEEE80211_AMPDU_TX_START
:
4576 case IEEE80211_AMPDU_TX_STOP
:
4577 case IEEE80211_AMPDU_TX_OPERATIONAL
:
4582 wl1271_error("Incorrect ampdu action id=%x\n", action
);
4586 wl1271_ps_elp_sleep(wl
);
4589 mutex_unlock(&wl
->mutex
);
4594 static int wl12xx_set_bitrate_mask(struct ieee80211_hw
*hw
,
4595 struct ieee80211_vif
*vif
,
4596 const struct cfg80211_bitrate_mask
*mask
)
4598 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4599 struct wl1271
*wl
= hw
->priv
;
4602 wl1271_debug(DEBUG_MAC80211
, "mac80211 set_bitrate_mask 0x%x 0x%x",
4603 mask
->control
[NL80211_BAND_2GHZ
].legacy
,
4604 mask
->control
[NL80211_BAND_5GHZ
].legacy
);
4606 mutex_lock(&wl
->mutex
);
4608 for (i
= 0; i
< WLCORE_NUM_BANDS
; i
++)
4609 wlvif
->bitrate_masks
[i
] =
4610 wl1271_tx_enabled_rates_get(wl
,
4611 mask
->control
[i
].legacy
,
4614 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4617 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
4618 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
4620 ret
= wl1271_ps_elp_wakeup(wl
);
4624 wl1271_set_band_rate(wl
, wlvif
);
4626 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4627 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4629 wl1271_ps_elp_sleep(wl
);
4632 mutex_unlock(&wl
->mutex
);
4637 static void wl12xx_op_channel_switch(struct ieee80211_hw
*hw
,
4638 struct ieee80211_channel_switch
*ch_switch
)
4640 struct wl1271
*wl
= hw
->priv
;
4641 struct wl12xx_vif
*wlvif
;
4644 wl1271_debug(DEBUG_MAC80211
, "mac80211 channel switch");
4646 wl1271_tx_flush(wl
);
4648 mutex_lock(&wl
->mutex
);
4650 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
4651 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4652 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
4653 ieee80211_chswitch_done(vif
, false);
4658 ret
= wl1271_ps_elp_wakeup(wl
);
4662 /* TODO: change mac80211 to pass vif as param */
4663 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4664 ret
= wl12xx_cmd_channel_switch(wl
, wlvif
, ch_switch
);
4667 set_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
);
4670 wl1271_ps_elp_sleep(wl
);
4673 mutex_unlock(&wl
->mutex
);
4676 static void wlcore_op_flush(struct ieee80211_hw
*hw
, bool drop
)
4678 struct wl1271
*wl
= hw
->priv
;
4680 wl1271_tx_flush(wl
);
4683 static bool wl1271_tx_frames_pending(struct ieee80211_hw
*hw
)
4685 struct wl1271
*wl
= hw
->priv
;
4688 mutex_lock(&wl
->mutex
);
4690 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4693 /* packets are considered pending if in the TX queue or the FW */
4694 ret
= (wl1271_tx_total_queue_count(wl
) > 0) || (wl
->tx_frames_cnt
> 0);
4696 mutex_unlock(&wl
->mutex
);
4701 /* can't be const, mac80211 writes to this */
4702 static struct ieee80211_rate wl1271_rates
[] = {
4704 .hw_value
= CONF_HW_BIT_RATE_1MBPS
,
4705 .hw_value_short
= CONF_HW_BIT_RATE_1MBPS
, },
4707 .hw_value
= CONF_HW_BIT_RATE_2MBPS
,
4708 .hw_value_short
= CONF_HW_BIT_RATE_2MBPS
,
4709 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4711 .hw_value
= CONF_HW_BIT_RATE_5_5MBPS
,
4712 .hw_value_short
= CONF_HW_BIT_RATE_5_5MBPS
,
4713 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4715 .hw_value
= CONF_HW_BIT_RATE_11MBPS
,
4716 .hw_value_short
= CONF_HW_BIT_RATE_11MBPS
,
4717 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4719 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
4720 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
4722 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
4723 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
4725 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
4726 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
4728 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
4729 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
4731 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
4732 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
4734 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
4735 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
4737 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
4738 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
4740 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
4741 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
4744 /* can't be const, mac80211 writes to this */
4745 static struct ieee80211_channel wl1271_channels
[] = {
4746 { .hw_value
= 1, .center_freq
= 2412, .max_power
= 25 },
4747 { .hw_value
= 2, .center_freq
= 2417, .max_power
= 25 },
4748 { .hw_value
= 3, .center_freq
= 2422, .max_power
= 25 },
4749 { .hw_value
= 4, .center_freq
= 2427, .max_power
= 25 },
4750 { .hw_value
= 5, .center_freq
= 2432, .max_power
= 25 },
4751 { .hw_value
= 6, .center_freq
= 2437, .max_power
= 25 },
4752 { .hw_value
= 7, .center_freq
= 2442, .max_power
= 25 },
4753 { .hw_value
= 8, .center_freq
= 2447, .max_power
= 25 },
4754 { .hw_value
= 9, .center_freq
= 2452, .max_power
= 25 },
4755 { .hw_value
= 10, .center_freq
= 2457, .max_power
= 25 },
4756 { .hw_value
= 11, .center_freq
= 2462, .max_power
= 25 },
4757 { .hw_value
= 12, .center_freq
= 2467, .max_power
= 25 },
4758 { .hw_value
= 13, .center_freq
= 2472, .max_power
= 25 },
4759 { .hw_value
= 14, .center_freq
= 2484, .max_power
= 25 },
4762 /* can't be const, mac80211 writes to this */
4763 static struct ieee80211_supported_band wl1271_band_2ghz
= {
4764 .channels
= wl1271_channels
,
4765 .n_channels
= ARRAY_SIZE(wl1271_channels
),
4766 .bitrates
= wl1271_rates
,
4767 .n_bitrates
= ARRAY_SIZE(wl1271_rates
),
4770 /* 5 GHz data rates for WL1273 */
4771 static struct ieee80211_rate wl1271_rates_5ghz
[] = {
4773 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
4774 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
4776 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
4777 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
4779 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
4780 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
4782 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
4783 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
4785 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
4786 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
4788 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
4789 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
4791 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
4792 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
4794 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
4795 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
4798 /* 5 GHz band channels for WL1273 */
4799 static struct ieee80211_channel wl1271_channels_5ghz
[] = {
4800 { .hw_value
= 7, .center_freq
= 5035, .max_power
= 25 },
4801 { .hw_value
= 8, .center_freq
= 5040, .max_power
= 25 },
4802 { .hw_value
= 9, .center_freq
= 5045, .max_power
= 25 },
4803 { .hw_value
= 11, .center_freq
= 5055, .max_power
= 25 },
4804 { .hw_value
= 12, .center_freq
= 5060, .max_power
= 25 },
4805 { .hw_value
= 16, .center_freq
= 5080, .max_power
= 25 },
4806 { .hw_value
= 34, .center_freq
= 5170, .max_power
= 25 },
4807 { .hw_value
= 36, .center_freq
= 5180, .max_power
= 25 },
4808 { .hw_value
= 38, .center_freq
= 5190, .max_power
= 25 },
4809 { .hw_value
= 40, .center_freq
= 5200, .max_power
= 25 },
4810 { .hw_value
= 42, .center_freq
= 5210, .max_power
= 25 },
4811 { .hw_value
= 44, .center_freq
= 5220, .max_power
= 25 },
4812 { .hw_value
= 46, .center_freq
= 5230, .max_power
= 25 },
4813 { .hw_value
= 48, .center_freq
= 5240, .max_power
= 25 },
4814 { .hw_value
= 52, .center_freq
= 5260, .max_power
= 25 },
4815 { .hw_value
= 56, .center_freq
= 5280, .max_power
= 25 },
4816 { .hw_value
= 60, .center_freq
= 5300, .max_power
= 25 },
4817 { .hw_value
= 64, .center_freq
= 5320, .max_power
= 25 },
4818 { .hw_value
= 100, .center_freq
= 5500, .max_power
= 25 },
4819 { .hw_value
= 104, .center_freq
= 5520, .max_power
= 25 },
4820 { .hw_value
= 108, .center_freq
= 5540, .max_power
= 25 },
4821 { .hw_value
= 112, .center_freq
= 5560, .max_power
= 25 },
4822 { .hw_value
= 116, .center_freq
= 5580, .max_power
= 25 },
4823 { .hw_value
= 120, .center_freq
= 5600, .max_power
= 25 },
4824 { .hw_value
= 124, .center_freq
= 5620, .max_power
= 25 },
4825 { .hw_value
= 128, .center_freq
= 5640, .max_power
= 25 },
4826 { .hw_value
= 132, .center_freq
= 5660, .max_power
= 25 },
4827 { .hw_value
= 136, .center_freq
= 5680, .max_power
= 25 },
4828 { .hw_value
= 140, .center_freq
= 5700, .max_power
= 25 },
4829 { .hw_value
= 149, .center_freq
= 5745, .max_power
= 25 },
4830 { .hw_value
= 153, .center_freq
= 5765, .max_power
= 25 },
4831 { .hw_value
= 157, .center_freq
= 5785, .max_power
= 25 },
4832 { .hw_value
= 161, .center_freq
= 5805, .max_power
= 25 },
4833 { .hw_value
= 165, .center_freq
= 5825, .max_power
= 25 },
4836 static struct ieee80211_supported_band wl1271_band_5ghz
= {
4837 .channels
= wl1271_channels_5ghz
,
4838 .n_channels
= ARRAY_SIZE(wl1271_channels_5ghz
),
4839 .bitrates
= wl1271_rates_5ghz
,
4840 .n_bitrates
= ARRAY_SIZE(wl1271_rates_5ghz
),
4843 static const struct ieee80211_ops wl1271_ops
= {
4844 .start
= wl1271_op_start
,
4845 .stop
= wlcore_op_stop
,
4846 .add_interface
= wl1271_op_add_interface
,
4847 .remove_interface
= wl1271_op_remove_interface
,
4848 .change_interface
= wl12xx_op_change_interface
,
4850 .suspend
= wl1271_op_suspend
,
4851 .resume
= wl1271_op_resume
,
4853 .config
= wl1271_op_config
,
4854 .prepare_multicast
= wl1271_op_prepare_multicast
,
4855 .configure_filter
= wl1271_op_configure_filter
,
4857 .set_key
= wlcore_op_set_key
,
4858 .hw_scan
= wl1271_op_hw_scan
,
4859 .cancel_hw_scan
= wl1271_op_cancel_hw_scan
,
4860 .sched_scan_start
= wl1271_op_sched_scan_start
,
4861 .sched_scan_stop
= wl1271_op_sched_scan_stop
,
4862 .bss_info_changed
= wl1271_op_bss_info_changed
,
4863 .set_frag_threshold
= wl1271_op_set_frag_threshold
,
4864 .set_rts_threshold
= wl1271_op_set_rts_threshold
,
4865 .conf_tx
= wl1271_op_conf_tx
,
4866 .get_tsf
= wl1271_op_get_tsf
,
4867 .get_survey
= wl1271_op_get_survey
,
4868 .sta_state
= wl12xx_op_sta_state
,
4869 .ampdu_action
= wl1271_op_ampdu_action
,
4870 .tx_frames_pending
= wl1271_tx_frames_pending
,
4871 .set_bitrate_mask
= wl12xx_set_bitrate_mask
,
4872 .channel_switch
= wl12xx_op_channel_switch
,
4873 .flush
= wlcore_op_flush
,
4874 CFG80211_TESTMODE_CMD(wl1271_tm_cmd
)
4878 u8
wlcore_rate_to_idx(struct wl1271
*wl
, u8 rate
, enum ieee80211_band band
)
4884 if (unlikely(rate
>= wl
->hw_tx_rate_tbl_size
)) {
4885 wl1271_error("Illegal RX rate from HW: %d", rate
);
4889 idx
= wl
->band_rate_to_idx
[band
][rate
];
4890 if (unlikely(idx
== CONF_HW_RXTX_RATE_UNSUPPORTED
)) {
4891 wl1271_error("Unsupported RX rate from HW: %d", rate
);
4898 static ssize_t
wl1271_sysfs_show_bt_coex_state(struct device
*dev
,
4899 struct device_attribute
*attr
,
4902 struct wl1271
*wl
= dev_get_drvdata(dev
);
4907 mutex_lock(&wl
->mutex
);
4908 len
= snprintf(buf
, len
, "%d\n\n0 - off\n1 - on\n",
4910 mutex_unlock(&wl
->mutex
);
4916 static ssize_t
wl1271_sysfs_store_bt_coex_state(struct device
*dev
,
4917 struct device_attribute
*attr
,
4918 const char *buf
, size_t count
)
4920 struct wl1271
*wl
= dev_get_drvdata(dev
);
4924 ret
= kstrtoul(buf
, 10, &res
);
4926 wl1271_warning("incorrect value written to bt_coex_mode");
4930 mutex_lock(&wl
->mutex
);
4934 if (res
== wl
->sg_enabled
)
4937 wl
->sg_enabled
= res
;
4939 if (wl
->state
== WL1271_STATE_OFF
)
4942 ret
= wl1271_ps_elp_wakeup(wl
);
4946 wl1271_acx_sg_enable(wl
, wl
->sg_enabled
);
4947 wl1271_ps_elp_sleep(wl
);
4950 mutex_unlock(&wl
->mutex
);
4954 static DEVICE_ATTR(bt_coex_state
, S_IRUGO
| S_IWUSR
,
4955 wl1271_sysfs_show_bt_coex_state
,
4956 wl1271_sysfs_store_bt_coex_state
);
4958 static ssize_t
wl1271_sysfs_show_hw_pg_ver(struct device
*dev
,
4959 struct device_attribute
*attr
,
4962 struct wl1271
*wl
= dev_get_drvdata(dev
);
4967 mutex_lock(&wl
->mutex
);
4968 if (wl
->hw_pg_ver
>= 0)
4969 len
= snprintf(buf
, len
, "%d\n", wl
->hw_pg_ver
);
4971 len
= snprintf(buf
, len
, "n/a\n");
4972 mutex_unlock(&wl
->mutex
);
4977 static DEVICE_ATTR(hw_pg_ver
, S_IRUGO
,
4978 wl1271_sysfs_show_hw_pg_ver
, NULL
);
4980 static ssize_t
wl1271_sysfs_read_fwlog(struct file
*filp
, struct kobject
*kobj
,
4981 struct bin_attribute
*bin_attr
,
4982 char *buffer
, loff_t pos
, size_t count
)
4984 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
4985 struct wl1271
*wl
= dev_get_drvdata(dev
);
4989 ret
= mutex_lock_interruptible(&wl
->mutex
);
4991 return -ERESTARTSYS
;
4993 /* Let only one thread read the log at a time, blocking others */
4994 while (wl
->fwlog_size
== 0) {
4997 prepare_to_wait_exclusive(&wl
->fwlog_waitq
,
4999 TASK_INTERRUPTIBLE
);
5001 if (wl
->fwlog_size
!= 0) {
5002 finish_wait(&wl
->fwlog_waitq
, &wait
);
5006 mutex_unlock(&wl
->mutex
);
5009 finish_wait(&wl
->fwlog_waitq
, &wait
);
5011 if (signal_pending(current
))
5012 return -ERESTARTSYS
;
5014 ret
= mutex_lock_interruptible(&wl
->mutex
);
5016 return -ERESTARTSYS
;
5019 /* Check if the fwlog is still valid */
5020 if (wl
->fwlog_size
< 0) {
5021 mutex_unlock(&wl
->mutex
);
5025 /* Seeking is not supported - old logs are not kept. Disregard pos. */
5026 len
= min(count
, (size_t)wl
->fwlog_size
);
5027 wl
->fwlog_size
-= len
;
5028 memcpy(buffer
, wl
->fwlog
, len
);
5030 /* Make room for new messages */
5031 memmove(wl
->fwlog
, wl
->fwlog
+ len
, wl
->fwlog_size
);
5033 mutex_unlock(&wl
->mutex
);
5038 static struct bin_attribute fwlog_attr
= {
5039 .attr
= {.name
= "fwlog", .mode
= S_IRUSR
},
5040 .read
= wl1271_sysfs_read_fwlog
,
5043 static void wl1271_connection_loss_work(struct work_struct
*work
)
5045 struct delayed_work
*dwork
;
5047 struct ieee80211_vif
*vif
;
5048 struct wl12xx_vif
*wlvif
;
5050 dwork
= container_of(work
, struct delayed_work
, work
);
5051 wl
= container_of(dwork
, struct wl1271
, connection_loss_work
);
5053 wl1271_info("Connection loss work.");
5055 mutex_lock(&wl
->mutex
);
5057 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
5060 /* Call mac80211 connection loss */
5061 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
5062 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
5064 vif
= wl12xx_wlvif_to_vif(wlvif
);
5065 ieee80211_connection_loss(vif
);
5068 mutex_unlock(&wl
->mutex
);
5071 static void wl12xx_derive_mac_addresses(struct wl1271
*wl
,
5072 u32 oui
, u32 nic
, int n
)
5076 wl1271_debug(DEBUG_PROBE
, "base address: oui %06x nic %06x, n %d",
5079 if (nic
+ n
- 1 > 0xffffff)
5080 wl1271_warning("NIC part of the MAC address wraps around!");
5082 for (i
= 0; i
< n
; i
++) {
5083 wl
->addresses
[i
].addr
[0] = (u8
)(oui
>> 16);
5084 wl
->addresses
[i
].addr
[1] = (u8
)(oui
>> 8);
5085 wl
->addresses
[i
].addr
[2] = (u8
) oui
;
5086 wl
->addresses
[i
].addr
[3] = (u8
)(nic
>> 16);
5087 wl
->addresses
[i
].addr
[4] = (u8
)(nic
>> 8);
5088 wl
->addresses
[i
].addr
[5] = (u8
) nic
;
5092 wl
->hw
->wiphy
->n_addresses
= n
;
5093 wl
->hw
->wiphy
->addresses
= wl
->addresses
;
5096 static int wl12xx_get_hw_info(struct wl1271
*wl
)
5100 ret
= wl12xx_set_power_on(wl
);
5104 ret
= wlcore_read_reg(wl
, REG_CHIP_ID_B
, &wl
->chip
.id
);
5108 wl
->fuse_oui_addr
= 0;
5109 wl
->fuse_nic_addr
= 0;
5111 ret
= wl
->ops
->get_pg_ver(wl
, &wl
->hw_pg_ver
);
5115 if (wl
->ops
->get_mac
)
5116 ret
= wl
->ops
->get_mac(wl
);
5119 wl1271_power_off(wl
);
5123 static int wl1271_register_hw(struct wl1271
*wl
)
5126 u32 oui_addr
= 0, nic_addr
= 0;
5128 if (wl
->mac80211_registered
)
5131 wl1271_fetch_nvs(wl
);
5132 if (wl
->nvs
!= NULL
) {
5133 /* NOTE: The wl->nvs->nvs element must be first, in
5134 * order to simplify the casting, we assume it is at
5135 * the beginning of the wl->nvs structure.
5137 u8
*nvs_ptr
= (u8
*)wl
->nvs
;
5140 (nvs_ptr
[11] << 16) + (nvs_ptr
[10] << 8) + nvs_ptr
[6];
5142 (nvs_ptr
[5] << 16) + (nvs_ptr
[4] << 8) + nvs_ptr
[3];
5145 /* if the MAC address is zeroed in the NVS derive from fuse */
5146 if (oui_addr
== 0 && nic_addr
== 0) {
5147 oui_addr
= wl
->fuse_oui_addr
;
5148 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5149 nic_addr
= wl
->fuse_nic_addr
+ 1;
5152 wl12xx_derive_mac_addresses(wl
, oui_addr
, nic_addr
, 2);
5154 ret
= ieee80211_register_hw(wl
->hw
);
5156 wl1271_error("unable to register mac80211 hw: %d", ret
);
5160 wl
->mac80211_registered
= true;
5162 wl1271_debugfs_init(wl
);
5164 wl1271_notice("loaded");
5170 static void wl1271_unregister_hw(struct wl1271
*wl
)
5173 wl1271_plt_stop(wl
);
5175 ieee80211_unregister_hw(wl
->hw
);
5176 wl
->mac80211_registered
= false;
5180 static const struct ieee80211_iface_limit wlcore_iface_limits
[] = {
5183 .types
= BIT(NL80211_IFTYPE_STATION
),
5187 .types
= BIT(NL80211_IFTYPE_AP
) |
5188 BIT(NL80211_IFTYPE_P2P_GO
) |
5189 BIT(NL80211_IFTYPE_P2P_CLIENT
),
5193 static const struct ieee80211_iface_combination
5194 wlcore_iface_combinations
[] = {
5196 .num_different_channels
= 1,
5197 .max_interfaces
= 2,
5198 .limits
= wlcore_iface_limits
,
5199 .n_limits
= ARRAY_SIZE(wlcore_iface_limits
),
5203 static int wl1271_init_ieee80211(struct wl1271
*wl
)
5205 static const u32 cipher_suites
[] = {
5206 WLAN_CIPHER_SUITE_WEP40
,
5207 WLAN_CIPHER_SUITE_WEP104
,
5208 WLAN_CIPHER_SUITE_TKIP
,
5209 WLAN_CIPHER_SUITE_CCMP
,
5210 WL1271_CIPHER_SUITE_GEM
,
5213 /* The tx descriptor buffer */
5214 wl
->hw
->extra_tx_headroom
= sizeof(struct wl1271_tx_hw_descr
);
5216 if (wl
->quirks
& WLCORE_QUIRK_TKIP_HEADER_SPACE
)
5217 wl
->hw
->extra_tx_headroom
+= WL1271_EXTRA_SPACE_TKIP
;
5220 /* FIXME: find a proper value */
5221 wl
->hw
->channel_change_time
= 10000;
5222 wl
->hw
->max_listen_interval
= wl
->conf
.conn
.max_listen_interval
;
5224 wl
->hw
->flags
= IEEE80211_HW_SIGNAL_DBM
|
5225 IEEE80211_HW_SUPPORTS_PS
|
5226 IEEE80211_HW_SUPPORTS_DYNAMIC_PS
|
5227 IEEE80211_HW_SUPPORTS_UAPSD
|
5228 IEEE80211_HW_HAS_RATE_CONTROL
|
5229 IEEE80211_HW_CONNECTION_MONITOR
|
5230 IEEE80211_HW_REPORTS_TX_ACK_STATUS
|
5231 IEEE80211_HW_SPECTRUM_MGMT
|
5232 IEEE80211_HW_AP_LINK_PS
|
5233 IEEE80211_HW_AMPDU_AGGREGATION
|
5234 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW
|
5235 IEEE80211_HW_SCAN_WHILE_IDLE
;
5237 wl
->hw
->wiphy
->cipher_suites
= cipher_suites
;
5238 wl
->hw
->wiphy
->n_cipher_suites
= ARRAY_SIZE(cipher_suites
);
5240 wl
->hw
->wiphy
->interface_modes
= BIT(NL80211_IFTYPE_STATION
) |
5241 BIT(NL80211_IFTYPE_ADHOC
) | BIT(NL80211_IFTYPE_AP
) |
5242 BIT(NL80211_IFTYPE_P2P_CLIENT
) | BIT(NL80211_IFTYPE_P2P_GO
);
5243 wl
->hw
->wiphy
->max_scan_ssids
= 1;
5244 wl
->hw
->wiphy
->max_sched_scan_ssids
= 16;
5245 wl
->hw
->wiphy
->max_match_sets
= 16;
5247 * Maximum length of elements in scanning probe request templates
5248 * should be the maximum length possible for a template, without
5249 * the IEEE80211 header of the template
5251 wl
->hw
->wiphy
->max_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5252 sizeof(struct ieee80211_header
);
5254 wl
->hw
->wiphy
->max_sched_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5255 sizeof(struct ieee80211_header
);
5257 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_UAPSD
|
5258 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL
;
5260 /* make sure all our channels fit in the scanned_ch bitmask */
5261 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels
) +
5262 ARRAY_SIZE(wl1271_channels_5ghz
) >
5263 WL1271_MAX_CHANNELS
);
5265 * We keep local copies of the band structs because we need to
5266 * modify them on a per-device basis.
5268 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
], &wl1271_band_2ghz
,
5269 sizeof(wl1271_band_2ghz
));
5270 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
].ht_cap
,
5271 &wl
->ht_cap
[IEEE80211_BAND_2GHZ
],
5272 sizeof(*wl
->ht_cap
));
5273 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
], &wl1271_band_5ghz
,
5274 sizeof(wl1271_band_5ghz
));
5275 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
].ht_cap
,
5276 &wl
->ht_cap
[IEEE80211_BAND_5GHZ
],
5277 sizeof(*wl
->ht_cap
));
5279 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_2GHZ
] =
5280 &wl
->bands
[IEEE80211_BAND_2GHZ
];
5281 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_5GHZ
] =
5282 &wl
->bands
[IEEE80211_BAND_5GHZ
];
5285 wl
->hw
->max_rates
= 1;
5287 wl
->hw
->wiphy
->reg_notifier
= wl1271_reg_notify
;
5289 /* the FW answers probe-requests in AP-mode */
5290 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD
;
5291 wl
->hw
->wiphy
->probe_resp_offload
=
5292 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS
|
5293 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2
|
5294 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P
;
5296 /* allowed interface combinations */
5297 wl
->hw
->wiphy
->iface_combinations
= wlcore_iface_combinations
;
5298 wl
->hw
->wiphy
->n_iface_combinations
=
5299 ARRAY_SIZE(wlcore_iface_combinations
);
5301 SET_IEEE80211_DEV(wl
->hw
, wl
->dev
);
5303 wl
->hw
->sta_data_size
= sizeof(struct wl1271_station
);
5304 wl
->hw
->vif_data_size
= sizeof(struct wl12xx_vif
);
5306 wl
->hw
->max_rx_aggregation_subframes
= wl
->conf
.ht
.rx_ba_win_size
;
5311 #define WL1271_DEFAULT_CHANNEL 0
5313 struct ieee80211_hw
*wlcore_alloc_hw(size_t priv_size
)
5315 struct ieee80211_hw
*hw
;
5320 BUILD_BUG_ON(AP_MAX_STATIONS
> WL12XX_MAX_LINKS
);
5322 hw
= ieee80211_alloc_hw(sizeof(*wl
), &wl1271_ops
);
5324 wl1271_error("could not alloc ieee80211_hw");
5330 memset(wl
, 0, sizeof(*wl
));
5332 wl
->priv
= kzalloc(priv_size
, GFP_KERNEL
);
5334 wl1271_error("could not alloc wl priv");
5336 goto err_priv_alloc
;
5339 INIT_LIST_HEAD(&wl
->wlvif_list
);
5343 for (i
= 0; i
< NUM_TX_QUEUES
; i
++)
5344 for (j
= 0; j
< WL12XX_MAX_LINKS
; j
++)
5345 skb_queue_head_init(&wl
->links
[j
].tx_queue
[i
]);
5347 skb_queue_head_init(&wl
->deferred_rx_queue
);
5348 skb_queue_head_init(&wl
->deferred_tx_queue
);
5350 INIT_DELAYED_WORK(&wl
->elp_work
, wl1271_elp_work
);
5351 INIT_WORK(&wl
->netstack_work
, wl1271_netstack_work
);
5352 INIT_WORK(&wl
->tx_work
, wl1271_tx_work
);
5353 INIT_WORK(&wl
->recovery_work
, wl1271_recovery_work
);
5354 INIT_DELAYED_WORK(&wl
->scan_complete_work
, wl1271_scan_complete_work
);
5355 INIT_DELAYED_WORK(&wl
->tx_watchdog_work
, wl12xx_tx_watchdog_work
);
5356 INIT_DELAYED_WORK(&wl
->connection_loss_work
,
5357 wl1271_connection_loss_work
);
5359 wl
->freezable_wq
= create_freezable_workqueue("wl12xx_wq");
5360 if (!wl
->freezable_wq
) {
5365 wl
->channel
= WL1271_DEFAULT_CHANNEL
;
5367 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
5368 wl
->band
= IEEE80211_BAND_2GHZ
;
5369 wl
->channel_type
= NL80211_CHAN_NO_HT
;
5371 wl
->sg_enabled
= true;
5372 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
5375 wl
->ap_fw_ps_map
= 0;
5377 wl
->platform_quirks
= 0;
5378 wl
->sched_scanning
= false;
5379 wl
->system_hlid
= WL12XX_SYSTEM_HLID
;
5380 wl
->active_sta_count
= 0;
5382 init_waitqueue_head(&wl
->fwlog_waitq
);
5384 /* The system link is always allocated */
5385 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
5387 memset(wl
->tx_frames_map
, 0, sizeof(wl
->tx_frames_map
));
5388 for (i
= 0; i
< wl
->num_tx_desc
; i
++)
5389 wl
->tx_frames
[i
] = NULL
;
5391 spin_lock_init(&wl
->wl_lock
);
5393 wl
->state
= WL1271_STATE_OFF
;
5394 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5395 mutex_init(&wl
->mutex
);
5396 mutex_init(&wl
->flush_mutex
);
5398 order
= get_order(WL1271_AGGR_BUFFER_SIZE
);
5399 wl
->aggr_buf
= (u8
*)__get_free_pages(GFP_KERNEL
, order
);
5400 if (!wl
->aggr_buf
) {
5405 wl
->dummy_packet
= wl12xx_alloc_dummy_packet(wl
);
5406 if (!wl
->dummy_packet
) {
5411 /* Allocate one page for the FW log */
5412 wl
->fwlog
= (u8
*)get_zeroed_page(GFP_KERNEL
);
5415 goto err_dummy_packet
;
5418 wl
->mbox
= kmalloc(sizeof(*wl
->mbox
), GFP_KERNEL
| GFP_DMA
);
5427 free_page((unsigned long)wl
->fwlog
);
5430 dev_kfree_skb(wl
->dummy_packet
);
5433 free_pages((unsigned long)wl
->aggr_buf
, order
);
5436 destroy_workqueue(wl
->freezable_wq
);
5439 wl1271_debugfs_exit(wl
);
5443 ieee80211_free_hw(hw
);
5447 return ERR_PTR(ret
);
5449 EXPORT_SYMBOL_GPL(wlcore_alloc_hw
);
5451 int wlcore_free_hw(struct wl1271
*wl
)
5453 /* Unblock any fwlog readers */
5454 mutex_lock(&wl
->mutex
);
5455 wl
->fwlog_size
= -1;
5456 wake_up_interruptible_all(&wl
->fwlog_waitq
);
5457 mutex_unlock(&wl
->mutex
);
5459 device_remove_bin_file(wl
->dev
, &fwlog_attr
);
5461 device_remove_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5463 device_remove_file(wl
->dev
, &dev_attr_bt_coex_state
);
5464 free_page((unsigned long)wl
->fwlog
);
5465 dev_kfree_skb(wl
->dummy_packet
);
5466 free_pages((unsigned long)wl
->aggr_buf
,
5467 get_order(WL1271_AGGR_BUFFER_SIZE
));
5469 wl1271_debugfs_exit(wl
);
5473 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5477 kfree(wl
->fw_status_1
);
5478 kfree(wl
->tx_res_if
);
5479 destroy_workqueue(wl
->freezable_wq
);
5482 ieee80211_free_hw(wl
->hw
);
5486 EXPORT_SYMBOL_GPL(wlcore_free_hw
);
5488 static irqreturn_t
wl12xx_hardirq(int irq
, void *cookie
)
5490 struct wl1271
*wl
= cookie
;
5491 unsigned long flags
;
5493 wl1271_debug(DEBUG_IRQ
, "IRQ");
5495 /* complete the ELP completion */
5496 spin_lock_irqsave(&wl
->wl_lock
, flags
);
5497 set_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
5498 if (wl
->elp_compl
) {
5499 complete(wl
->elp_compl
);
5500 wl
->elp_compl
= NULL
;
5503 if (test_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
)) {
5504 /* don't enqueue a work right now. mark it as pending */
5505 set_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
);
5506 wl1271_debug(DEBUG_IRQ
, "should not enqueue work");
5507 disable_irq_nosync(wl
->irq
);
5508 pm_wakeup_event(wl
->dev
, 0);
5509 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
5512 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
5514 return IRQ_WAKE_THREAD
;
5517 int __devinit
wlcore_probe(struct wl1271
*wl
, struct platform_device
*pdev
)
5519 struct wl12xx_platform_data
*pdata
= pdev
->dev
.platform_data
;
5520 unsigned long irqflags
;
5523 if (!wl
->ops
|| !wl
->ptable
) {
5528 BUG_ON(wl
->num_tx_desc
> WLCORE_MAX_TX_DESCRIPTORS
);
5530 /* adjust some runtime configuration parameters */
5531 wlcore_adjust_conf(wl
);
5533 wl
->irq
= platform_get_irq(pdev
, 0);
5534 wl
->platform_quirks
= pdata
->platform_quirks
;
5535 wl
->set_power
= pdata
->set_power
;
5536 wl
->dev
= &pdev
->dev
;
5537 wl
->if_ops
= pdata
->ops
;
5539 platform_set_drvdata(pdev
, wl
);
5541 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
5542 irqflags
= IRQF_TRIGGER_RISING
;
5544 irqflags
= IRQF_TRIGGER_HIGH
| IRQF_ONESHOT
;
5546 ret
= request_threaded_irq(wl
->irq
, wl12xx_hardirq
, wlcore_irq
,
5550 wl1271_error("request_irq() failed: %d", ret
);
5555 ret
= enable_irq_wake(wl
->irq
);
5557 wl
->irq_wake_enabled
= true;
5558 device_init_wakeup(wl
->dev
, 1);
5559 if (pdata
->pwr_in_suspend
) {
5560 wl
->hw
->wiphy
->wowlan
.flags
= WIPHY_WOWLAN_ANY
;
5561 wl
->hw
->wiphy
->wowlan
.n_patterns
=
5562 WL1271_MAX_RX_FILTERS
;
5563 wl
->hw
->wiphy
->wowlan
.pattern_min_len
= 1;
5564 wl
->hw
->wiphy
->wowlan
.pattern_max_len
=
5565 WL1271_RX_FILTER_MAX_PATTERN_SIZE
;
5569 disable_irq(wl
->irq
);
5571 ret
= wl12xx_get_hw_info(wl
);
5573 wl1271_error("couldn't get hw info");
5577 ret
= wl
->ops
->identify_chip(wl
);
5581 ret
= wl1271_init_ieee80211(wl
);
5585 ret
= wl1271_register_hw(wl
);
5589 /* Create sysfs file to control bt coex state */
5590 ret
= device_create_file(wl
->dev
, &dev_attr_bt_coex_state
);
5592 wl1271_error("failed to create sysfs file bt_coex_state");
5596 /* Create sysfs file to get HW PG version */
5597 ret
= device_create_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5599 wl1271_error("failed to create sysfs file hw_pg_ver");
5600 goto out_bt_coex_state
;
5603 /* Create sysfs file for the FW log */
5604 ret
= device_create_bin_file(wl
->dev
, &fwlog_attr
);
5606 wl1271_error("failed to create sysfs file fwlog");
5613 device_remove_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5616 device_remove_file(wl
->dev
, &dev_attr_bt_coex_state
);
5619 wl1271_unregister_hw(wl
);
5622 free_irq(wl
->irq
, wl
);
5630 EXPORT_SYMBOL_GPL(wlcore_probe
);
5632 int __devexit
wlcore_remove(struct platform_device
*pdev
)
5634 struct wl1271
*wl
= platform_get_drvdata(pdev
);
5636 if (wl
->irq_wake_enabled
) {
5637 device_init_wakeup(wl
->dev
, 0);
5638 disable_irq_wake(wl
->irq
);
5640 wl1271_unregister_hw(wl
);
5641 free_irq(wl
->irq
, wl
);
5646 EXPORT_SYMBOL_GPL(wlcore_remove
);
5648 u32 wl12xx_debug_level
= DEBUG_NONE
;
5649 EXPORT_SYMBOL_GPL(wl12xx_debug_level
);
5650 module_param_named(debug_level
, wl12xx_debug_level
, uint
, S_IRUSR
| S_IWUSR
);
5651 MODULE_PARM_DESC(debug_level
, "wl12xx debugging level");
5653 module_param_named(fwlog
, fwlog_param
, charp
, 0);
5654 MODULE_PARM_DESC(fwlog
,
5655 "FW logger options: continuous, ondemand, dbgpins or disable");
5657 module_param(bug_on_recovery
, bool, S_IRUSR
| S_IWUSR
);
5658 MODULE_PARM_DESC(bug_on_recovery
, "BUG() on fw recovery");
5660 module_param(no_recovery
, bool, S_IRUSR
| S_IWUSR
);
5661 MODULE_PARM_DESC(no_recovery
, "Prevent HW recovery. FW will remain stuck.");
5663 MODULE_LICENSE("GPL");
5664 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5665 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");