/*
 * Atheros CARL9170 driver
 *
 * mac80211 interaction code
 *
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, see
 * http://www.gnu.org/licenses/.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *    Copyright (c) 2007-2008 Atheros Communications, Inc.
 *
 *    Permission to use, copy, modify, and/or distribute this software for any
 *    purpose with or without fee is hereby granted, provided that the above
 *    copyright notice and this permission notice appear in all copies.
 *
 *    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 *    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 *    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 *    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 *    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include "hw.h"
#include "carl9170.h"
#include "cmd.h"
static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");

int modparam_noht;
module_param_named(noht, modparam_noht, int, S_IRUGO);
MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
#define RATE(_bitrate, _hw_rate, _txpidx, _flags) {	\
	.bitrate	= (_bitrate),			\
	.flags		= (_flags),			\
	.hw_value	= (_hw_rate) | (_txpidx) << 4,	\
}

struct ieee80211_rate __carl9170_ratetable[] = {
	RATE(10, 0, 0, 0),
	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0xb, 0, 0),
	RATE(90, 0xf, 0, 0),
	RATE(120, 0xa, 0, 0),
	RATE(180, 0xe, 0, 0),
	RATE(240, 0x9, 0, 0),
	RATE(360, 0xd, 1, 0),
	RATE(480, 0x8, 2, 0),
	RATE(540, 0xc, 3, 0),
};
#undef RATE

#define carl9170_g_ratetable	(__carl9170_ratetable + 0)
#define carl9170_g_ratetable_size	12
#define carl9170_a_ratetable	(__carl9170_ratetable + 4)
#define carl9170_a_ratetable_size	8
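
/*
 * As defined by the RATE() macro above, each hw_value packs the
 * hardware rate code into the low nibble and a tx-power index into
 * the upper bits, e.g. RATE(55, 2, 2, ...) yields hw_value 0x22.
 * The "a" ratetable simply reuses the shared table, skipping the
 * first four (CCK-only) entries.
 */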
/*
 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
 *     array in phy.c so that we don't have to do frequency lookups!
 */
#define CHAN(_freq, _idx) {		\
	.center_freq	= (_freq),	\
	.hw_value	= (_idx),	\
	.max_power	= 18, /* XXX */	\
}

static struct ieee80211_channel carl9170_2ghz_chantable[] = {
	CHAN(2412, 0),
	CHAN(2417, 1),
	CHAN(2422, 2),
	CHAN(2427, 3),
	CHAN(2432, 4),
	CHAN(2437, 5),
	CHAN(2442, 6),
	CHAN(2447, 7),
	CHAN(2452, 8),
	CHAN(2457, 9),
	CHAN(2462, 10),
	CHAN(2467, 11),
	CHAN(2472, 12),
	CHAN(2484, 13),
};

static struct ieee80211_channel carl9170_5ghz_chantable[] = {
	CHAN(4920, 14),
	CHAN(4940, 15),
	CHAN(4960, 16),
	CHAN(4980, 17),
	CHAN(5040, 18),
	CHAN(5060, 19),
	CHAN(5080, 20),
	CHAN(5180, 21),
	CHAN(5200, 22),
	CHAN(5220, 23),
	CHAN(5240, 24),
	CHAN(5260, 25),
	CHAN(5280, 26),
	CHAN(5300, 27),
	CHAN(5320, 28),
	CHAN(5500, 29),
	CHAN(5520, 30),
	CHAN(5540, 31),
	CHAN(5560, 32),
	CHAN(5580, 33),
	CHAN(5600, 34),
	CHAN(5620, 35),
	CHAN(5640, 36),
	CHAN(5660, 37),
	CHAN(5680, 38),
	CHAN(5700, 39),
	CHAN(5745, 40),
	CHAN(5765, 41),
	CHAN(5785, 42),
	CHAN(5805, 43),
	CHAN(5825, 44),
	CHAN(5170, 45),
	CHAN(5190, 46),
	CHAN(5210, 47),
	CHAN(5230, 48),
};
#undef CHAN
#define CARL9170_HT_CAP						\
{								\
	.ht_supported	= true,					\
	.cap		= IEEE80211_HT_CAP_MAX_AMSDU |		\
			  IEEE80211_HT_CAP_SUP_WIDTH_20_40 |	\
			  IEEE80211_HT_CAP_SGI_40 |		\
			  IEEE80211_HT_CAP_DSSSCCK40 |		\
			  IEEE80211_HT_CAP_SM_PS,		\
	.ampdu_factor	= IEEE80211_HT_MAX_AMPDU_64K,		\
	.ampdu_density	= IEEE80211_HT_MPDU_DENSITY_8,		\
	.mcs		= {					\
		.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, },	\
		.rx_highest = cpu_to_le16(300),			\
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,	\
	},							\
}

static struct ieee80211_supported_band carl9170_band_2GHz = {
	.channels	= carl9170_2ghz_chantable,
	.n_channels	= ARRAY_SIZE(carl9170_2ghz_chantable),
	.bitrates	= carl9170_g_ratetable,
	.n_bitrates	= carl9170_g_ratetable_size,
	.ht_cap		= CARL9170_HT_CAP,
};

static struct ieee80211_supported_band carl9170_band_5GHz = {
	.channels	= carl9170_5ghz_chantable,
	.n_channels	= ARRAY_SIZE(carl9170_5ghz_chantable),
	.bitrates	= carl9170_a_ratetable,
	.n_bitrates	= carl9170_a_ratetable_size,
	.ht_cap		= CARL9170_HT_CAP,
};
static void carl9170_ampdu_gc(struct ar9170 *ar)
{
	struct carl9170_sta_tid *tid_info;
	LIST_HEAD(tid_gc);

	rcu_read_lock();
	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		spin_lock_bh(&ar->tx_ampdu_list_lock);
		if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
			tid_info->state = CARL9170_TID_STATE_KILLED;
			list_del_rcu(&tid_info->list);
			ar->tx_ampdu_list_len--;
			list_add_tail(&tid_info->tmp_list, &tid_gc);
		}
		spin_unlock_bh(&ar->tx_ampdu_list_lock);
	}
	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
	rcu_read_unlock();

	synchronize_rcu();

	while (!list_empty(&tid_gc)) {
		struct sk_buff *skb;
		tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
					    tmp_list);

		while ((skb = __skb_dequeue(&tid_info->queue)))
			carl9170_tx_status(ar, skb, false);

		list_del_init(&tid_info->tmp_list);
		kfree(tid_info);
	}
}
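
/*
 * Note on the pattern above: dying TIDs are only unlinked (under
 * tx_ampdu_list_lock) and moved onto a private gc list first; the
 * synchronize_rcu() then makes sure no list_for_each_entry_rcu()
 * walker can still see them before their queues are flushed and the
 * structures are finally freed.
 */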
static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
{
	if (drop_queued) {
		int i;

		/*
		 * We can only drop frames which have not been uploaded
		 * to the device yet.
		 */

		for (i = 0; i < ar->hw->queues; i++) {
			struct sk_buff *skb;

			while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
				struct ieee80211_tx_info *info;

				info = IEEE80211_SKB_CB(skb);
				if (info->flags & IEEE80211_TX_CTL_AMPDU)
					atomic_dec(&ar->tx_ampdu_upload);

				carl9170_tx_status(ar, skb, false);
			}
		}
	}

	/* Wait for all other outstanding frames to timeout. */
	if (atomic_read(&ar->tx_total_queued))
		WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
}
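
/*
 * The tx_flush completion is expected to be signalled from the tx
 * status/janitor path (see tx.c) once tx_total_queued drains to zero;
 * if that does not happen within one second the WARN_ON above fires.
 */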
static void carl9170_flush_ba(struct ar9170 *ar)
{
	struct sk_buff_head free;
	struct carl9170_sta_tid *tid_info;
	struct sk_buff *skb;

	__skb_queue_head_init(&free);

	rcu_read_lock();
	spin_lock_bh(&ar->tx_ampdu_list_lock);
	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
			tid_info->state = CARL9170_TID_STATE_SUSPEND;

			spin_lock(&tid_info->lock);
			while ((skb = __skb_dequeue(&tid_info->queue)))
				__skb_queue_tail(&free, skb);
			spin_unlock(&tid_info->lock);
		}
	}
	spin_unlock_bh(&ar->tx_ampdu_list_lock);
	rcu_read_unlock();

	while ((skb = __skb_dequeue(&free)))
		carl9170_tx_status(ar, skb, false);
}
static void carl9170_zap_queues(struct ar9170 *ar)
{
	struct carl9170_vif_info *cvif;
	unsigned int i;

	carl9170_ampdu_gc(ar);

	carl9170_flush_ba(ar);
	carl9170_flush(ar, true);

	for (i = 0; i < ar->hw->queues; i++) {
		spin_lock_bh(&ar->tx_status[i].lock);
		while (!skb_queue_empty(&ar->tx_status[i])) {
			struct sk_buff *skb;

			skb = skb_peek(&ar->tx_status[i]);
			carl9170_tx_get_skb(skb);
			spin_unlock_bh(&ar->tx_status[i].lock);
			carl9170_tx_drop(ar, skb);
			spin_lock_bh(&ar->tx_status[i].lock);
			carl9170_tx_put_skb(skb);
		}
		spin_unlock_bh(&ar->tx_status[i].lock);
	}

	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);

	/* reinitialize queues statistics */
	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
	for (i = 0; i < ar->hw->queues; i++)
		ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;

	for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
		ar->mem_bitmap[i] = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(cvif->beacon);
		cvif->beacon = NULL;
		spin_unlock_bh(&ar->beacon_lock);
	}
	rcu_read_unlock();

	atomic_set(&ar->tx_ampdu_upload, 0);
	atomic_set(&ar->tx_ampdu_scheduler, 0);
	atomic_set(&ar->tx_total_pending, 0);
	atomic_set(&ar->tx_total_queued, 0);
	atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
}
#define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)	\
do {								\
	queue.aifs = ai_fs;					\
	queue.cw_min = cwmin;					\
	queue.cw_max = cwmax;					\
	queue.txop = _txop;					\
} while (0)
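
/*
 * The QoS defaults programmed in carl9170_op_start() below follow the
 * usual 802.11 EDCA parameter set: e.g. the voice (VO) queue gets
 * AIFS 2, CWmin 3, CWmax 7 and a TXOP of 47 (TXOP values here are
 * presumably in the 32 us units used by mac80211, i.e. roughly 1.5 ms).
 */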
static int carl9170_op_start(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;
	int err, i;

	mutex_lock(&ar->mutex);

	carl9170_zap_queues(ar);

	/* reset QoS defaults */
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);

	ar->current_factor = ar->current_density = -1;
	/* "The first key is unique." */
	ar->usedkeys = 1;
	ar->filter_state = 0;
	ar->ps.last_action = jiffies;
	ar->ps.last_slept = jiffies;
	ar->erp_mode = CARL9170_ERP_AUTO;
	ar->rx_software_decryption = false;
	ar->disable_offload = false;

	for (i = 0; i < ar->hw->queues; i++) {
		ar->queue_stop_timeout[i] = jiffies;
		ar->max_queue_stop_timeout[i] = 0;
	}

	atomic_set(&ar->mem_allocs, 0);

	err = carl9170_usb_open(ar);
	if (err)
		goto out;

	err = carl9170_init_mac(ar);
	if (err)
		goto out;

	err = carl9170_set_qos(ar);
	if (err)
		goto out;

	if (ar->fw.rx_filter) {
		err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
			CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
		if (err)
			goto out;
	}

	err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
				 AR9170_DMA_TRIGGER_RXQ);
	if (err)
		goto out;

	/* Clear key-cache */
	for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
					  0, NULL, 0);
		if (err)
			goto out;

		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
					  1, NULL, 0);
		if (err)
			goto out;

		if (i < AR9170_CAM_MAX_USER) {
			err = carl9170_disable_key(ar, i);
			if (err)
				goto out;
		}
	}

	carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);

	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));

	ieee80211_wake_queues(ar->hw);
	err = 0;

out:
	mutex_unlock(&ar->mutex);
	return err;
}
static void carl9170_cancel_worker(struct ar9170 *ar)
{
	cancel_delayed_work_sync(&ar->stat_work);
	cancel_delayed_work_sync(&ar->tx_janitor);
#ifdef CONFIG_CARL9170_LEDS
	cancel_delayed_work_sync(&ar->led_work);
#endif /* CONFIG_CARL9170_LEDS */
	cancel_work_sync(&ar->ps_work);
	cancel_work_sync(&ar->ping_work);
	cancel_work_sync(&ar->ampdu_work);
}
static void carl9170_op_stop(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;

	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);

	ieee80211_stop_queues(ar->hw);

	mutex_lock(&ar->mutex);
	if (IS_ACCEPTING_CMD(ar)) {
		RCU_INIT_POINTER(ar->beacon_iter, NULL);

		carl9170_led_set_state(ar, 0);

		/* stop DMA */
		carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
		carl9170_usb_stop(ar);
	}

	carl9170_zap_queues(ar);
	mutex_unlock(&ar->mutex);

	carl9170_cancel_worker(ar);
}
static void carl9170_restart_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 restart_work);
	int err;

	ar->usedkeys = 0;
	ar->filter_state = 0;
	carl9170_cancel_worker(ar);

	mutex_lock(&ar->mutex);
	err = carl9170_usb_restart(ar);
	if (net_ratelimit()) {
		if (err) {
			dev_err(&ar->udev->dev, "Failed to restart device "
				"(%d).\n", err);
		} else {
			dev_info(&ar->udev->dev, "device restarted "
				 "successfully.\n");
		}
	}

	carl9170_zap_queues(ar);
	mutex_unlock(&ar->mutex);
	if (!err) {
		ar->restart_counter++;
		atomic_set(&ar->pending_restarts, 0);

		ieee80211_restart_hw(ar->hw);
	} else {
		/*
		 * The reset was unsuccessful and the device seems to
		 * be dead. But there's still one option: a low-level
		 * usb subsystem reset...
		 */

		carl9170_usb_reset(ar);
	}
}
void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
{
	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);

	/*
	 * Sometimes, an error can trigger several different reset events.
	 * By ignoring these *surplus* reset events, the device won't be
	 * killed again, right after it has recovered.
	 */
	if (atomic_inc_return(&ar->pending_restarts) > 1) {
		dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
		return;
	}

	ieee80211_stop_queues(ar->hw);

	dev_err(&ar->udev->dev, "restart device (%d)\n", r);

	if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
	    !WARN_ON(r >= __CARL9170_RR_LAST))
		ar->last_reason = r;

	if (!ar->registered)
		return;

	if (IS_ACCEPTING_CMD(ar) && !ar->needs_full_reset)
		ieee80211_queue_work(ar->hw, &ar->restart_work);
	else
		carl9170_usb_reset(ar);

	/*
	 * At this point, the device instance might have vanished/disabled.
	 * So, don't put any code here that accesses the ar9170 struct
	 * without proper protection.
	 */
}
static void carl9170_ping_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
	int err;

	if (!IS_STARTED(ar))
		return;

	mutex_lock(&ar->mutex);
	err = carl9170_echo_test(ar, 0xdeadbeef);
	if (err)
		carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
	mutex_unlock(&ar->mutex);
}
static int carl9170_init_interface(struct ar9170 *ar,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = &ar->common;
	int err;

	if (!vif) {
		WARN_ON_ONCE(IS_STARTED(ar));
		return 0;
	}

	memcpy(common->macaddr, vif->addr, ETH_ALEN);

	if (modparam_nohwcrypt ||
	    ((vif->type != NL80211_IFTYPE_STATION) &&
	     (vif->type != NL80211_IFTYPE_AP))) {
		ar->rx_software_decryption = true;
		ar->disable_offload = true;
	}

	err = carl9170_set_operating_mode(ar);
	return err;
}
static int carl9170_op_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif;
	struct ar9170 *ar = hw->priv;
	int vif_id = -1, err = 0;

	mutex_lock(&ar->mutex);
	rcu_read_lock();
	if (vif_priv->active) {
		/*
		 * Skip the interface structure initialization,
		 * if the vif survived the _restart call.
		 */
		vif_id = vif_priv->id;
		vif_priv->enable_beacon = false;

		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(vif_priv->beacon);
		vif_priv->beacon = NULL;
		spin_unlock_bh(&ar->beacon_lock);

		goto init;
	}

	main_vif = carl9170_get_main_vif(ar);

	if (main_vif) {
		switch (main_vif->type) {
		case NL80211_IFTYPE_STATION:
			if (vif->type == NL80211_IFTYPE_STATION)
				break;

			err = -EBUSY;
			rcu_read_unlock();

			goto unlock;

		case NL80211_IFTYPE_AP:
			if ((vif->type == NL80211_IFTYPE_STATION) ||
			    (vif->type == NL80211_IFTYPE_WDS) ||
			    (vif->type == NL80211_IFTYPE_AP))
				break;

			err = -EBUSY;
			rcu_read_unlock();
			goto unlock;

		default:
			rcu_read_unlock();
			goto unlock;
		}
	}

	vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);

	if (vif_id < 0) {
		rcu_read_unlock();

		err = -ENOSPC;
		goto unlock;
	}

	BUG_ON(ar->vif_priv[vif_id].id != vif_id);

	vif_priv->active = true;
	vif_priv->id = vif_id;
	vif_priv->enable_beacon = false;
	ar->vifs++;
	list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
	rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);

init:
	if (carl9170_get_main_vif(ar) == vif) {
		rcu_assign_pointer(ar->beacon_iter, vif_priv);
		rcu_read_unlock();

		err = carl9170_init_interface(ar, vif);
		if (err)
			goto unlock;
	} else {
		rcu_read_unlock();
		err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);

		if (err)
			goto unlock;
	}

	if (ar->fw.tx_seq_table) {
		err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
					 0);
		if (err)
			goto unlock;
	}

unlock:
	if (err && (vif_id >= 0)) {
		vif_priv->active = false;
		bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
		ar->vifs--;
		RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
		list_del_rcu(&vif_priv->list);
		mutex_unlock(&ar->mutex);
		synchronize_rcu();
	} else {
		if (ar->vifs > 1)
			ar->ps.off_override |= PS_OFF_VIF;

		mutex_unlock(&ar->mutex);
	}

	return err;
}
static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
{
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif;
	struct ar9170 *ar = hw->priv;
	unsigned int id;

	mutex_lock(&ar->mutex);

	if (WARN_ON_ONCE(!vif_priv->active))
		goto unlock;

	ar->vifs--;

	rcu_read_lock();
	main_vif = carl9170_get_main_vif(ar);

	id = vif_priv->id;

	vif_priv->active = false;
	WARN_ON(vif_priv->enable_beacon);
	vif_priv->enable_beacon = false;
	list_del_rcu(&vif_priv->list);
	RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);

	if (vif == main_vif) {
		rcu_read_unlock();

		if (ar->vifs) {
			WARN_ON(carl9170_init_interface(ar,
					carl9170_get_main_vif(ar)));
		} else {
			carl9170_set_operating_mode(ar);
		}
	} else {
		rcu_read_unlock();

		WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
	}

	carl9170_update_beacon(ar, false);
	carl9170_flush_cab(ar, id);

	spin_lock_bh(&ar->beacon_lock);
	dev_kfree_skb_any(vif_priv->beacon);
	vif_priv->beacon = NULL;
	spin_unlock_bh(&ar->beacon_lock);

	bitmap_release_region(&ar->vif_bitmap, id, 0);

	carl9170_set_beacon_timers(ar);

	if (ar->vifs == 1)
		ar->ps.off_override &= ~PS_OFF_VIF;

unlock:
	mutex_unlock(&ar->mutex);

	synchronize_rcu();
}
void carl9170_ps_check(struct ar9170 *ar)
{
	ieee80211_queue_work(ar->hw, &ar->ps_work);
}

/* caller must hold ar->mutex */
static int carl9170_ps_update(struct ar9170 *ar)
{
	bool ps = false;
	int err = 0;

	if (!ar->ps.off_override)
		ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);

	if (ps != ar->ps.state) {
		err = carl9170_powersave(ar, ps);
		if (err)
			return err;

		if (ar->ps.state && !ps) {
			ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
				ar->ps.last_action);
		}

		if (ps)
			ar->ps.last_slept = jiffies;

		ar->ps.last_action = jiffies;
		ar->ps.state = ps;
	}

	return 0;
}
static void carl9170_ps_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 ps_work);
	mutex_lock(&ar->mutex);
	if (IS_STARTED(ar))
		WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
	mutex_unlock(&ar->mutex);
}
static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
{
	int err;

	if (noise) {
		err = carl9170_get_noisefloor(ar);
		if (err)
			return err;
	}

	if (ar->fw.hw_counters) {
		err = carl9170_collect_tally(ar);
		if (err)
			return err;
	}

	if (flush)
		memset(&ar->tally, 0, sizeof(ar->tally));

	return 0;
}
static void carl9170_stat_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
	int err;

	mutex_lock(&ar->mutex);
	err = carl9170_update_survey(ar, false, true);
	mutex_unlock(&ar->mutex);

	if (err)
		return;

	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
}
static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ar9170 *ar = hw->priv;
	int err = 0;

	mutex_lock(&ar->mutex);
	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_PS) {
		err = carl9170_ps_update(ar);
		if (err)
			goto out;
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_SMPS) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		/* adjust slot time for 5 GHz */
		err = carl9170_set_slot_time(ar);
		if (err)
			goto out;

		err = carl9170_update_survey(ar, true, false);
		if (err)
			goto out;

		err = carl9170_set_channel(ar, hw->conf.channel,
			hw->conf.channel_type, CARL9170_RFI_NONE);
		if (err)
			goto out;

		err = carl9170_update_survey(ar, false, true);
		if (err)
			goto out;

		err = carl9170_set_dyn_sifs_ack(ar);
		if (err)
			goto out;

		err = carl9170_set_rts_cts_rate(ar);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;
}
static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
					 struct netdev_hw_addr_list *mc_list)
{
	struct netdev_hw_addr *ha;
	u64 mchash;

	/* always get broadcast frames */
	mchash = 1ULL << (0xff >> 2);

	netdev_hw_addr_list_for_each(ha, mc_list)
		mchash |= 1ULL << (ha->addr[5] >> 2);

	return mchash;
}
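
/*
 * The 64-bit hash above is indexed by the top six bits of the last
 * octet of the multicast address: for example, the broadcast address
 * ff:ff:ff:ff:ff:ff maps to bit (0xff >> 2) = 63, which is why that
 * bit is always pre-set.
 */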
static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
					 unsigned int changed_flags,
					 unsigned int *new_flags,
					 u64 multicast)
{
	struct ar9170 *ar = hw->priv;

	/* mask supported flags */
	*new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;

	if (!IS_ACCEPTING_CMD(ar))
		return;

	mutex_lock(&ar->mutex);

	ar->filter_state = *new_flags;
	/*
	 * We can support more by setting the sniffer bit and
	 * then checking the error flags, later.
	 */

	if (*new_flags & FIF_ALLMULTI)
		multicast = ~0ULL;

	if (multicast != ar->cur_mc_hash)
		WARN_ON(carl9170_update_multicast(ar, multicast));

	if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
		ar->sniffer_enabled = !!(*new_flags &
			(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));

		WARN_ON(carl9170_set_operating_mode(ar));
	}

	if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
		u32 rx_filter = 0;

		if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
			rx_filter |= CARL9170_RX_FILTER_BAD;

		if (!(*new_flags & FIF_CONTROL))
			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;

		if (!(*new_flags & FIF_PSPOLL))
			rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;

		if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
			rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
			rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
		}

		WARN_ON(carl9170_rx_filter(ar, rx_filter));
	}

	mutex_unlock(&ar->mutex);
}
static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_bss_conf *bss_conf,
					 u32 changed)
{
	struct ar9170 *ar = hw->priv;
	struct ath_common *common = &ar->common;
	int err = 0;
	struct carl9170_vif_info *vif_priv;
	struct ieee80211_vif *main_vif;

	mutex_lock(&ar->mutex);
	vif_priv = (void *) vif->drv_priv;
	main_vif = carl9170_get_main_vif(ar);
	if (WARN_ON(!main_vif))
		goto out;

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		struct carl9170_vif_info *iter;
		int i = 0;

		vif_priv->enable_beacon = bss_conf->enable_beacon;
		rcu_read_lock();
		list_for_each_entry_rcu(iter, &ar->vif_list, list) {
			if (iter->active && iter->enable_beacon)
				i++;
		}
		rcu_read_unlock();

		ar->beacon_enabled = i;
	}

	if (changed & BSS_CHANGED_BEACON) {
		err = carl9170_update_beacon(ar, false);
		if (err)
			goto out;
	}

	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
		       BSS_CHANGED_BEACON_INT)) {

		if (main_vif != vif) {
			bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
			bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
		}

		/*
		 * A hard limit on the broadcast traffic interval should
		 * therefore prevent false alarms.
		 */
		if (vif->type != NL80211_IFTYPE_STATION &&
		    (bss_conf->beacon_int * bss_conf->dtim_period >=
		     (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
			err = -EINVAL;
			goto out;
		}

		err = carl9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_HT) {
		/* TODO */
		err = 0;
		if (err)
			goto out;
	}

	if (main_vif != vif)
		goto out;

	/*
	 * The following settings can only be changed by the
	 * master interface.
	 */

	if (changed & BSS_CHANGED_BSSID) {
		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
		err = carl9170_set_operating_mode(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ASSOC) {
		ar->common.curaid = bss_conf->aid;
		err = carl9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		err = carl9170_set_slot_time(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		err = carl9170_set_mac_rates(ar);
		if (err)
			goto out;
	}

out:
	WARN_ON_ONCE(err && IS_STARTED(ar));
	mutex_unlock(&ar->mutex);
}
static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_tsf_rsp tsf;
	int err;

	mutex_lock(&ar->mutex);
	err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
				0, NULL, sizeof(tsf), &tsf);
	mutex_unlock(&ar->mutex);
	if (WARN_ON(err))
		return 0;

	return le64_to_cpu(tsf.tsf_64);
}
static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
{
	struct ar9170 *ar = hw->priv;
	int err = 0, i;
	u8 ktype;

	if (ar->disable_offload || !vif)
		return -EOPNOTSUPP;

	/*
	 * We have to fall back to software encryption whenever
	 * the user chooses to participate in an IBSS or is connected
	 * to more than one network.
	 *
	 * This is very unfortunate, because some machines cannot handle
	 * the high throughput speed of 802.11n networks.
	 */

	if (!is_main_vif(ar, vif)) {
		mutex_lock(&ar->mutex);
		goto err_softw;
	}

	/*
	 * While the hardware does support a *catch-all* key for offloading
	 * group-key en-/decryption, how the hardware decides which keyId
	 * maps to which key remains a mystery...
	 */
	if ((vif->type != NL80211_IFTYPE_STATION &&
	     vif->type != NL80211_IFTYPE_ADHOC) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		ktype = AR9170_ENC_ALG_WEP64;
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		ktype = AR9170_ENC_ALG_WEP128;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ktype = AR9170_ENC_ALG_TKIP;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		ktype = AR9170_ENC_ALG_AESCCMP;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mutex_lock(&ar->mutex);
	if (cmd == SET_KEY) {
		if (!IS_STARTED(ar)) {
			err = -EOPNOTSUPP;
			goto out;
		}

		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
			sta = NULL;

			i = 64 + key->keyidx;
		} else {
			for (i = 0; i < 64; i++)
				if (!(ar->usedkeys & BIT(i)))
					break;
			if (i == 64)
				goto err_softw;
		}

		key->hw_key_idx = i;

		err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
					  ktype, 0, key->key,
					  min_t(u8, 16, key->keylen));
		if (err)
			goto out;

		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			err = carl9170_upload_key(ar, i, sta ? sta->addr :
						  NULL, ktype, 1,
						  key->key + 16, 16);
			if (err)
				goto out;

			/*
			 * The hardware is not capable of generating the
			 * MMIC for fragmented frames!
			 */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		}

		if (i < 64)
			ar->usedkeys |= BIT(i);

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	} else {
		if (!IS_STARTED(ar)) {
			/* The device is gone... together with the key ;-) */
			err = 0;
			goto out;
		}

		if (key->hw_key_idx < 64) {
			ar->usedkeys &= ~BIT(key->hw_key_idx);
		} else {
			err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
						  AR9170_ENC_ALG_NONE, 0,
						  NULL, 0);
			if (err)
				goto out;

			if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
				err = carl9170_upload_key(ar, key->hw_key_idx,
							  NULL,
							  AR9170_ENC_ALG_NONE,
							  1, NULL, 0);
				if (err)
					goto out;
			}
		}

		err = carl9170_disable_key(ar, key->hw_key_idx);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;

err_softw:
	if (!ar->rx_software_decryption) {
		ar->rx_software_decryption = true;
		carl9170_set_operating_mode(ar);
	}
	mutex_unlock(&ar->mutex);
	return -ENOSPC;
}
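
/*
 * Key-cache layout as used above: slots 0..63 hold pairwise keys and
 * are tracked in the ar->usedkeys bitmap (slot 0 is reserved at start
 * time, "the first key is unique"), while group keys live at the
 * fixed offsets 64 + keyidx.
 */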
static int carl9170_op_sta_add(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta)
{
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	unsigned int i;

	atomic_set(&sta_info->pending_frames, 0);

	if (sta->ht_cap.ht_supported) {
		if (sta->ht_cap.ampdu_density > 6) {
			/*
			 * The HW does not support a 16us AMPDU density;
			 * no HT-Xmit for this station.
			 */

			return 0;
		}

		for (i = 0; i < CARL9170_NUM_TID; i++)
			RCU_INIT_POINTER(sta_info->agg[i], NULL);

		sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
		sta_info->ht_sta = true;
	}

	return 0;
}
static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	unsigned int i;
	bool cleanup = false;

	if (sta->ht_cap.ht_supported) {
		sta_info->ht_sta = false;

		rcu_read_lock();
		for (i = 0; i < CARL9170_NUM_TID; i++) {
			struct carl9170_sta_tid *tid_info;

			tid_info = rcu_dereference(sta_info->agg[i]);
			RCU_INIT_POINTER(sta_info->agg[i], NULL);

			if (!tid_info)
				continue;

			spin_lock_bh(&ar->tx_ampdu_list_lock);
			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
			spin_unlock_bh(&ar->tx_ampdu_list_lock);
			cleanup = true;
		}
		rcu_read_unlock();

		if (cleanup)
			carl9170_ampdu_gc(ar);
	}

	return 0;
}
static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif, u16 queue,
			       const struct ieee80211_tx_queue_params *param)
{
	struct ar9170 *ar = hw->priv;
	int ret;

	mutex_lock(&ar->mutex);
	if (queue < ar->hw->queues) {
		memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
		ret = carl9170_set_qos(ar);
	} else {
		ret = -EINVAL;
	}

	mutex_unlock(&ar->mutex);
	return ret;
}
static void carl9170_ampdu_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 ampdu_work);

	if (!IS_STARTED(ar))
		return;

	mutex_lock(&ar->mutex);
	carl9170_ampdu_gc(ar);
	mutex_unlock(&ar->mutex);
}
static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    enum ieee80211_ampdu_mlme_action action,
				    struct ieee80211_sta *sta,
				    u16 tid, u16 *ssn, u8 buf_size)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	struct carl9170_sta_tid *tid_info;

	if (modparam_noht)
		return -EOPNOTSUPP;

	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		if (!sta_info->ht_sta)
			return -EOPNOTSUPP;

		rcu_read_lock();
		if (rcu_dereference(sta_info->agg[tid])) {
			rcu_read_unlock();
			return -EBUSY;
		}

		tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
				   GFP_ATOMIC);
		if (!tid_info) {
			rcu_read_unlock();
			return -ENOMEM;
		}

		tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
		tid_info->state = CARL9170_TID_STATE_PROGRESS;
		tid_info->tid = tid;
		tid_info->max = sta_info->ampdu_max_len;

		INIT_LIST_HEAD(&tid_info->list);
		INIT_LIST_HEAD(&tid_info->tmp_list);
		skb_queue_head_init(&tid_info->queue);
		spin_lock_init(&tid_info->lock);

		spin_lock_bh(&ar->tx_ampdu_list_lock);
		ar->tx_ampdu_list_len++;
		list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
		rcu_assign_pointer(sta_info->agg[tid], tid_info);
		spin_unlock_bh(&ar->tx_ampdu_list_lock);
		rcu_read_unlock();

		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IEEE80211_AMPDU_TX_STOP:
		rcu_read_lock();
		tid_info = rcu_dereference(sta_info->agg[tid]);
		if (tid_info) {
			spin_lock_bh(&ar->tx_ampdu_list_lock);
			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
			spin_unlock_bh(&ar->tx_ampdu_list_lock);
		}

		RCU_INIT_POINTER(sta_info->agg[tid], NULL);
		rcu_read_unlock();

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		ieee80211_queue_work(ar->hw, &ar->ampdu_work);
		break;

	case IEEE80211_AMPDU_TX_OPERATIONAL:
		rcu_read_lock();
		tid_info = rcu_dereference(sta_info->agg[tid]);

		sta_info->stats[tid].clear = true;
		sta_info->stats[tid].req = false;

		if (tid_info) {
			bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
			tid_info->state = CARL9170_TID_STATE_IDLE;
		}
		rcu_read_unlock();

		if (WARN_ON_ONCE(!tid_info))
			return -EFAULT;

		break;

	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		/* Handled by hardware */
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
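
/*
 * A rough sketch of the TID life cycle as visible in this file:
 * PROGRESS (TX_START) -> IDLE (TX_OPERATIONAL) -> SUSPEND (flush) ->
 * SHUTDOWN (TX_STOP / sta_remove) -> KILLED (freed by the ampdu
 * garbage collector). The transmit path in tx.c moves sessions
 * between IDLE and its own scheduling states.
 */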
#ifdef CONFIG_CARL9170_WPC
static int carl9170_register_wps_button(struct ar9170 *ar)
{
	struct input_dev *input;
	int err;

	if (!(ar->features & CARL9170_WPS_BUTTON))
		return 0;

	input = input_allocate_device();
	if (!input)
		return -ENOMEM;

	snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
		 wiphy_name(ar->hw->wiphy));

	snprintf(ar->wps.phys, sizeof(ar->wps.phys),
		 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));

	input->name = ar->wps.name;
	input->phys = ar->wps.phys;
	input->id.bustype = BUS_USB;
	input->dev.parent = &ar->hw->wiphy->dev;

	input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);

	err = input_register_device(input);
	if (err) {
		input_free_device(input);
		return err;
	}

	ar->wps.pbc = input;
	return 0;
}
#endif /* CONFIG_CARL9170_WPC */
#ifdef CONFIG_CARL9170_HWRNG
static int carl9170_rng_get(struct ar9170 *ar)
{

#define RW	(CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
#define RB	(CARL9170_MAX_CMD_PAYLOAD_LEN)

	static const __le32 rng_load[RW] = {
		[0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};

	u32 buf[RW];

	unsigned int i, off = 0, transfer, count;
	int err;

	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);

	if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
		return -EAGAIN;

	count = ARRAY_SIZE(ar->rng.cache);
	while (count) {
		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
					RB, (u8 *) rng_load,
					RB, (u8 *) buf);
		if (err)
			return err;

		transfer = min_t(unsigned int, count, RW);
		for (i = 0; i < transfer; i++)
			ar->rng.cache[off + i] = buf[i];

		off += transfer;
		count -= transfer;
	}

	ar->rng.cache_idx = 0;

#undef RW
#undef RB
	return 0;
}

static int carl9170_rng_read(struct hwrng *rng, u32 *data)
{
	struct ar9170 *ar = (struct ar9170 *)rng->priv;
	int ret = -EIO;

	mutex_lock(&ar->mutex);
	if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
		ret = carl9170_rng_get(ar);
		if (ret) {
			mutex_unlock(&ar->mutex);
			return ret;
		}
	}

	*data = ar->rng.cache[ar->rng.cache_idx++];
	mutex_unlock(&ar->mutex);

	return sizeof(u16);
}

static void carl9170_unregister_hwrng(struct ar9170 *ar)
{
	if (ar->rng.initialized) {
		hwrng_unregister(&ar->rng.rng);
		ar->rng.initialized = false;
	}
}

static int carl9170_register_hwrng(struct ar9170 *ar)
{
	int err;

	snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
		 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
	ar->rng.rng.name = ar->rng.name;
	ar->rng.rng.data_read = carl9170_rng_read;
	ar->rng.rng.priv = (unsigned long)ar;

	if (WARN_ON(ar->rng.initialized))
		return -EALREADY;

	err = hwrng_register(&ar->rng.rng);
	if (err) {
		dev_err(&ar->udev->dev, "Failed to register the random "
			"number generator (%d)\n", err);
		return err;
	}

	ar->rng.initialized = true;

	err = carl9170_rng_get(ar);
	if (err) {
		carl9170_unregister_hwrng(ar);
		return err;
	}

	return 0;
}
#endif /* CONFIG_CARL9170_HWRNG */
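
/*
 * The hwrng hook above refills its cache with one CMD_RREG that reads
 * AR9170_RAND_REG_NUM over and over (one register offset per response
 * word). Each data_read call then hands out sizeof(u16) = 2 bytes,
 * presumably because only 16 bits of each sample are usable entropy.
 */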
static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
				  struct survey_info *survey)
{
	struct ar9170 *ar = hw->priv;
	struct ieee80211_channel *chan;
	struct ieee80211_supported_band *band;
	int err, b, i;

	chan = ar->channel;
	if (!chan)
		return -ENODEV;

	if (idx == chan->hw_value) {
		mutex_lock(&ar->mutex);
		err = carl9170_update_survey(ar, false, true);
		mutex_unlock(&ar->mutex);
		if (err)
			return err;
	}

	for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
		band = ar->hw->wiphy->bands[b];

		if (!band)
			continue;

		for (i = 0; i < band->n_channels; i++) {
			if (band->channels[i].hw_value == idx) {
				chan = &band->channels[i];
				goto found;
			}
		}
	}
	return -ENOENT;

found:
	memcpy(survey, &ar->survey[idx], sizeof(*survey));

	survey->channel = chan;
	survey->filled = SURVEY_INFO_NOISE_DBM;

	if (ar->channel == chan)
		survey->filled |= SURVEY_INFO_IN_USE;

	if (ar->fw.hw_counters) {
		survey->filled |= SURVEY_INFO_CHANNEL_TIME |
				  SURVEY_INFO_CHANNEL_TIME_BUSY |
				  SURVEY_INFO_CHANNEL_TIME_TX;
	}

	return 0;
}
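
/*
 * Note that the survey index handed in by mac80211 is matched against
 * the channel hw_value here: the chantables above assign each channel
 * a unique hw_value (0..48) and ar->survey[] is indexed directly by
 * that value.
 */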
static void carl9170_op_flush(struct ieee80211_hw *hw, bool drop)
{
	struct ar9170 *ar = hw->priv;
	unsigned int vid;

	mutex_lock(&ar->mutex);
	for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
		carl9170_flush_cab(ar, vid);

	carl9170_flush(ar, drop);
	mutex_unlock(&ar->mutex);
}

static int carl9170_op_get_stats(struct ieee80211_hw *hw,
				 struct ieee80211_low_level_stats *stats)
{
	struct ar9170 *ar = hw->priv;

	memset(stats, 0, sizeof(*stats));
	stats->dot11ACKFailureCount = ar->tx_ack_failures;
	stats->dot11FCSErrorCount = ar->tx_fcs_errors;
	return 0;
}
static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum sta_notify_cmd cmd,
				   struct ieee80211_sta *sta)
{
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;

	switch (cmd) {
	case STA_NOTIFY_SLEEP:
		sta_info->sleeping = true;
		if (atomic_read(&sta_info->pending_frames))
			ieee80211_sta_block_awake(hw, sta, true);
		break;

	case STA_NOTIFY_AWAKE:
		sta_info->sleeping = false;
		break;
	}
}

static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;

	return !!atomic_read(&ar->tx_total_queued);
}
static const struct ieee80211_ops carl9170_ops = {
	.start			= carl9170_op_start,
	.stop			= carl9170_op_stop,
	.tx			= carl9170_op_tx,
	.flush			= carl9170_op_flush,
	.add_interface		= carl9170_op_add_interface,
	.remove_interface	= carl9170_op_remove_interface,
	.config			= carl9170_op_config,
	.prepare_multicast	= carl9170_op_prepare_multicast,
	.configure_filter	= carl9170_op_configure_filter,
	.conf_tx		= carl9170_op_conf_tx,
	.bss_info_changed	= carl9170_op_bss_info_changed,
	.get_tsf		= carl9170_op_get_tsf,
	.set_key		= carl9170_op_set_key,
	.sta_add		= carl9170_op_sta_add,
	.sta_remove		= carl9170_op_sta_remove,
	.sta_notify		= carl9170_op_sta_notify,
	.get_survey		= carl9170_op_get_survey,
	.get_stats		= carl9170_op_get_stats,
	.ampdu_action		= carl9170_op_ampdu_action,
	.tx_frames_pending	= carl9170_tx_frames_pending,
};
void *carl9170_alloc(size_t priv_size)
{
	struct ieee80211_hw *hw;
	struct ar9170 *ar;
	struct sk_buff *skb;
	int i;

	/*
	 * this buffer is used for rx stream reconstruction.
	 * Under heavy load this device (or the transport layer?)
	 * tends to split the streams into separate rx descriptors.
	 */

	skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
	if (!skb)
		goto err_nomem;

	hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
	if (!hw)
		goto err_nomem;

	ar = hw->priv;
	ar->hw = hw;
	ar->rx_failover = skb;

	memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
	ar->rx_has_plcp = false;

	/*
	 * Here's a hidden pitfall!
	 *
	 * All 4 AC queues work perfectly well under _legacy_ operation.
	 * However as soon as aggregation is enabled, the traffic flow
	 * gets very bumpy. Therefore we have to _switch_ to a
	 * software AC with a single HW queue.
	 */
	hw->queues = __AR9170_NUM_TXQ;

	mutex_init(&ar->mutex);
	spin_lock_init(&ar->beacon_lock);
	spin_lock_init(&ar->cmd_lock);
	spin_lock_init(&ar->tx_stats_lock);
	spin_lock_init(&ar->tx_ampdu_list_lock);
	spin_lock_init(&ar->mem_lock);
	spin_lock_init(&ar->state_lock);
	atomic_set(&ar->pending_restarts, 0);
	ar->vifs = 0;
	for (i = 0; i < ar->hw->queues; i++) {
		skb_queue_head_init(&ar->tx_status[i]);
		skb_queue_head_init(&ar->tx_pending[i]);
	}
	INIT_WORK(&ar->ps_work, carl9170_ps_work);
	INIT_WORK(&ar->ping_work, carl9170_ping_work);
	INIT_WORK(&ar->restart_work, carl9170_restart_work);
	INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
	INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
	INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
	INIT_LIST_HEAD(&ar->tx_ampdu_list);
	rcu_assign_pointer(ar->tx_ampdu_iter,
			   (struct carl9170_sta_tid *) &ar->tx_ampdu_list);

	bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
	INIT_LIST_HEAD(&ar->vif_list);
	init_completion(&ar->tx_flush);

	/* firmware decides which modes we support */
	hw->wiphy->interface_modes = 0;

	hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
		     IEEE80211_HW_REPORTS_TX_ACK_STATUS |
		     IEEE80211_HW_SUPPORTS_PS |
		     IEEE80211_HW_PS_NULLFUNC_STACK |
		     IEEE80211_HW_NEED_DTIM_PERIOD |
		     IEEE80211_HW_SIGNAL_DBM;

	if (!modparam_noht) {
		/*
		 * see the comment above, why we allow the user
		 * to disable HT by a module parameter.
		 */
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
	}

	hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
	hw->sta_data_size = sizeof(struct carl9170_sta_info);
	hw->vif_data_size = sizeof(struct carl9170_vif_info);

	hw->max_rates = CARL9170_TX_MAX_RATES;
	hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;

	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */

	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
	return ar;

err_nomem:
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}
static int carl9170_read_eeprom(struct ar9170 *ar)
{
#define RW	8	/* number of words to read at once */
#define RB	(sizeof(u32) * RW)
	u8 *eeprom = (void *)&ar->eeprom;
	__le32 offsets[RW];
	int i, j, err;

	BUILD_BUG_ON(sizeof(ar->eeprom) & 3);

	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
#ifndef __CHECKER__
	/* don't want to handle trailing remains */
	BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
#endif

	for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
		for (j = 0; j < RW; j++)
			offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
				RB * i + 4 * j);

		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
					RB, (u8 *) &offsets,
					RB, eeprom + RB * i);
		if (err)
			return err;
	}

#undef RW
#undef RB
	return 0;
}
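
/*
 * Worked example of the loop above: with RW = 8 the EEPROM is pulled
 * in 32-byte chunks, each CMD_RREG carrying eight consecutive 32-bit
 * register offsets starting at AR9170_EEPROM_START, and the response
 * is copied straight into the matching slice of ar->eeprom.
 */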
static int carl9170_parse_eeprom(struct ar9170 *ar)
{
	struct ath_regulatory *regulatory = &ar->common.regulatory;
	unsigned int rx_streams, tx_streams, tx_params = 0;
	int bands = 0;
	int chans = 0;

	if (ar->eeprom.length == cpu_to_le16(0xffff))
		return -ENODATA;

	rx_streams = hweight8(ar->eeprom.rx_mask);
	tx_streams = hweight8(ar->eeprom.tx_mask);

	if (rx_streams != tx_streams) {
		tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;

		WARN_ON(!(tx_streams >= 1 && tx_streams <=
			  IEEE80211_HT_MCS_TX_MAX_STREAMS));

		tx_params |= (tx_streams - 1) <<
			     IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;

		carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
		carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
	}

	if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
		ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&carl9170_band_2GHz;
		chans += carl9170_band_2GHz.n_channels;
		bands++;
	}
	if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
		ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&carl9170_band_5GHz;
		chans += carl9170_band_5GHz.n_channels;
		bands++;
	}

	if (!bands)
		return -EINVAL;

	ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL);
	if (!ar->survey)
		return -ENOMEM;
	ar->num_channels = chans;

	/*
	 * I measured this, a bandswitch takes roughly
	 * 135 ms and a frequency switch about 80.
	 *
	 * FIXME: measure these values again once EEPROM settings
	 *	  are used, that will influence them!
	 */
	if (bands == 2)
		ar->hw->channel_change_time = 135 * 1000;
	else
		ar->hw->channel_change_time = 80 * 1000;

	regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);

	/* second part of wiphy init */
	SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);

	return 0;
}
static int carl9170_reg_notifier(struct wiphy *wiphy,
				 struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ar9170 *ar = hw->priv;

	return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
}
int carl9170_register(struct ar9170 *ar)
{
	struct ath_regulatory *regulatory = &ar->common.regulatory;
	int err = 0, i;

	if (WARN_ON(ar->mem_bitmap))
		return -EINVAL;

	ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
				 sizeof(unsigned long), GFP_KERNEL);

	if (!ar->mem_bitmap)
		return -ENOMEM;

	/* try to read EEPROM, init MAC addr */
	err = carl9170_read_eeprom(ar);
	if (err)
		return err;

	err = carl9170_fw_fix_eeprom(ar);
	if (err)
		return err;

	err = carl9170_parse_eeprom(ar);
	if (err)
		return err;

	err = ath_regd_init(regulatory, ar->hw->wiphy,
			    carl9170_reg_notifier);
	if (err)
		return err;

	if (modparam_noht) {
		carl9170_band_2GHz.ht_cap.ht_supported = false;
		carl9170_band_5GHz.ht_cap.ht_supported = false;
	}

	for (i = 0; i < ar->fw.vif_num; i++) {
		ar->vif_priv[i].id = i;
		ar->vif_priv[i].vif = NULL;
	}

	err = ieee80211_register_hw(ar->hw);
	if (err)
		return err;

	/* mac80211 interface is now registered */
	ar->registered = true;

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(ar->hw->wiphy, regulatory->alpha2);

#ifdef CONFIG_CARL9170_DEBUGFS
	carl9170_debugfs_register(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */

	err = carl9170_led_init(ar);
	if (err)
		goto err_unreg;

#ifdef CONFIG_CARL9170_LEDS
	err = carl9170_led_register(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_LEDS */

#ifdef CONFIG_CARL9170_WPC
	err = carl9170_register_wps_button(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_WPC */

#ifdef CONFIG_CARL9170_HWRNG
	err = carl9170_register_hwrng(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_HWRNG */

	dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
		 wiphy_name(ar->hw->wiphy));

	return 0;

err_unreg:
	carl9170_unregister(ar);
	return err;
}
void carl9170_unregister(struct ar9170 *ar)
{
	if (!ar->registered)
		return;

	ar->registered = false;

#ifdef CONFIG_CARL9170_LEDS
	carl9170_led_unregister(ar);
#endif /* CONFIG_CARL9170_LEDS */

#ifdef CONFIG_CARL9170_DEBUGFS
	carl9170_debugfs_unregister(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */

#ifdef CONFIG_CARL9170_WPC
	if (ar->wps.pbc) {
		input_unregister_device(ar->wps.pbc);
		ar->wps.pbc = NULL;
	}
#endif /* CONFIG_CARL9170_WPC */

#ifdef CONFIG_CARL9170_HWRNG
	carl9170_unregister_hwrng(ar);
#endif /* CONFIG_CARL9170_HWRNG */

	carl9170_cancel_worker(ar);
	cancel_work_sync(&ar->restart_work);

	ieee80211_unregister_hw(ar->hw);
}
void carl9170_free(struct ar9170 *ar)
{
	WARN_ON(ar->registered);
	WARN_ON(IS_INITIALIZED(ar));

	kfree_skb(ar->rx_failover);
	ar->rx_failover = NULL;

	kfree(ar->mem_bitmap);
	ar->mem_bitmap = NULL;

	kfree(ar->survey);
	ar->survey = NULL;

	mutex_destroy(&ar->mutex);

	ieee80211_free_hw(ar->hw);
}