drivers/net/wireless/ath/carl9170/main.c
1 /*
2 * Atheros CARL9170 driver
4 * mac80211 interaction code
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
40 #include <linux/slab.h>
41 #include <linux/module.h>
42 #include <linux/etherdevice.h>
43 #include <linux/random.h>
44 #include <net/mac80211.h>
45 #include <net/cfg80211.h>
46 #include "hw.h"
47 #include "carl9170.h"
48 #include "cmd.h"
50 static bool modparam_nohwcrypt;
51 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444);
52 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
54 int modparam_noht;
55 module_param_named(noht, modparam_noht, int, 0444);
56 MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
58 #define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
59 .bitrate = (_bitrate), \
60 .flags = (_flags), \
61 .hw_value = (_hw_rate) | (_txpidx) << 4, \
64 struct ieee80211_rate __carl9170_ratetable[] = {
65 RATE(10, 0, 0, 0),
66 RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
67 RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
68 RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
69 RATE(60, 0xb, 0, 0),
70 RATE(90, 0xf, 0, 0),
71 RATE(120, 0xa, 0, 0),
72 RATE(180, 0xe, 0, 0),
73 RATE(240, 0x9, 0, 0),
74 RATE(360, 0xd, 1, 0),
75 RATE(480, 0x8, 2, 0),
76 RATE(540, 0xc, 3, 0),
78 #undef RATE
80 #define carl9170_g_ratetable (__carl9170_ratetable + 0)
81 #define carl9170_g_ratetable_size 12
82 #define carl9170_a_ratetable (__carl9170_ratetable + 4)
83 #define carl9170_a_ratetable_size 8
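/* The 2 GHz ("g") table uses all twelve entries (four CCK plus eight
 * OFDM rates), while the 5 GHz ("a") table simply starts four entries
 * into the same array and reuses the eight OFDM rates.
 */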
86 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
87 * array in phy.c so that we don't have to do frequency lookups!
89 #define CHAN(_freq, _idx) { \
90 .center_freq = (_freq), \
91 .hw_value = (_idx), \
92 .max_power = 18, /* XXX */ \
95 static struct ieee80211_channel carl9170_2ghz_chantable[] = {
96 CHAN(2412, 0),
97 CHAN(2417, 1),
98 CHAN(2422, 2),
99 CHAN(2427, 3),
100 CHAN(2432, 4),
101 CHAN(2437, 5),
102 CHAN(2442, 6),
103 CHAN(2447, 7),
104 CHAN(2452, 8),
105 CHAN(2457, 9),
106 CHAN(2462, 10),
107 CHAN(2467, 11),
108 CHAN(2472, 12),
109 CHAN(2484, 13),
112 static struct ieee80211_channel carl9170_5ghz_chantable[] = {
113 CHAN(4920, 14),
114 CHAN(4940, 15),
115 CHAN(4960, 16),
116 CHAN(4980, 17),
117 CHAN(5040, 18),
118 CHAN(5060, 19),
119 CHAN(5080, 20),
120 CHAN(5180, 21),
121 CHAN(5200, 22),
122 CHAN(5220, 23),
123 CHAN(5240, 24),
124 CHAN(5260, 25),
125 CHAN(5280, 26),
126 CHAN(5300, 27),
127 CHAN(5320, 28),
128 CHAN(5500, 29),
129 CHAN(5520, 30),
130 CHAN(5540, 31),
131 CHAN(5560, 32),
132 CHAN(5580, 33),
133 CHAN(5600, 34),
134 CHAN(5620, 35),
135 CHAN(5640, 36),
136 CHAN(5660, 37),
137 CHAN(5680, 38),
138 CHAN(5700, 39),
139 CHAN(5745, 40),
140 CHAN(5765, 41),
141 CHAN(5785, 42),
142 CHAN(5805, 43),
143 CHAN(5825, 44),
144 CHAN(5170, 45),
145 CHAN(5190, 46),
146 CHAN(5210, 47),
147 CHAN(5230, 48),
149 #undef CHAN
151 #define CARL9170_HT_CAP \
153 .ht_supported = true, \
154 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
155 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
156 IEEE80211_HT_CAP_SGI_40 | \
157 IEEE80211_HT_CAP_DSSSCCK40 | \
158 IEEE80211_HT_CAP_SM_PS, \
159 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \
160 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
161 .mcs = { \
162 .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
163 .rx_highest = cpu_to_le16(300), \
164 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
165 }, \
168 static struct ieee80211_supported_band carl9170_band_2GHz = {
169 .channels = carl9170_2ghz_chantable,
170 .n_channels = ARRAY_SIZE(carl9170_2ghz_chantable),
171 .bitrates = carl9170_g_ratetable,
172 .n_bitrates = carl9170_g_ratetable_size,
173 .ht_cap = CARL9170_HT_CAP,
176 static struct ieee80211_supported_band carl9170_band_5GHz = {
177 .channels = carl9170_5ghz_chantable,
178 .n_channels = ARRAY_SIZE(carl9170_5ghz_chantable),
179 .bitrates = carl9170_a_ratetable,
180 .n_bitrates = carl9170_a_ratetable_size,
181 .ht_cap = CARL9170_HT_CAP,
184 static void carl9170_ampdu_gc(struct ar9170 *ar)
186 struct carl9170_sta_tid *tid_info;
187 LIST_HEAD(tid_gc);
189 rcu_read_lock();
190 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
191 spin_lock_bh(&ar->tx_ampdu_list_lock);
192 if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
193 tid_info->state = CARL9170_TID_STATE_KILLED;
194 list_del_rcu(&tid_info->list);
195 ar->tx_ampdu_list_len--;
196 list_add_tail(&tid_info->tmp_list, &tid_gc);
198 spin_unlock_bh(&ar->tx_ampdu_list_lock);
201 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
202 rcu_read_unlock();
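/* Wait for concurrent RCU readers that may still be walking
 * tx_ampdu_list before the collected tid_info entries and their
 * queued frames are torn down below.
 */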
204 synchronize_rcu();
206 while (!list_empty(&tid_gc)) {
207 struct sk_buff *skb;
208 tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
209 tmp_list);
211 while ((skb = __skb_dequeue(&tid_info->queue)))
212 carl9170_tx_status(ar, skb, false);
214 list_del_init(&tid_info->tmp_list);
215 kfree(tid_info);
219 static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
221 if (drop_queued) {
222 int i;
225 * We can only drop frames which have not been uploaded
226 * to the device yet.
229 for (i = 0; i < ar->hw->queues; i++) {
230 struct sk_buff *skb;
232 while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
233 struct ieee80211_tx_info *info;
235 info = IEEE80211_SKB_CB(skb);
236 if (info->flags & IEEE80211_TX_CTL_AMPDU)
237 atomic_dec(&ar->tx_ampdu_upload);
239 carl9170_tx_status(ar, skb, false);
244 /* Wait for all other outstanding frames to time out. */
245 if (atomic_read(&ar->tx_total_queued))
246 WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
249 static void carl9170_flush_ba(struct ar9170 *ar)
251 struct sk_buff_head free;
252 struct carl9170_sta_tid *tid_info;
253 struct sk_buff *skb;
255 __skb_queue_head_init(&free);
257 rcu_read_lock();
258 spin_lock_bh(&ar->tx_ampdu_list_lock);
259 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
260 if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
261 tid_info->state = CARL9170_TID_STATE_SUSPEND;
263 spin_lock(&tid_info->lock);
264 while ((skb = __skb_dequeue(&tid_info->queue)))
265 __skb_queue_tail(&free, skb);
266 spin_unlock(&tid_info->lock);
269 spin_unlock_bh(&ar->tx_ampdu_list_lock);
270 rcu_read_unlock();
272 while ((skb = __skb_dequeue(&free)))
273 carl9170_tx_status(ar, skb, false);
276 static void carl9170_zap_queues(struct ar9170 *ar)
278 struct carl9170_vif_info *cvif;
279 unsigned int i;
281 carl9170_ampdu_gc(ar);
283 carl9170_flush_ba(ar);
284 carl9170_flush(ar, true);
286 for (i = 0; i < ar->hw->queues; i++) {
287 spin_lock_bh(&ar->tx_status[i].lock);
288 while (!skb_queue_empty(&ar->tx_status[i])) {
289 struct sk_buff *skb;
291 skb = skb_peek(&ar->tx_status[i]);
292 carl9170_tx_get_skb(skb);
293 spin_unlock_bh(&ar->tx_status[i].lock);
294 carl9170_tx_drop(ar, skb);
295 spin_lock_bh(&ar->tx_status[i].lock);
296 carl9170_tx_put_skb(skb);
298 spin_unlock_bh(&ar->tx_status[i].lock);
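/* Sanity checks: the soft TX limit must allow at least one frame,
 * the hard limit may not be smaller than the soft limit, and the
 * hard limit has to fit into the block-ack window.
 */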
301 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
302 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
303 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
305 /* reinitialize queue statistics */
306 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
307 for (i = 0; i < ar->hw->queues; i++)
308 ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
310 for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
311 ar->mem_bitmap[i] = 0;
313 rcu_read_lock();
314 list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
315 spin_lock_bh(&ar->beacon_lock);
316 dev_kfree_skb_any(cvif->beacon);
317 cvif->beacon = NULL;
318 spin_unlock_bh(&ar->beacon_lock);
320 rcu_read_unlock();
322 atomic_set(&ar->tx_ampdu_upload, 0);
323 atomic_set(&ar->tx_ampdu_scheduler, 0);
324 atomic_set(&ar->tx_total_pending, 0);
325 atomic_set(&ar->tx_total_queued, 0);
326 atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
329 #define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
330 do { \
331 queue.aifs = ai_fs; \
332 queue.cw_min = cwmin; \
333 queue.cw_max = cwmax; \
334 queue.txop = _txop; \
335 } while (0)
337 static int carl9170_op_start(struct ieee80211_hw *hw)
339 struct ar9170 *ar = hw->priv;
340 int err, i;
342 mutex_lock(&ar->mutex);
344 carl9170_zap_queues(ar);
346 /* reset QoS defaults */
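/* 802.11 EDCA defaults; the arguments are AIFS, CWmin, CWmax and
 * TXOP (in units of 32 us, so 47 ~ 1.5 ms for VO and 94 ~ 3 ms for VI).
 */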
347 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
348 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
349 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
350 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
351 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
353 ar->current_factor = ar->current_density = -1;
354 /* "The first key is unique." */
355 ar->usedkeys = 1;
356 ar->filter_state = 0;
357 ar->ps.last_action = jiffies;
358 ar->ps.last_slept = jiffies;
359 ar->erp_mode = CARL9170_ERP_AUTO;
361 /* Set "disable hw crypto offload" whenever the module parameter
362 * nohwcrypt is true or if the firmware does not support it.
364 ar->disable_offload = modparam_nohwcrypt |
365 ar->fw.disable_offload_fw;
366 ar->rx_software_decryption = ar->disable_offload;
368 for (i = 0; i < ar->hw->queues; i++) {
369 ar->queue_stop_timeout[i] = jiffies;
370 ar->max_queue_stop_timeout[i] = 0;
373 atomic_set(&ar->mem_allocs, 0);
375 err = carl9170_usb_open(ar);
376 if (err)
377 goto out;
379 err = carl9170_init_mac(ar);
380 if (err)
381 goto out;
383 err = carl9170_set_qos(ar);
384 if (err)
385 goto out;
387 if (ar->fw.rx_filter) {
388 err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
389 CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
390 if (err)
391 goto out;
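/* Kick the RX queue DMA so the device starts delivering frames. */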
394 err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
395 AR9170_DMA_TRIGGER_RXQ);
396 if (err)
397 goto out;
399 /* Clear key-cache */
400 for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
401 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
402 0, NULL, 0);
403 if (err)
404 goto out;
406 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
407 1, NULL, 0);
408 if (err)
409 goto out;
411 if (i < AR9170_CAM_MAX_USER) {
412 err = carl9170_disable_key(ar, i);
413 if (err)
414 goto out;
418 carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
420 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
421 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
423 ieee80211_wake_queues(ar->hw);
424 err = 0;
426 out:
427 mutex_unlock(&ar->mutex);
428 return err;
431 static void carl9170_cancel_worker(struct ar9170 *ar)
433 cancel_delayed_work_sync(&ar->stat_work);
434 cancel_delayed_work_sync(&ar->tx_janitor);
435 #ifdef CONFIG_CARL9170_LEDS
436 cancel_delayed_work_sync(&ar->led_work);
437 #endif /* CONFIG_CARL9170_LEDS */
438 cancel_work_sync(&ar->ps_work);
439 cancel_work_sync(&ar->ping_work);
440 cancel_work_sync(&ar->ampdu_work);
443 static void carl9170_op_stop(struct ieee80211_hw *hw)
445 struct ar9170 *ar = hw->priv;
447 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
449 ieee80211_stop_queues(ar->hw);
451 mutex_lock(&ar->mutex);
452 if (IS_ACCEPTING_CMD(ar)) {
453 RCU_INIT_POINTER(ar->beacon_iter, NULL);
455 carl9170_led_set_state(ar, 0);
457 /* stop DMA */
458 carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
459 carl9170_usb_stop(ar);
462 carl9170_zap_queues(ar);
463 mutex_unlock(&ar->mutex);
465 carl9170_cancel_worker(ar);
468 static void carl9170_restart_work(struct work_struct *work)
470 struct ar9170 *ar = container_of(work, struct ar9170,
471 restart_work);
472 int err = -EIO;
474 ar->usedkeys = 0;
475 ar->filter_state = 0;
476 carl9170_cancel_worker(ar);
478 mutex_lock(&ar->mutex);
479 if (!ar->force_usb_reset) {
480 err = carl9170_usb_restart(ar);
481 if (net_ratelimit()) {
482 if (err)
483 dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
484 else
485 dev_info(&ar->udev->dev, "device restarted successfully.\n");
488 carl9170_zap_queues(ar);
489 mutex_unlock(&ar->mutex);
491 if (!err && !ar->force_usb_reset) {
492 ar->restart_counter++;
493 atomic_set(&ar->pending_restarts, 0);
495 ieee80211_restart_hw(ar->hw);
496 } else {
498 * The reset was unsuccessful and the device seems to
499 * be dead. But there's still one option: a low-level
500 * usb subsystem reset...
503 carl9170_usb_reset(ar);
507 void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
509 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
512 * Sometimes, an error can trigger several different reset events.
513 * By ignoring these *surplus* reset events, the device won't be
514 * killed again right after it has recovered.
516 if (atomic_inc_return(&ar->pending_restarts) > 1) {
517 dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
518 return;
521 ieee80211_stop_queues(ar->hw);
523 dev_err(&ar->udev->dev, "restart device (%d)\n", r);
525 if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
526 !WARN_ON(r >= __CARL9170_RR_LAST))
527 ar->last_reason = r;
529 if (!ar->registered)
530 return;
532 if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
533 ar->force_usb_reset = true;
535 ieee80211_queue_work(ar->hw, &ar->restart_work);
538 * At this point, the device instance might have vanished or been disabled.
539 * So, don't put any code here that accesses the ar9170 struct
540 * without proper protection.
544 static void carl9170_ping_work(struct work_struct *work)
546 struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
547 int err;
549 if (!IS_STARTED(ar))
550 return;
552 mutex_lock(&ar->mutex);
553 err = carl9170_echo_test(ar, 0xdeadbeef);
554 if (err)
555 carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
556 mutex_unlock(&ar->mutex);
559 static int carl9170_init_interface(struct ar9170 *ar,
560 struct ieee80211_vif *vif)
562 struct ath_common *common = &ar->common;
563 int err;
565 if (!vif) {
566 WARN_ON_ONCE(IS_STARTED(ar));
567 return 0;
570 memcpy(common->macaddr, vif->addr, ETH_ALEN);
572 /* We have to fall back to software crypto whenever
573 * the user chooses to participate in an IBSS. HW
574 * offload for IBSS RSN is not supported by this driver.
576 * NOTE: If the previous main interface has already
577 * disabled hw crypto offload, we have to keep this
578 * previous disable_offload setting as it was.
579 * Although ideally, we should notify mac80211 and tell
580 * it to forget about any HW crypto offload for now.
582 ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
583 (vif->type != NL80211_IFTYPE_AP));
585 /* The driver used to have P2P GO+CLIENT support,
586 * but that was dropped, and since we don't know if
587 * there are any gremlins lurking in the shadows,
588 * it's best to keep HW offload disabled for P2P.
590 ar->disable_offload |= vif->p2p;
592 ar->rx_software_decryption = ar->disable_offload;
594 err = carl9170_set_operating_mode(ar);
595 return err;
598 static int carl9170_op_add_interface(struct ieee80211_hw *hw,
599 struct ieee80211_vif *vif)
601 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
602 struct ieee80211_vif *main_vif, *old_main = NULL;
603 struct ar9170 *ar = hw->priv;
604 int vif_id = -1, err = 0;
606 mutex_lock(&ar->mutex);
607 rcu_read_lock();
608 if (vif_priv->active) {
610 * Skip the interface structure initialization
611 * if the vif survived the _restart call.
613 vif_id = vif_priv->id;
614 vif_priv->enable_beacon = false;
616 spin_lock_bh(&ar->beacon_lock);
617 dev_kfree_skb_any(vif_priv->beacon);
618 vif_priv->beacon = NULL;
619 spin_unlock_bh(&ar->beacon_lock);
621 goto init;
624 /* Because the AR9170 HW's MAC doesn't provide full support for
625 * multiple, independent interfaces [of different operation modes],
626 * we have to select ONE main interface [main mode of HW], but we
627 * can have multiple slaves [AKA: entries in the ACK-table].
629 * The first (from HEAD/TOP) interface in the ar->vif_list is
630 * always the main intf. All following intfs in this list
631 * are considered to be slave intfs.
633 main_vif = carl9170_get_main_vif(ar);
635 if (main_vif) {
636 switch (main_vif->type) {
637 case NL80211_IFTYPE_STATION:
638 if (vif->type == NL80211_IFTYPE_STATION)
639 break;
641 err = -EBUSY;
642 rcu_read_unlock();
644 goto unlock;
646 case NL80211_IFTYPE_MESH_POINT:
647 case NL80211_IFTYPE_AP:
648 if ((vif->type == NL80211_IFTYPE_STATION) ||
649 (vif->type == NL80211_IFTYPE_AP) ||
650 (vif->type == NL80211_IFTYPE_MESH_POINT))
651 break;
653 err = -EBUSY;
654 rcu_read_unlock();
655 goto unlock;
657 default:
658 rcu_read_unlock();
659 goto unlock;
663 vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
665 if (vif_id < 0) {
666 rcu_read_unlock();
668 err = -ENOSPC;
669 goto unlock;
672 BUG_ON(ar->vif_priv[vif_id].id != vif_id);
674 vif_priv->active = true;
675 vif_priv->id = vif_id;
676 vif_priv->enable_beacon = false;
677 ar->vifs++;
678 if (old_main) {
679 /* We end up here if the main interface is being replaced.
680 * Put the new main interface at the HEAD of the list; the
681 * previous interface will automatically become second in line.
683 list_add_rcu(&vif_priv->list, &ar->vif_list);
684 } else {
685 /* Add a new interface. If the list is empty, it will become the
686 * main interface; otherwise it will be a slave.
688 list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
690 rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
692 init:
693 main_vif = carl9170_get_main_vif(ar);
695 if (main_vif == vif) {
696 rcu_assign_pointer(ar->beacon_iter, vif_priv);
697 rcu_read_unlock();
699 if (old_main) {
700 struct carl9170_vif_info *old_main_priv =
701 (void *) old_main->drv_priv;
702 /* downgrade old main intf to slave intf.
703 * NOTE: We are no longer under rcu_read_lock.
704 * But we are still holding ar->mutex, so the
705 * vif data [id, addr] is safe.
707 err = carl9170_mod_virtual_mac(ar, old_main_priv->id,
708 old_main->addr);
709 if (err)
710 goto unlock;
713 err = carl9170_init_interface(ar, vif);
714 if (err)
715 goto unlock;
716 } else {
717 rcu_read_unlock();
718 err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
720 if (err)
721 goto unlock;
724 if (ar->fw.tx_seq_table) {
725 err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
727 if (err)
728 goto unlock;
731 unlock:
732 if (err && (vif_id >= 0)) {
733 vif_priv->active = false;
734 bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
735 ar->vifs--;
736 RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
737 list_del_rcu(&vif_priv->list);
738 mutex_unlock(&ar->mutex);
739 synchronize_rcu();
740 } else {
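/* Powersave has to stay off as long as more than one interface
 * is active (see the PS_OFF_VIF handling in remove_interface).
 */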
741 if (ar->vifs > 1)
742 ar->ps.off_override |= PS_OFF_VIF;
744 mutex_unlock(&ar->mutex);
747 return err;
750 static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
751 struct ieee80211_vif *vif)
753 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
754 struct ieee80211_vif *main_vif;
755 struct ar9170 *ar = hw->priv;
756 unsigned int id;
758 mutex_lock(&ar->mutex);
760 if (WARN_ON_ONCE(!vif_priv->active))
761 goto unlock;
763 ar->vifs--;
765 rcu_read_lock();
766 main_vif = carl9170_get_main_vif(ar);
768 id = vif_priv->id;
770 vif_priv->active = false;
771 WARN_ON(vif_priv->enable_beacon);
772 vif_priv->enable_beacon = false;
773 list_del_rcu(&vif_priv->list);
774 RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
776 if (vif == main_vif) {
777 rcu_read_unlock();
779 if (ar->vifs) {
780 WARN_ON(carl9170_init_interface(ar,
781 carl9170_get_main_vif(ar)));
782 } else {
783 carl9170_set_operating_mode(ar);
785 } else {
786 rcu_read_unlock();
788 WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
791 carl9170_update_beacon(ar, false);
792 carl9170_flush_cab(ar, id);
794 spin_lock_bh(&ar->beacon_lock);
795 dev_kfree_skb_any(vif_priv->beacon);
796 vif_priv->beacon = NULL;
797 spin_unlock_bh(&ar->beacon_lock);
799 bitmap_release_region(&ar->vif_bitmap, id, 0);
801 carl9170_set_beacon_timers(ar);
803 if (ar->vifs == 1)
804 ar->ps.off_override &= ~PS_OFF_VIF;
806 unlock:
807 mutex_unlock(&ar->mutex);
809 synchronize_rcu();
812 void carl9170_ps_check(struct ar9170 *ar)
814 ieee80211_queue_work(ar->hw, &ar->ps_work);
817 /* caller must hold ar->mutex */
818 static int carl9170_ps_update(struct ar9170 *ar)
820 bool ps = false;
821 int err = 0;
823 if (!ar->ps.off_override)
824 ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
826 if (ps != ar->ps.state) {
827 err = carl9170_powersave(ar, ps);
828 if (err)
829 return err;
831 if (ar->ps.state && !ps) {
832 ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
833 ar->ps.last_action);
836 if (ps)
837 ar->ps.last_slept = jiffies;
839 ar->ps.last_action = jiffies;
840 ar->ps.state = ps;
843 return 0;
846 static void carl9170_ps_work(struct work_struct *work)
848 struct ar9170 *ar = container_of(work, struct ar9170,
849 ps_work);
850 mutex_lock(&ar->mutex);
851 if (IS_STARTED(ar))
852 WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
853 mutex_unlock(&ar->mutex);
856 static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
858 int err;
860 if (noise) {
861 err = carl9170_get_noisefloor(ar);
862 if (err)
863 return err;
866 if (ar->fw.hw_counters) {
867 err = carl9170_collect_tally(ar);
868 if (err)
869 return err;
872 if (flush)
873 memset(&ar->tally, 0, sizeof(ar->tally));
875 return 0;
878 static void carl9170_stat_work(struct work_struct *work)
880 struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
881 int err;
883 mutex_lock(&ar->mutex);
884 err = carl9170_update_survey(ar, false, true);
885 mutex_unlock(&ar->mutex);
887 if (err)
888 return;
890 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
891 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
894 static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
896 struct ar9170 *ar = hw->priv;
897 int err = 0;
899 mutex_lock(&ar->mutex);
900 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
901 /* TODO */
902 err = 0;
905 if (changed & IEEE80211_CONF_CHANGE_PS) {
906 err = carl9170_ps_update(ar);
907 if (err)
908 goto out;
911 if (changed & IEEE80211_CONF_CHANGE_SMPS) {
912 /* TODO */
913 err = 0;
916 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
917 enum nl80211_channel_type channel_type =
918 cfg80211_get_chandef_type(&hw->conf.chandef);
920 /* adjust slot time for 5 GHz */
921 err = carl9170_set_slot_time(ar);
922 if (err)
923 goto out;
925 err = carl9170_update_survey(ar, true, false);
926 if (err)
927 goto out;
929 err = carl9170_set_channel(ar, hw->conf.chandef.chan,
930 channel_type);
931 if (err)
932 goto out;
934 err = carl9170_update_survey(ar, false, true);
935 if (err)
936 goto out;
938 err = carl9170_set_dyn_sifs_ack(ar);
939 if (err)
940 goto out;
942 err = carl9170_set_rts_cts_rate(ar);
943 if (err)
944 goto out;
947 if (changed & IEEE80211_CONF_CHANGE_POWER) {
948 err = carl9170_set_mac_tpc(ar, ar->hw->conf.chandef.chan);
949 if (err)
950 goto out;
953 out:
954 mutex_unlock(&ar->mutex);
955 return err;
958 static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
959 struct netdev_hw_addr_list *mc_list)
961 struct netdev_hw_addr *ha;
962 u64 mchash;
964 /* always get broadcast frames */
965 mchash = 1ULL << (0xff >> 2);
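/* Hash each multicast address by the upper six bits of its last
 * byte; the hardware uses the resulting 64-bit mask as its filter.
 */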
967 netdev_hw_addr_list_for_each(ha, mc_list)
968 mchash |= 1ULL << (ha->addr[5] >> 2);
970 return mchash;
973 static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
974 unsigned int changed_flags,
975 unsigned int *new_flags,
976 u64 multicast)
978 struct ar9170 *ar = hw->priv;
980 /* mask supported flags */
981 *new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
983 if (!IS_ACCEPTING_CMD(ar))
984 return;
986 mutex_lock(&ar->mutex);
988 ar->filter_state = *new_flags;
990 * We can support more by setting the sniffer bit and
991 * then checking the error flags later.
994 if (*new_flags & FIF_ALLMULTI)
995 multicast = ~0ULL;
997 if (multicast != ar->cur_mc_hash)
998 WARN_ON(carl9170_update_multicast(ar, multicast));
1000 if (changed_flags & FIF_OTHER_BSS) {
1001 ar->sniffer_enabled = !!(*new_flags & FIF_OTHER_BSS);
1003 WARN_ON(carl9170_set_operating_mode(ar));
1006 if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
1007 u32 rx_filter = 0;
1009 if (!ar->fw.ba_filter)
1010 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1012 if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
1013 rx_filter |= CARL9170_RX_FILTER_BAD;
1015 if (!(*new_flags & FIF_CONTROL))
1016 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1018 if (!(*new_flags & FIF_PSPOLL))
1019 rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
1021 if (!(*new_flags & FIF_OTHER_BSS)) {
1022 rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
1023 rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
1026 WARN_ON(carl9170_rx_filter(ar, rx_filter));
1029 mutex_unlock(&ar->mutex);
1033 static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
1034 struct ieee80211_vif *vif,
1035 struct ieee80211_bss_conf *bss_conf,
1036 u32 changed)
1038 struct ar9170 *ar = hw->priv;
1039 struct ath_common *common = &ar->common;
1040 int err = 0;
1041 struct carl9170_vif_info *vif_priv;
1042 struct ieee80211_vif *main_vif;
1044 mutex_lock(&ar->mutex);
1045 vif_priv = (void *) vif->drv_priv;
1046 main_vif = carl9170_get_main_vif(ar);
1047 if (WARN_ON(!main_vif))
1048 goto out;
1050 if (changed & BSS_CHANGED_BEACON_ENABLED) {
1051 struct carl9170_vif_info *iter;
1052 int i = 0;
1054 vif_priv->enable_beacon = bss_conf->enable_beacon;
1055 rcu_read_lock();
1056 list_for_each_entry_rcu(iter, &ar->vif_list, list) {
1057 if (iter->active && iter->enable_beacon)
1058 i++;
1061 rcu_read_unlock();
1063 ar->beacon_enabled = i;
1066 if (changed & BSS_CHANGED_BEACON) {
1067 err = carl9170_update_beacon(ar, false);
1068 if (err)
1069 goto out;
1072 if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
1073 BSS_CHANGED_BEACON_INT)) {
1075 if (main_vif != vif) {
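/* Slave interfaces always inherit the beacon interval and DTIM
 * period from the main interface.
 */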
1076 bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
1077 bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
1081 * Therefore a hard limit for the broadcast traffic should
1082 * prevent false alarms.
1084 if (vif->type != NL80211_IFTYPE_STATION &&
1085 (bss_conf->beacon_int * bss_conf->dtim_period >=
1086 (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
1087 err = -EINVAL;
1088 goto out;
1091 err = carl9170_set_beacon_timers(ar);
1092 if (err)
1093 goto out;
1096 if (changed & BSS_CHANGED_HT) {
1097 /* TODO */
1098 err = 0;
1099 if (err)
1100 goto out;
1103 if (main_vif != vif)
1104 goto out;
1107 * The following settings can only be changed by the
1108 * master interface.
1111 if (changed & BSS_CHANGED_BSSID) {
1112 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1113 err = carl9170_set_operating_mode(ar);
1114 if (err)
1115 goto out;
1118 if (changed & BSS_CHANGED_ASSOC) {
1119 ar->common.curaid = bss_conf->aid;
1120 err = carl9170_set_beacon_timers(ar);
1121 if (err)
1122 goto out;
1125 if (changed & BSS_CHANGED_ERP_SLOT) {
1126 err = carl9170_set_slot_time(ar);
1127 if (err)
1128 goto out;
1131 if (changed & BSS_CHANGED_BASIC_RATES) {
1132 err = carl9170_set_mac_rates(ar);
1133 if (err)
1134 goto out;
1137 out:
1138 WARN_ON_ONCE(err && IS_STARTED(ar));
1139 mutex_unlock(&ar->mutex);
1142 static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
1143 struct ieee80211_vif *vif)
1145 struct ar9170 *ar = hw->priv;
1146 struct carl9170_tsf_rsp tsf;
1147 int err;
1149 mutex_lock(&ar->mutex);
1150 err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1151 0, NULL, sizeof(tsf), &tsf);
1152 mutex_unlock(&ar->mutex);
1153 if (WARN_ON(err))
1154 return 0;
1156 return le64_to_cpu(tsf.tsf_64);
1159 static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1160 struct ieee80211_vif *vif,
1161 struct ieee80211_sta *sta,
1162 struct ieee80211_key_conf *key)
1164 struct ar9170 *ar = hw->priv;
1165 int err = 0, i;
1166 u8 ktype;
1168 if (ar->disable_offload || !vif)
1169 return -EOPNOTSUPP;
1171 /* Fall back to software encryption whenever the driver is connected
1172 * to more than one network.
1174 * This is very unfortunate, because some machines cannot handle
1175 * the high throughput speed in 802.11n networks.
1178 if (!is_main_vif(ar, vif)) {
1179 mutex_lock(&ar->mutex);
1180 goto err_softw;
1184 * While the hardware supports a *catch-all* key for offloading
1185 * group-key en-/decryption, the way the hardware
1186 * decides which keyId maps to which key remains a mystery...
1188 if ((vif->type != NL80211_IFTYPE_STATION &&
1189 vif->type != NL80211_IFTYPE_ADHOC) &&
1190 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1191 return -EOPNOTSUPP;
1193 switch (key->cipher) {
1194 case WLAN_CIPHER_SUITE_WEP40:
1195 ktype = AR9170_ENC_ALG_WEP64;
1196 break;
1197 case WLAN_CIPHER_SUITE_WEP104:
1198 ktype = AR9170_ENC_ALG_WEP128;
1199 break;
1200 case WLAN_CIPHER_SUITE_TKIP:
1201 ktype = AR9170_ENC_ALG_TKIP;
1202 break;
1203 case WLAN_CIPHER_SUITE_CCMP:
1204 ktype = AR9170_ENC_ALG_AESCCMP;
1205 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1206 break;
1207 default:
1208 return -EOPNOTSUPP;
1211 mutex_lock(&ar->mutex);
1212 if (cmd == SET_KEY) {
1213 if (!IS_STARTED(ar)) {
1214 err = -EOPNOTSUPP;
1215 goto out;
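/* Key cache layout: slots 0..63 hold pairwise keys, while group
 * keys go into the four slots above that, indexed by keyidx.
 */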
1218 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1219 sta = NULL;
1221 i = 64 + key->keyidx;
1222 } else {
1223 for (i = 0; i < 64; i++)
1224 if (!(ar->usedkeys & BIT(i)))
1225 break;
1226 if (i == 64)
1227 goto err_softw;
1230 key->hw_key_idx = i;
1232 err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1233 ktype, 0, key->key,
1234 min_t(u8, 16, key->keylen));
1235 if (err)
1236 goto out;
1238 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1239 err = carl9170_upload_key(ar, i, sta ? sta->addr :
1240 NULL, ktype, 1,
1241 key->key + 16, 16);
1242 if (err)
1243 goto out;
1246 * the hardware is not capable of generating the MMIC
1247 * for fragmented frames!
1249 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1252 if (i < 64)
1253 ar->usedkeys |= BIT(i);
1255 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1256 } else {
1257 if (!IS_STARTED(ar)) {
1258 /* The device is gone... together with the key ;-) */
1259 err = 0;
1260 goto out;
1263 if (key->hw_key_idx < 64) {
1264 ar->usedkeys &= ~BIT(key->hw_key_idx);
1265 } else {
1266 err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1267 AR9170_ENC_ALG_NONE, 0,
1268 NULL, 0);
1269 if (err)
1270 goto out;
1272 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1273 err = carl9170_upload_key(ar, key->hw_key_idx,
1274 NULL,
1275 AR9170_ENC_ALG_NONE,
1276 1, NULL, 0);
1277 if (err)
1278 goto out;
1283 err = carl9170_disable_key(ar, key->hw_key_idx);
1284 if (err)
1285 goto out;
1288 out:
1289 mutex_unlock(&ar->mutex);
1290 return err;
1292 err_softw:
1293 if (!ar->rx_software_decryption) {
1294 ar->rx_software_decryption = true;
1295 carl9170_set_operating_mode(ar);
1297 mutex_unlock(&ar->mutex);
1298 return -ENOSPC;
1301 static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1302 struct ieee80211_vif *vif,
1303 struct ieee80211_sta *sta)
1305 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1306 unsigned int i;
1308 atomic_set(&sta_info->pending_frames, 0);
1310 if (sta->ht_cap.ht_supported) {
1311 if (sta->ht_cap.ampdu_density > 6) {
1313 * The HW cannot guarantee a 16us A-MPDU density;
1314 * no HT-Xmit for this station.
1317 return 0;
1320 for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
1321 RCU_INIT_POINTER(sta_info->agg[i], NULL);
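/* The spec defines the maximum A-MPDU length as
 * 2^(13 + ampdu_factor) - 1 octets; derive the per-station
 * aggregation limit from the advertised exponent.
 */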
1323 sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
1324 sta_info->ht_sta = true;
1327 return 0;
1330 static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
1331 struct ieee80211_vif *vif,
1332 struct ieee80211_sta *sta)
1334 struct ar9170 *ar = hw->priv;
1335 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1336 unsigned int i;
1337 bool cleanup = false;
1339 if (sta->ht_cap.ht_supported) {
1341 sta_info->ht_sta = false;
1343 rcu_read_lock();
1344 for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
1345 struct carl9170_sta_tid *tid_info;
1347 tid_info = rcu_dereference(sta_info->agg[i]);
1348 RCU_INIT_POINTER(sta_info->agg[i], NULL);
1350 if (!tid_info)
1351 continue;
1353 spin_lock_bh(&ar->tx_ampdu_list_lock);
1354 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1355 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1356 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1357 cleanup = true;
1359 rcu_read_unlock();
1361 if (cleanup)
1362 carl9170_ampdu_gc(ar);
1365 return 0;
1368 static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
1369 struct ieee80211_vif *vif, u16 queue,
1370 const struct ieee80211_tx_queue_params *param)
1372 struct ar9170 *ar = hw->priv;
1373 int ret;
1375 mutex_lock(&ar->mutex);
1376 memcpy(&ar->edcf[ar9170_qmap(queue)], param, sizeof(*param));
1377 ret = carl9170_set_qos(ar);
1378 mutex_unlock(&ar->mutex);
1379 return ret;
1382 static void carl9170_ampdu_work(struct work_struct *work)
1384 struct ar9170 *ar = container_of(work, struct ar9170,
1385 ampdu_work);
1387 if (!IS_STARTED(ar))
1388 return;
1390 mutex_lock(&ar->mutex);
1391 carl9170_ampdu_gc(ar);
1392 mutex_unlock(&ar->mutex);
1395 static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1396 struct ieee80211_vif *vif,
1397 struct ieee80211_ampdu_params *params)
1399 struct ieee80211_sta *sta = params->sta;
1400 enum ieee80211_ampdu_mlme_action action = params->action;
1401 u16 tid = params->tid;
1402 u16 *ssn = &params->ssn;
1403 struct ar9170 *ar = hw->priv;
1404 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1405 struct carl9170_sta_tid *tid_info;
1407 if (modparam_noht)
1408 return -EOPNOTSUPP;
1410 switch (action) {
1411 case IEEE80211_AMPDU_TX_START:
1412 if (!sta_info->ht_sta)
1413 return -EOPNOTSUPP;
1415 tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1416 GFP_ATOMIC);
1417 if (!tid_info)
1418 return -ENOMEM;
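/* Start all block-ack window markers at the SSN handed in by mac80211. */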
1420 tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1421 tid_info->state = CARL9170_TID_STATE_PROGRESS;
1422 tid_info->tid = tid;
1423 tid_info->max = sta_info->ampdu_max_len;
1424 tid_info->sta = sta;
1425 tid_info->vif = vif;
1427 INIT_LIST_HEAD(&tid_info->list);
1428 INIT_LIST_HEAD(&tid_info->tmp_list);
1429 skb_queue_head_init(&tid_info->queue);
1430 spin_lock_init(&tid_info->lock);
1432 spin_lock_bh(&ar->tx_ampdu_list_lock);
1433 ar->tx_ampdu_list_len++;
1434 list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1435 rcu_assign_pointer(sta_info->agg[tid], tid_info);
1436 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1438 return IEEE80211_AMPDU_TX_START_IMMEDIATE;
1440 case IEEE80211_AMPDU_TX_STOP_CONT:
1441 case IEEE80211_AMPDU_TX_STOP_FLUSH:
1442 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1443 rcu_read_lock();
1444 tid_info = rcu_dereference(sta_info->agg[tid]);
1445 if (tid_info) {
1446 spin_lock_bh(&ar->tx_ampdu_list_lock);
1447 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1448 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1449 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1452 RCU_INIT_POINTER(sta_info->agg[tid], NULL);
1453 rcu_read_unlock();
1455 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1456 ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1457 break;
1459 case IEEE80211_AMPDU_TX_OPERATIONAL:
1460 rcu_read_lock();
1461 tid_info = rcu_dereference(sta_info->agg[tid]);
1463 sta_info->stats[tid].clear = true;
1464 sta_info->stats[tid].req = false;
1466 if (tid_info) {
1467 bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1468 tid_info->state = CARL9170_TID_STATE_IDLE;
1470 rcu_read_unlock();
1472 if (WARN_ON_ONCE(!tid_info))
1473 return -EFAULT;
1475 break;
1477 case IEEE80211_AMPDU_RX_START:
1478 case IEEE80211_AMPDU_RX_STOP:
1479 /* Handled by hardware */
1480 break;
1482 default:
1483 return -EOPNOTSUPP;
1486 return 0;
1489 #ifdef CONFIG_CARL9170_WPC
1490 static int carl9170_register_wps_button(struct ar9170 *ar)
1492 struct input_dev *input;
1493 int err;
1495 if (!(ar->features & CARL9170_WPS_BUTTON))
1496 return 0;
1498 input = input_allocate_device();
1499 if (!input)
1500 return -ENOMEM;
1502 snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1503 wiphy_name(ar->hw->wiphy));
1505 snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1506 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1508 input->name = ar->wps.name;
1509 input->phys = ar->wps.phys;
1510 input->id.bustype = BUS_USB;
1511 input->dev.parent = &ar->hw->wiphy->dev;
1513 input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1515 err = input_register_device(input);
1516 if (err) {
1517 input_free_device(input);
1518 return err;
1521 ar->wps.pbc = input;
1522 return 0;
1524 #endif /* CONFIG_CARL9170_WPC */
1526 #ifdef CONFIG_CARL9170_HWRNG
1527 static int carl9170_rng_get(struct ar9170 *ar)
1530 #define RW (CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
1531 #define RB (CARL9170_MAX_CMD_PAYLOAD_LEN)
1533 static const __le32 rng_load[RW] = {
1534 [0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};
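/* rng_load asks the firmware to read the RNG register RW times per
 * command; the loop below repeats this until the cache is refilled.
 */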
1536 u32 buf[RW];
1538 unsigned int i, off = 0, transfer, count;
1539 int err;
1541 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
1543 if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
1544 return -EAGAIN;
1546 count = ARRAY_SIZE(ar->rng.cache);
1547 while (count) {
1548 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1549 RB, (u8 *) rng_load,
1550 RB, (u8 *) buf);
1551 if (err)
1552 return err;
1554 transfer = min_t(unsigned int, count, RW);
1555 for (i = 0; i < transfer; i++)
1556 ar->rng.cache[off + i] = buf[i];
1558 off += transfer;
1559 count -= transfer;
1562 ar->rng.cache_idx = 0;
1564 #undef RW
1565 #undef RB
1566 return 0;
1569 static int carl9170_rng_read(struct hwrng *rng, u32 *data)
1571 struct ar9170 *ar = (struct ar9170 *)rng->priv;
1572 int ret = -EIO;
1574 mutex_lock(&ar->mutex);
1575 if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
1576 ret = carl9170_rng_get(ar);
1577 if (ret) {
1578 mutex_unlock(&ar->mutex);
1579 return ret;
1583 *data = ar->rng.cache[ar->rng.cache_idx++];
1584 mutex_unlock(&ar->mutex);
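/* Each cached sample appears to carry only 16 bits of entropy,
 * hence the sizeof(u16) return value.
 */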
1586 return sizeof(u16);
1589 static void carl9170_unregister_hwrng(struct ar9170 *ar)
1591 if (ar->rng.initialized) {
1592 hwrng_unregister(&ar->rng.rng);
1593 ar->rng.initialized = false;
1597 static int carl9170_register_hwrng(struct ar9170 *ar)
1599 int err;
1601 snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
1602 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
1603 ar->rng.rng.name = ar->rng.name;
1604 ar->rng.rng.data_read = carl9170_rng_read;
1605 ar->rng.rng.priv = (unsigned long)ar;
1607 if (WARN_ON(ar->rng.initialized))
1608 return -EALREADY;
1610 err = hwrng_register(&ar->rng.rng);
1611 if (err) {
1612 dev_err(&ar->udev->dev, "Failed to register the random "
1613 "number generator (%d)\n", err);
1614 return err;
1617 ar->rng.initialized = true;
1619 err = carl9170_rng_get(ar);
1620 if (err) {
1621 carl9170_unregister_hwrng(ar);
1622 return err;
1625 return 0;
1627 #endif /* CONFIG_CARL9170_HWRNG */
1629 static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1630 struct survey_info *survey)
1632 struct ar9170 *ar = hw->priv;
1633 struct ieee80211_channel *chan;
1634 struct ieee80211_supported_band *band;
1635 int err, b, i;
1637 chan = ar->channel;
1638 if (!chan)
1639 return -ENODEV;
1641 if (idx == chan->hw_value) {
1642 mutex_lock(&ar->mutex);
1643 err = carl9170_update_survey(ar, false, true);
1644 mutex_unlock(&ar->mutex);
1645 if (err)
1646 return err;
1649 for (b = 0; b < NUM_NL80211_BANDS; b++) {
1650 band = ar->hw->wiphy->bands[b];
1652 if (!band)
1653 continue;
1655 for (i = 0; i < band->n_channels; i++) {
1656 if (band->channels[i].hw_value == idx) {
1657 chan = &band->channels[i];
1658 goto found;
1662 return -ENOENT;
1664 found:
1665 memcpy(survey, &ar->survey[idx], sizeof(*survey));
1667 survey->channel = chan;
1668 survey->filled = SURVEY_INFO_NOISE_DBM;
1670 if (ar->channel == chan)
1671 survey->filled |= SURVEY_INFO_IN_USE;
1673 if (ar->fw.hw_counters) {
1674 survey->filled |= SURVEY_INFO_TIME |
1675 SURVEY_INFO_TIME_BUSY |
1676 SURVEY_INFO_TIME_TX;
1679 return 0;
1682 static void carl9170_op_flush(struct ieee80211_hw *hw,
1683 struct ieee80211_vif *vif,
1684 u32 queues, bool drop)
1686 struct ar9170 *ar = hw->priv;
1687 unsigned int vid;
1689 mutex_lock(&ar->mutex);
1690 for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1691 carl9170_flush_cab(ar, vid);
1693 carl9170_flush(ar, drop);
1694 mutex_unlock(&ar->mutex);
1697 static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1698 struct ieee80211_low_level_stats *stats)
1700 struct ar9170 *ar = hw->priv;
1702 memset(stats, 0, sizeof(*stats));
1703 stats->dot11ACKFailureCount = ar->tx_ack_failures;
1704 stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1705 return 0;
1708 static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1709 struct ieee80211_vif *vif,
1710 enum sta_notify_cmd cmd,
1711 struct ieee80211_sta *sta)
1713 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1715 switch (cmd) {
1716 case STA_NOTIFY_SLEEP:
1717 sta_info->sleeping = true;
1718 if (atomic_read(&sta_info->pending_frames))
1719 ieee80211_sta_block_awake(hw, sta, true);
1720 break;
1722 case STA_NOTIFY_AWAKE:
1723 sta_info->sleeping = false;
1724 break;
1728 static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1730 struct ar9170 *ar = hw->priv;
1732 return !!atomic_read(&ar->tx_total_queued);
1735 static const struct ieee80211_ops carl9170_ops = {
1736 .start = carl9170_op_start,
1737 .stop = carl9170_op_stop,
1738 .tx = carl9170_op_tx,
1739 .flush = carl9170_op_flush,
1740 .add_interface = carl9170_op_add_interface,
1741 .remove_interface = carl9170_op_remove_interface,
1742 .config = carl9170_op_config,
1743 .prepare_multicast = carl9170_op_prepare_multicast,
1744 .configure_filter = carl9170_op_configure_filter,
1745 .conf_tx = carl9170_op_conf_tx,
1746 .bss_info_changed = carl9170_op_bss_info_changed,
1747 .get_tsf = carl9170_op_get_tsf,
1748 .set_key = carl9170_op_set_key,
1749 .sta_add = carl9170_op_sta_add,
1750 .sta_remove = carl9170_op_sta_remove,
1751 .sta_notify = carl9170_op_sta_notify,
1752 .get_survey = carl9170_op_get_survey,
1753 .get_stats = carl9170_op_get_stats,
1754 .ampdu_action = carl9170_op_ampdu_action,
1755 .tx_frames_pending = carl9170_tx_frames_pending,
1758 void *carl9170_alloc(size_t priv_size)
1760 struct ieee80211_hw *hw;
1761 struct ar9170 *ar;
1762 struct sk_buff *skb;
1763 int i;
1766 * This buffer is used for rx stream reconstruction.
1767 * Under heavy load this device (or the transport layer?)
1768 * tends to split the streams into separate rx descriptors.
1771 skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1772 if (!skb)
1773 goto err_nomem;
1775 hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1776 if (!hw)
1777 goto err_nomem;
1779 ar = hw->priv;
1780 ar->hw = hw;
1781 ar->rx_failover = skb;
1783 memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1784 ar->rx_has_plcp = false;
1787 * Here's a hidden pitfall!
1789 * All 4 AC queues work perfectly well under _legacy_ operation.
1790 * However, as soon as aggregation is enabled, the traffic flow
1791 * gets very bumpy. Therefore we have to _switch_ to a
1792 * software AC with a single HW queue.
1794 hw->queues = __AR9170_NUM_TXQ;
1796 mutex_init(&ar->mutex);
1797 spin_lock_init(&ar->beacon_lock);
1798 spin_lock_init(&ar->cmd_lock);
1799 spin_lock_init(&ar->tx_stats_lock);
1800 spin_lock_init(&ar->tx_ampdu_list_lock);
1801 spin_lock_init(&ar->mem_lock);
1802 spin_lock_init(&ar->state_lock);
1803 atomic_set(&ar->pending_restarts, 0);
1804 ar->vifs = 0;
1805 for (i = 0; i < ar->hw->queues; i++) {
1806 skb_queue_head_init(&ar->tx_status[i]);
1807 skb_queue_head_init(&ar->tx_pending[i]);
1809 INIT_LIST_HEAD(&ar->bar_list[i]);
1810 spin_lock_init(&ar->bar_list_lock[i]);
1812 INIT_WORK(&ar->ps_work, carl9170_ps_work);
1813 INIT_WORK(&ar->ping_work, carl9170_ping_work);
1814 INIT_WORK(&ar->restart_work, carl9170_restart_work);
1815 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1816 INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
1817 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1818 INIT_LIST_HEAD(&ar->tx_ampdu_list);
1819 rcu_assign_pointer(ar->tx_ampdu_iter,
1820 (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1822 bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1823 INIT_LIST_HEAD(&ar->vif_list);
1824 init_completion(&ar->tx_flush);
1826 /* firmware decides which modes we support */
1827 hw->wiphy->interface_modes = 0;
1829 ieee80211_hw_set(hw, RX_INCLUDES_FCS);
1830 ieee80211_hw_set(hw, MFP_CAPABLE);
1831 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
1832 ieee80211_hw_set(hw, SUPPORTS_PS);
1833 ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
1834 ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
1835 ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
1836 ieee80211_hw_set(hw, SIGNAL_DBM);
1837 ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
1839 if (!modparam_noht) {
1841 * see the comment above for why we allow the user
1842 * to disable HT via a module parameter.
1844 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
1847 hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1848 hw->sta_data_size = sizeof(struct carl9170_sta_info);
1849 hw->vif_data_size = sizeof(struct carl9170_vif_info);
1851 hw->max_rates = CARL9170_TX_MAX_RATES;
1852 hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1854 for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1855 ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1857 wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
1859 return ar;
1861 err_nomem:
1862 kfree_skb(skb);
1863 return ERR_PTR(-ENOMEM);
1866 static int carl9170_read_eeprom(struct ar9170 *ar)
1868 #define RW 8 /* number of words to read at once */
1869 #define RB (sizeof(u32) * RW)
1870 u8 *eeprom = (void *)&ar->eeprom;
1871 __le32 offsets[RW];
1872 int i, j, err;
1874 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1876 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1877 #ifndef __CHECKER__
1878 /* don't want to handle trailing remains */
1879 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1880 #endif
1882 for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
1883 for (j = 0; j < RW; j++)
1884 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1885 RB * i + 4 * j);
1887 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1888 RB, (u8 *) &offsets,
1889 RB, eeprom + RB * i);
1890 if (err)
1891 return err;
1894 #undef RW
1895 #undef RB
1896 return 0;
1899 static int carl9170_parse_eeprom(struct ar9170 *ar)
1901 struct ath_regulatory *regulatory = &ar->common.regulatory;
1902 unsigned int rx_streams, tx_streams, tx_params = 0;
1903 int bands = 0;
1904 int chans = 0;
1906 if (ar->eeprom.length == cpu_to_le16(0xffff))
1907 return -ENODATA;
1909 rx_streams = hweight8(ar->eeprom.rx_mask);
1910 tx_streams = hweight8(ar->eeprom.tx_mask);
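/* If the TX and RX stream counts differ, advertise the TX stream
 * count separately in the MCS TX parameters.
 */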
1912 if (rx_streams != tx_streams) {
1913 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1915 WARN_ON(!(tx_streams >= 1 && tx_streams <=
1916 IEEE80211_HT_MCS_TX_MAX_STREAMS));
1918 tx_params = (tx_streams - 1) <<
1919 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1921 carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1922 carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1925 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1926 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] =
1927 &carl9170_band_2GHz;
1928 chans += carl9170_band_2GHz.n_channels;
1929 bands++;
1931 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1932 ar->hw->wiphy->bands[NL80211_BAND_5GHZ] =
1933 &carl9170_band_5GHz;
1934 chans += carl9170_band_5GHz.n_channels;
1935 bands++;
1938 if (!bands)
1939 return -EINVAL;
1941 ar->survey = kcalloc(chans, sizeof(struct survey_info), GFP_KERNEL);
1942 if (!ar->survey)
1943 return -ENOMEM;
1944 ar->num_channels = chans;
1946 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1948 /* second part of wiphy init */
1949 SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1951 return 0;
1954 static void carl9170_reg_notifier(struct wiphy *wiphy,
1955 struct regulatory_request *request)
1957 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1958 struct ar9170 *ar = hw->priv;
1960 ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1963 int carl9170_register(struct ar9170 *ar)
1965 struct ath_regulatory *regulatory = &ar->common.regulatory;
1966 int err = 0, i;
1968 if (WARN_ON(ar->mem_bitmap))
1969 return -EINVAL;
1971 ar->mem_bitmap = kcalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG),
1972 sizeof(unsigned long),
1973 GFP_KERNEL);
1975 if (!ar->mem_bitmap)
1976 return -ENOMEM;
1978 /* try to read EEPROM, init MAC addr */
1979 err = carl9170_read_eeprom(ar);
1980 if (err)
1981 return err;
1983 err = carl9170_parse_eeprom(ar);
1984 if (err)
1985 return err;
1987 err = ath_regd_init(regulatory, ar->hw->wiphy,
1988 carl9170_reg_notifier);
1989 if (err)
1990 return err;
1992 if (modparam_noht) {
1993 carl9170_band_2GHz.ht_cap.ht_supported = false;
1994 carl9170_band_5GHz.ht_cap.ht_supported = false;
1997 for (i = 0; i < ar->fw.vif_num; i++) {
1998 ar->vif_priv[i].id = i;
1999 ar->vif_priv[i].vif = NULL;
2002 err = ieee80211_register_hw(ar->hw);
2003 if (err)
2004 return err;
2006 /* mac80211 interface is now registered */
2007 ar->registered = true;
2009 if (!ath_is_world_regd(regulatory))
2010 regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
2012 #ifdef CONFIG_CARL9170_DEBUGFS
2013 carl9170_debugfs_register(ar);
2014 #endif /* CONFIG_CARL9170_DEBUGFS */
2016 err = carl9170_led_init(ar);
2017 if (err)
2018 goto err_unreg;
2020 #ifdef CONFIG_CARL9170_LEDS
2021 err = carl9170_led_register(ar);
2022 if (err)
2023 goto err_unreg;
2024 #endif /* CONFIG_CARL9170_LEDS */
2026 #ifdef CONFIG_CARL9170_WPC
2027 err = carl9170_register_wps_button(ar);
2028 if (err)
2029 goto err_unreg;
2030 #endif /* CONFIG_CARL9170_WPC */
2032 #ifdef CONFIG_CARL9170_HWRNG
2033 err = carl9170_register_hwrng(ar);
2034 if (err)
2035 goto err_unreg;
2036 #endif /* CONFIG_CARL9170_HWRNG */
2038 dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
2039 wiphy_name(ar->hw->wiphy));
2041 return 0;
2043 err_unreg:
2044 carl9170_unregister(ar);
2045 return err;
2048 void carl9170_unregister(struct ar9170 *ar)
2050 if (!ar->registered)
2051 return;
2053 ar->registered = false;
2055 #ifdef CONFIG_CARL9170_LEDS
2056 carl9170_led_unregister(ar);
2057 #endif /* CONFIG_CARL9170_LEDS */
2059 #ifdef CONFIG_CARL9170_DEBUGFS
2060 carl9170_debugfs_unregister(ar);
2061 #endif /* CONFIG_CARL9170_DEBUGFS */
2063 #ifdef CONFIG_CARL9170_WPC
2064 if (ar->wps.pbc) {
2065 input_unregister_device(ar->wps.pbc);
2066 ar->wps.pbc = NULL;
2068 #endif /* CONFIG_CARL9170_WPC */
2070 #ifdef CONFIG_CARL9170_HWRNG
2071 carl9170_unregister_hwrng(ar);
2072 #endif /* CONFIG_CARL9170_HWRNG */
2074 carl9170_cancel_worker(ar);
2075 cancel_work_sync(&ar->restart_work);
2077 ieee80211_unregister_hw(ar->hw);
2080 void carl9170_free(struct ar9170 *ar)
2082 WARN_ON(ar->registered);
2083 WARN_ON(IS_INITIALIZED(ar));
2085 kfree_skb(ar->rx_failover);
2086 ar->rx_failover = NULL;
2088 kfree(ar->mem_bitmap);
2089 ar->mem_bitmap = NULL;
2091 kfree(ar->survey);
2092 ar->survey = NULL;
2094 mutex_destroy(&ar->mutex);
2096 ieee80211_free_hw(ar->hw);