drivers/net/wireless/ath/ath9k/init.c

/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ath9k_platform.h>
#include <linux/module.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int ath9k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");

bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

/*
 * Some 2 GHz radios are actually tunable on 2312-2732 in 5 MHz steps,
 * but we only support the channels we know we have calibration data
 * for on all cards, so that this table can stay static.
 */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/*
 * Some 5 GHz radios are actually tunable on XXXX-YYYY in 5 MHz steps,
 * but we only support the channels we know we have calibration data
 * for on all cards, so that this table can stay static.
 */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}
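
/*
 * Bitrates below are in units of 100 kbps (so 10 == 1 Mbps). The first
 * four entries are the 2.4 GHz CCK rates; the 5 GHz band registers this
 * table with an offset of four to skip them.
 */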
static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};

#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
#endif

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Read and write, they both share the same lock. We do this to serialize
 * reads and writes on Atheros 802.11n PCI devices only. This is required
 * as the FIFO on these devices can only sanely accept two requests at a
 * time.
 */
static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);
	return val;
}
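
/*
 * Lockless read-modify-write helper: the caller (ath9k_reg_rmw below)
 * takes sc_serial_rw when register serialization is enabled.
 */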
static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
				    u32 set, u32 clr)
{
	u32 val;

	val = ioread32(sc->mem + reg_offset);
	val &= ~clr;
	val |= set;
	iowrite32(val, sc->mem + reg_offset);

	return val;
}

static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	unsigned long uninitialized_var(flags);
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);

	return val;
}

/**************************/
/*     Initialization     */
/**************************/

static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
		max_streams = 1;
	else if (AR_SREV_9462(ah))
		max_streams = 2;
	else if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	if (AR_SREV_9280_20_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
	rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);

	ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
		tx_streams, rx_streams);

	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
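
/*
 * Regulatory notifier: apply the requested regulatory domain and, if a
 * channel is already in use, re-clamp the TX power to that channel's
 * limit (txpowlimit is kept in units of 0.5 dBm, hence the factor of 2).
 */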
static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
	int ret;

	ret = ath_reg_notifier_apply(wiphy, request, reg);

	/* Set tx power */
	if (ah->curchan) {
		sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
		ath9k_ps_wakeup(sc);
		ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
		sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
		ath9k_ps_restore(sc);
	}

	return ret;
}

/*
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains.  These are used to contain the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_err(common, "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
}

static int ath9k_init_btcoex(struct ath_softc *sc)
{
	struct ath_txq *txq;
	struct ath_hw *ah = sc->sc_ah;
	int r;

	switch (ath9k_hw_get_btcoex_scheme(sc->sc_ah)) {
	case ATH_BTCOEX_CFG_NONE:
		break;
	case ATH_BTCOEX_CFG_2WIRE:
		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
		break;
	case ATH_BTCOEX_CFG_3WIRE:
		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
		r = ath_init_btcoex_timer(sc);
		if (r)
			return -1;
		txq = sc->tx.txq_map[WME_AC_BE];
		ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		break;
	case ATH_BTCOEX_CFG_MCI:
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
		INIT_LIST_HEAD(&sc->btcoex.mci.info);

		r = ath_mci_setup(sc);
		if (r)
			return r;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
			ah->btcoex_hw.mci.ready = false;
			ah->btcoex_hw.mci.bt_state = 0;
			ah->btcoex_hw.mci.bt_ver_major = 3;
			ah->btcoex_hw.mci.bt_ver_minor = 0;
			ah->btcoex_hw.mci.bt_version_known = false;
			ah->btcoex_hw.mci.update_2g5g = true;
			ah->btcoex_hw.mci.is_2g = true;
			ah->btcoex_hw.mci.wlan_channels_update = false;
			ah->btcoex_hw.mci.wlan_channels[0] = 0x00000000;
			ah->btcoex_hw.mci.wlan_channels[1] = 0xffffffff;
			ah->btcoex_hw.mci.wlan_channels[2] = 0xffffffff;
			ah->btcoex_hw.mci.wlan_channels[3] = 0x7fffffff;
			ah->btcoex_hw.mci.query_bt = true;
			ah->btcoex_hw.mci.unhalt_bt_gpm = true;
			ah->btcoex_hw.mci.halted_bt_gpm = false;
			ah->btcoex_hw.mci.need_flush_btinfo = false;
			ah->btcoex_hw.mci.wlan_cal_seq = 0;
			ah->btcoex_hw.mci.wlan_cal_done = 0;
			ah->btcoex_hw.mci.config = 0x2201;
		}
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}

static int ath9k_init_queues(struct ath_softc *sc)
{
	int i = 0;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < WME_NUM_AC; i++) {
		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
		sc->tx.txq_map[i]->mac80211_qnum = i;
	}
	return 0;
}
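
/*
 * Register the channel and legacy bitrate tables with mac80211. The
 * 5 GHz band points at ath9k_legacy_rates + 4, skipping the four CCK
 * entries (1, 2, 5.5 and 11 Mbps) that are only valid on 2.4 GHz.
 */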
static int ath9k_init_channels_rates(struct ath_softc *sc)
{
	void *channels;

	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
		     ATH9K_NUM_CHANNELS);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
		channels = kmemdup(ath9k_2ghz_chantable,
				   sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
		if (!channels)
			return -ENOMEM;

		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
		channels = kmemdup(ath9k_5ghz_chantable,
				   sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
		if (!channels) {
			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
			return -ENOMEM;
		}

		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}
	return 0;
}

static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
		sc->beacon.bslot[i] = NULL;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}

static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath9k_platform_data *pdata = sc->dev->platform_data;
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw = sc->hw;
	ah->hw_version.devid = devid;
	ah->reg_ops.read = ath9k_ioread32;
	ah->reg_ops.write = ath9k_iowrite32;
	ah->reg_ops.rmw = ath9k_reg_rmw;
	atomic_set(&ah->intr_ref_cnt, -1);
	sc->sc_ah = ah;

	if (!pdata) {
		ah->ah_flags |= AH_USE_EEPROM;
		sc->sc_ah->led_pin = -1;
	} else {
		sc->sc_ah->gpio_mask = pdata->gpio_mask;
		sc->sc_ah->gpio_val = pdata->gpio_val;
		sc->sc_ah->led_pin = pdata->led_pin;
		ah->is_clk_25mhz = pdata->is_clk_25mhz;
		ah->get_mac_revision = pdata->get_mac_revision;
		ah->external_reset = pdata->external_reset;
	}

	common = ath9k_hw_common(ah);
	common->ops = &ah->reg_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	common->btcoex_enabled = ath9k_btcoex_enable == 1;
	common->disable_ani = false;
	spin_lock_init(&common->cc_lock);

	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_DEBUGFS
	spin_lock_init(&sc->nodes_lock);
	spin_lock_init(&sc->debug.samp_lock);
	INIT_LIST_HEAD(&sc->nodes);
#endif
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	if (pdata && pdata->macaddr)
		memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_channels_rates(sc);
	if (ret)
		goto err_btcoex;

	ath9k_cmn_init_crypto(sc->sc_ah);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_hw_deinit(ah);
err_hw:
	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}
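
/*
 * Run through every channel in the band once, setting the TX power
 * limit at MAX_RATE_POWER, so per-channel power limits are initialised
 * before the band is used.
 */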
static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	struct ath_hw *ah = sc->sc_ah;
	int i;

	sband = &sc->sbands[band];
	for (i = 0; i < sband->n_channels; i++) {
		chan = &sband->channels[i];
		ah->curchan = &ah->channels[chan->hw_value];
		ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
	}
}

static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);

	ah->curchan = curchan;
}

void ath9k_reload_chainmask_settings(struct ath_softc *sc)
{
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
		return;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
}

void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_WDS) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	if (AR_SREV_5416(sc->sc_ah))
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

	hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
	hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;

	/* single chain devices with rx diversity */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		hw->wiphy->available_antennas_rx = BIT(0) | BIT(1);

	sc->ant_rx = hw->wiphy->available_antennas_rx;
	sc->ant_tx = hw->wiphy->available_antennas_tx;

#ifdef CONFIG_ATH9K_RATE_CONTROL
	hw->rate_control_algorithm = "ath9k_rate_control";
#endif

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	ath9k_reload_chainmask_settings(sc);

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

int ath9k_init_device(u16 devid, struct ath_softc *sc,
		      const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	ath9k_init_txpower_limits(sc);

#ifdef CONFIG_MAC80211_LEDS
	/* must be initialized before ieee80211_register_hw */
	sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
		IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
		ARRAY_SIZE(ath9k_tpt_blink));
#endif

	INIT_WORK(&sc->hw_reset_work, ath_reset_work);
	INIT_WORK(&sc->hw_check_work, ath_hw_check);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	error = ath9k_init_debug(ah);
	if (error) {
		ath_err(common, "Unable to create debugfs files\n");
		goto error_world;
	}

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	sc->last_rssi = ATH_RSSI_DUMMY_MARKER;

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

	if ((sc->btcoex.no_stomp_timer) &&
	    ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

	if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_MCI)
		ath_mci_cleanup(sc);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_hw_deinit(sc->sc_ah);

	kfree(sc->sc_ah);
	sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	ath9k_ps_restore(sc);

	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
}

void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_rate_unregister;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

err_pci_exit:
	ath_pci_exit();
err_rate_unregister:
	ath_rate_control_unregister();
err_out:
	return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
	is_ath9k_unloaded = true;
	ath_ahb_exit();
	ath_pci_exit();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);