/*
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * Modified for gPXE, July 2009, by Joshua Oreman <oremanj@rwcr.net>
 * Original from Linux kernel 2.6.30.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */
FILE_LICENCE ( BSD3 );
#include <gpxe/malloc.h>
#include <gpxe/timer.h>
#include <gpxe/netdevice.h>
#include <gpxe/pci.h>
#include <gpxe/pci_io.h>

#include "base.h"
#define ATH5K_CALIB_INTERVAL	10 /* Calibrate PHY every 10 seconds */
#define ATH5K_RETRIES		4  /* Number of times to retry packet sends */
#define ATH5K_DESC_ALIGN	16 /* Alignment for TX/RX descriptors */
static struct pci_device_id ath5k_nics[] = {
	PCI_ROM(0x168c, 0x0207, "ath5210e", "Atheros 5210 early", AR5K_AR5210),
	PCI_ROM(0x168c, 0x0007, "ath5210", "Atheros 5210", AR5K_AR5210),
	PCI_ROM(0x168c, 0x0011, "ath5311", "Atheros 5311 (AHB)", AR5K_AR5211),
	PCI_ROM(0x168c, 0x0012, "ath5211", "Atheros 5211", AR5K_AR5211),
	PCI_ROM(0x168c, 0x0013, "ath5212", "Atheros 5212", AR5K_AR5212),
	PCI_ROM(0xa727, 0x0013, "ath5212c", "3com Ath 5212", AR5K_AR5212),
	PCI_ROM(0x10b7, 0x0013, "rdag675", "3com 3CRDAG675", AR5K_AR5212),
	PCI_ROM(0x168c, 0x1014, "ath5212m", "Ath 5212 miniPCI", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0014, "ath5212x14", "Atheros 5212 x14", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0015, "ath5212x15", "Atheros 5212 x15", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0016, "ath5212x16", "Atheros 5212 x16", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0017, "ath5212x17", "Atheros 5212 x17", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0018, "ath5212x18", "Atheros 5212 x18", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0019, "ath5212x19", "Atheros 5212 x19", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001a, "ath2413", "Atheros 2413 Griffin", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001b, "ath5413", "Atheros 5413 Eagle", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001c, "ath5212e", "Atheros 5212 PCI-E", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001d, "ath2417", "Atheros 2417 Nala", AR5K_AR5212),
};
static const struct ath5k_srev_name srev_names[] = {
	{ "5210",  AR5K_VERSION_MAC, AR5K_SREV_AR5210 },
	{ "5311",  AR5K_VERSION_MAC, AR5K_SREV_AR5311 },
	{ "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A },
	{ "5311B", AR5K_VERSION_MAC, AR5K_SREV_AR5311B },
	{ "5211",  AR5K_VERSION_MAC, AR5K_SREV_AR5211 },
	{ "5212",  AR5K_VERSION_MAC, AR5K_SREV_AR5212 },
	{ "5213",  AR5K_VERSION_MAC, AR5K_SREV_AR5213 },
	{ "5213A", AR5K_VERSION_MAC, AR5K_SREV_AR5213A },
	{ "2413",  AR5K_VERSION_MAC, AR5K_SREV_AR2413 },
	{ "2414",  AR5K_VERSION_MAC, AR5K_SREV_AR2414 },
	{ "5424",  AR5K_VERSION_MAC, AR5K_SREV_AR5424 },
	{ "5413",  AR5K_VERSION_MAC, AR5K_SREV_AR5413 },
	{ "5414",  AR5K_VERSION_MAC, AR5K_SREV_AR5414 },
	{ "2415",  AR5K_VERSION_MAC, AR5K_SREV_AR2415 },
	{ "5416",  AR5K_VERSION_MAC, AR5K_SREV_AR5416 },
	{ "5418",  AR5K_VERSION_MAC, AR5K_SREV_AR5418 },
	{ "2425",  AR5K_VERSION_MAC, AR5K_SREV_AR2425 },
	{ "2417",  AR5K_VERSION_MAC, AR5K_SREV_AR2417 },
	{ "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN },
	{ "5110",  AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 },
	{ "5111",  AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 },
	{ "5111A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111A },
	{ "2111",  AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 },
	{ "5112",  AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 },
	{ "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A },
	{ "5112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112B },
	{ "2112",  AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 },
	{ "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A },
	{ "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B },
	{ "2413",  AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 },
	{ "5413",  AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 },
	{ "2316",  AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 },
	{ "2317",  AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 },
	{ "5424",  AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 },
	{ "5133",  AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 },
	{ "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN },
};
#define ATH5K_SPMBL_NO   1
#define ATH5K_SPMBL_YES  2
#define ATH5K_SPMBL_BOTH 3
static const struct {
	u16 bitrate;
	u8 short_pmbl;
	u8 hw_code;
} ath5k_rates[] = {
	{ 10, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_1M },
	{ 20, ATH5K_SPMBL_NO, ATH5K_RATE_CODE_2M },
	{ 55, ATH5K_SPMBL_NO, ATH5K_RATE_CODE_5_5M },
	{ 110, ATH5K_SPMBL_NO, ATH5K_RATE_CODE_11M },
	{ 60, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_6M },
	{ 90, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_9M },
	{ 120, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_12M },
	{ 180, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_18M },
	{ 240, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_24M },
	{ 360, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_36M },
	{ 480, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_48M },
	{ 540, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_54M },
	{ 20, ATH5K_SPMBL_YES, ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE },
	{ 55, ATH5K_SPMBL_YES, ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE },
	{ 110, ATH5K_SPMBL_YES, ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE },
};
#define ATH5K_NR_RATES 15
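/*
 * Note on units: the bitrate column above is in 100 kbps steps
 * (10 = 1 Mbps, 540 = 54 Mbps), the convention the gPXE net80211
 * layer uses for rate tables. The final three entries repeat the
 * 2, 5.5 and 11 Mbps CCK rates with AR5K_SET_SHORT_PREAMBLE folded
 * into the hardware rate code.
 */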
/*
 * Prototypes - PCI stack related functions
 */
static int ath5k_probe(struct pci_device *pdev,
		       const struct pci_device_id *id);
static void ath5k_remove(struct pci_device *pdev);

struct pci_driver ath5k_pci_driver __pci_driver = {
	.ids = ath5k_nics,
	.id_count = sizeof(ath5k_nics) / sizeof(ath5k_nics[0]),
	.probe = ath5k_probe,
	.remove = ath5k_remove,
};
/*
 * Prototypes - MAC 802.11 stack related functions
 */
static int ath5k_tx(struct net80211_device *dev, struct io_buffer *skb);
static int ath5k_reset(struct ath5k_softc *sc, struct net80211_channel *chan);
static int ath5k_reset_wake(struct ath5k_softc *sc);
static int ath5k_start(struct net80211_device *dev);
static void ath5k_stop(struct net80211_device *dev);
static int ath5k_config(struct net80211_device *dev, int changed);
static void ath5k_poll(struct net80211_device *dev);
static void ath5k_irq(struct net80211_device *dev, int enable);

static struct net80211_device_operations ath5k_ops = {
	.open = ath5k_start,
	.close = ath5k_stop,
	.transmit = ath5k_tx,
	.poll = ath5k_poll,
	.irq = ath5k_irq,
	.config = ath5k_config,
};
/*
 * Prototypes - Internal functions
 */
/* Attach/detach */
static int ath5k_attach(struct net80211_device *dev);
static void ath5k_detach(struct net80211_device *dev);
/* Channel/mode setup */
static unsigned int ath5k_copy_channels(struct ath5k_hw *ah,
					struct net80211_channel *channels,
					unsigned int mode, unsigned int max);
static int ath5k_setup_bands(struct net80211_device *dev);
static int ath5k_chan_set(struct ath5k_softc *sc,
			  struct net80211_channel *chan);
static void ath5k_setcurmode(struct ath5k_softc *sc,
			     unsigned int mode);
static void ath5k_mode_setup(struct ath5k_softc *sc);

/* Descriptor setup */
static int ath5k_desc_alloc(struct ath5k_softc *sc);
static void ath5k_desc_free(struct ath5k_softc *sc);
static int ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf);
static int ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf);

static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
				    struct ath5k_buf *bf)
{
	if (!bf->iob)
		return;

	net80211_tx_complete(sc->dev, bf->iob, 0, ECANCELED);
	bf->iob = NULL;
}
static inline void ath5k_rxbuf_free(struct ath5k_softc *sc __unused,
				    struct ath5k_buf *bf)
{
	free_iob(bf->iob);
	bf->iob = NULL;
}
/* Queues setup */
static int ath5k_txq_setup(struct ath5k_softc *sc,
			   int qtype, int subtype);
static void ath5k_txq_drainq(struct ath5k_softc *sc,
			     struct ath5k_txq *txq);
static void ath5k_txq_cleanup(struct ath5k_softc *sc);
static void ath5k_txq_release(struct ath5k_softc *sc);
/* Rx handling */
static int ath5k_rx_start(struct ath5k_softc *sc);
static void ath5k_rx_stop(struct ath5k_softc *sc);
/* Tx handling */
static void ath5k_tx_processq(struct ath5k_softc *sc,
			      struct ath5k_txq *txq);

/* Interrupt handling */
static int ath5k_init(struct ath5k_softc *sc);
static int ath5k_stop_hw(struct ath5k_softc *sc);
static void ath5k_calibrate(struct ath5k_softc *sc);

/* Filter */
static void ath5k_configure_filter(struct ath5k_softc *sc);
/********************\
* PCI Initialization *
\********************/

static const char *
ath5k_chip_name(enum ath5k_srev_type type, u16 val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
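/*
 * Usage example: the probe routine below calls
 * ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev); a revision
 * whose masked value equals AR5K_SREV_AR5212 resolves to "5212",
 * while anything unmatched falls through to the "xxxxx" sentinel
 * entries at the end of srev_names[].
 */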
static int ath5k_probe(struct pci_device *pdev,
		       const struct pci_device_id *id)
{
	void *mem;
	struct ath5k_softc *sc;
	struct net80211_device *dev;
	int ret;
	u8 csz;

	adjust_pci_device(pdev);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
	if (csz == 0) {
		/*
		 * We must have this setup properly for rx buffer
		 * DMA to work so force a reasonable value here if it
		 * comes up zero.
		 */
		csz = 16;
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
	}
	/*
	 * The default setting of latency timer yields poor results,
	 * set it to the value used by other systems. It may be worth
	 * tweaking this setting more.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);

	/*
	 * Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, 0x41, 0);

	mem = ioremap(pdev->membase, 0x10000);
	if (!mem) {
		DBG("ath5k: cannot remap PCI memory region\n");
		ret = -EIO;
		goto err;
	}

	/*
	 * Allocate dev (net80211 main struct)
	 * and dev->priv (driver private data)
	 */
	dev = net80211_alloc(sizeof(*sc));
	if (!dev) {
		DBG("ath5k: cannot allocate 802.11 device\n");
		ret = -ENOMEM;
		goto err_map;
	}

	/* Initialize driver private data */
	sc = dev->priv;
	sc->dev = dev;
	sc->pdev = pdev;

	sc->hwinfo = zalloc(sizeof(*sc->hwinfo));
	if (!sc->hwinfo) {
		DBG("ath5k: cannot allocate 802.11 hardware info structure\n");
		ret = -ENOMEM;
		goto err_free;
	}

	sc->hwinfo->flags = NET80211_HW_RX_HAS_FCS;
	sc->hwinfo->signal_type = NET80211_SIGNAL_DB;
	sc->hwinfo->signal_max = 40; /* 35dB should give perfect 54Mbps */
	sc->hwinfo->channel_change_time = 5000;

	/* Avoid working with the device until setup is complete */
	sc->status |= ATH_STAT_INVALID;

	sc->iobase = mem;
	sc->cachelsz = csz * 4; /* convert to bytes */

	DBG("ath5k: register base at %p (%08lx)\n", sc->iobase, pdev->membase);
	DBG("ath5k: cache line size %d\n", sc->cachelsz);

	/* Set private data */
	pci_set_drvdata(pdev, dev);
	dev->netdev->dev = (struct device *)pdev;

	/* Initialize device */
	ret = ath5k_hw_attach(sc, id->driver_data, &sc->ah);
	if (ret)
		goto err_free_hwinfo;

	/* Finish private driver data initialization */
	ret = ath5k_attach(dev);
	if (ret)
		goto err_ah;

	DBG("Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
	    ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
	    sc->ah->ah_mac_srev, sc->ah->ah_phy_revision);

	if (!sc->ah->ah_single_chip) {
		/* Single chip radio (!RF5111) */
		if (sc->ah->ah_radio_5ghz_revision &&
		    !sc->ah->ah_radio_2ghz_revision) {
			/* No 5GHz support -> report 2GHz radio */
			if (!(sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11A)) {
				DBG("RF%s 2GHz radio found (0x%x)\n",
				    ath5k_chip_name(AR5K_VERSION_RAD,
						    sc->ah->ah_radio_5ghz_revision),
				    sc->ah->ah_radio_5ghz_revision);
			/* No 2GHz support (5110 and some
			 * 5GHz only cards) -> report 5GHz radio */
			} else if (!(sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11B)) {
				DBG("RF%s 5GHz radio found (0x%x)\n",
				    ath5k_chip_name(AR5K_VERSION_RAD,
						    sc->ah->ah_radio_5ghz_revision),
				    sc->ah->ah_radio_5ghz_revision);
			/* Multiband radio */
			} else {
				DBG("RF%s multiband radio found (0x%x)\n",
				    ath5k_chip_name(AR5K_VERSION_RAD,
						    sc->ah->ah_radio_5ghz_revision),
				    sc->ah->ah_radio_5ghz_revision);
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
		else if (sc->ah->ah_radio_5ghz_revision &&
			 sc->ah->ah_radio_2ghz_revision) {
			DBG("RF%s 5GHz radio found (0x%x)\n",
			    ath5k_chip_name(AR5K_VERSION_RAD,
					    sc->ah->ah_radio_5ghz_revision),
			    sc->ah->ah_radio_5ghz_revision);
			DBG("RF%s 2GHz radio found (0x%x)\n",
			    ath5k_chip_name(AR5K_VERSION_RAD,
					    sc->ah->ah_radio_2ghz_revision),
			    sc->ah->ah_radio_2ghz_revision);
		}
	}

	/* Device is now usable */
	sc->status &= ~ATH_STAT_INVALID;

	return 0;

err_ah:
	ath5k_hw_detach(sc->ah);
err_free_hwinfo:
	free(sc->hwinfo);
err_free:
	net80211_free(dev);
err_map:
	iounmap(mem);
err:
	return ret;
}
static void ath5k_remove(struct pci_device *pdev)
{
	struct net80211_device *dev = pci_get_drvdata(pdev);
	struct ath5k_softc *sc = dev->priv;

	ath5k_detach(dev);
	ath5k_hw_detach(sc->ah);
	iounmap(sc->iobase);
	free(sc->hwinfo);
	net80211_free(dev);
}
/***********************\
* Driver Initialization *
\***********************/
static int
ath5k_attach(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	int ret;

	/*
	 * Collect the channel list. The 802.11 layer
	 * is responsible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(dev);
	if (ret) {
		DBG("ath5k: can't get channels\n");
		goto err;
	}

	/* NB: setup here so ath5k_rate_update is happy */
	if (ah->ah_modes & AR5K_MODE_BIT_11A)
		ath5k_setcurmode(sc, AR5K_MODE_11A);
	else
		ath5k_setcurmode(sc, AR5K_MODE_11B);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	ret = ath5k_desc_alloc(sc);
	if (ret) {
		DBG("ath5k: can't allocate descriptors\n");
		goto err;
	}

	/*
	 * Allocate hardware transmit queues. Note that hw functions
	 * handle resetting these queues at the needed time.
	 */
	ret = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
	if (ret) {
		DBG("ath5k: can't setup xmit queue\n");
		goto err_desc;
	}

	sc->last_calib_ticks = currticks();

	ret = ath5k_eeprom_read_mac(ah, sc->hwinfo->hwaddr);
	if (ret) {
		DBG("ath5k: unable to read address from EEPROM: 0x%04x\n",
		    sc->pdev->device);
		goto err_queues;
	}

	memset(sc->bssidmask, 0xff, ETH_ALEN);
	ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);

	ret = net80211_register(sc->dev, &ath5k_ops, sc->hwinfo);
	if (ret) {
		DBG("ath5k: can't register ieee80211 hw\n");
		goto err_queues;
	}

	return 0;

err_queues:
	ath5k_txq_release(sc);
err_desc:
	ath5k_desc_free(sc);
err:
	return ret;
}
static void
ath5k_detach(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;

	net80211_unregister(dev);
	ath5k_desc_free(sc);
	ath5k_txq_release(sc);
}
/********************\
* Channel/mode setup *
\********************/
/*
 * Convert IEEE channel number to MHz frequency.
 */
static unsigned int
ath5k_ieee2mhz(short chan)
{
	if (chan < 14)
		return 2407 + 5 * chan;
	if (chan == 14)
		return 2484;
	if (chan < 27)
		return 2212 + 20 * chan;
	return 5000 + 5 * chan;
}
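/*
 * Sanity check of the arithmetic above: channel 1 maps to
 * 2407 + 5 * 1 = 2412 MHz, channel 14 to 2484 MHz, and channel 36 to
 * 5000 + 5 * 36 = 5180 MHz, matching the standard 802.11b/g and
 * 802.11a channel plans.
 */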
static unsigned int
ath5k_copy_channels(struct ath5k_hw *ah,
		    struct net80211_channel *channels,
		    unsigned int mode, unsigned int max)
{
	unsigned int i, count, size, chfreq, freq, ch;

	if (!(ah->ah_modes & (1 << mode)))
		return 0;

	switch (mode) {
	case AR5K_MODE_11A:
	case AR5K_MODE_11A_TURBO:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		chfreq = CHANNEL_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
	case AR5K_MODE_11G_TURBO:
		size = 26;
		chfreq = CHANNEL_2GHZ;
		break;
	default:
		return 0;
	}

	for (i = 0, count = 0; i < size && max > 0; i++) {
		ch = i + 1;
		freq = ath5k_ieee2mhz(ch);

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, freq, chfreq))
			continue;

		/* Write channel info and increment counter */
		channels[count].center_freq = freq;
		channels[count].maxpower = 0; /* use regulatory */
		channels[count].band = (chfreq == CHANNEL_2GHZ) ?
			NET80211_BAND_2GHZ : NET80211_BAND_5GHZ;
		switch (mode) {
		case AR5K_MODE_11A:
		case AR5K_MODE_11G:
			channels[count].hw_value = chfreq | CHANNEL_OFDM;
			break;
		case AR5K_MODE_11A_TURBO:
		case AR5K_MODE_11G_TURBO:
			channels[count].hw_value = chfreq |
				CHANNEL_OFDM | CHANNEL_TURBO;
			break;
		case AR5K_MODE_11B:
			channels[count].hw_value = CHANNEL_B;
			break;
		}

		count++;
		max--;
	}

	return count;
}
static int
ath5k_setup_bands(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	int max_c, count_c = 0;
	int band;
	int i;

	max_c = sizeof(sc->hwinfo->channels) / sizeof(sc->hwinfo->channels[0]);

	/* 2GHz band */
	if (sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11G) {
		/* G mode */
		band = NET80211_BAND_2GHZ;
		sc->hwinfo->bands = NET80211_BAND_BIT_2GHZ;
		sc->hwinfo->modes = (NET80211_MODE_G | NET80211_MODE_B);

		for (i = 0; i < 12; i++)
			sc->hwinfo->rates[band][i] = ath5k_rates[i].bitrate;
		sc->hwinfo->nr_rates[band] = 12;

		sc->hwinfo->nr_channels =
			ath5k_copy_channels(ah, sc->hwinfo->channels,
					    AR5K_MODE_11G, max_c);
		count_c = sc->hwinfo->nr_channels;
		max_c -= count_c;
	} else if (sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11B) {
		/* B mode */
		band = NET80211_BAND_2GHZ;
		sc->hwinfo->bands = NET80211_BAND_BIT_2GHZ;
		sc->hwinfo->modes = NET80211_MODE_B;

		for (i = 0; i < 4; i++)
			sc->hwinfo->rates[band][i] = ath5k_rates[i].bitrate;
		sc->hwinfo->nr_rates[band] = 4;

		sc->hwinfo->nr_channels =
			ath5k_copy_channels(ah, sc->hwinfo->channels,
					    AR5K_MODE_11B, max_c);
		count_c = sc->hwinfo->nr_channels;
		max_c -= count_c;
	}

	/* 5GHz band, A mode */
	if (sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11A) {
		band = NET80211_BAND_5GHZ;
		sc->hwinfo->bands |= NET80211_BAND_BIT_5GHZ;
		sc->hwinfo->modes |= NET80211_MODE_A;

		for (i = 0; i < 8; i++)
			sc->hwinfo->rates[band][i] = ath5k_rates[i+4].bitrate;
		sc->hwinfo->nr_rates[band] = 8;

		sc->hwinfo->nr_channels =
			ath5k_copy_channels(ah, sc->hwinfo->channels,
					    AR5K_MODE_11A, max_c);
		count_c = sc->hwinfo->nr_channels;
	}

	return 0;
}
/*
 * Set/change channels. If the channel is really being changed,
 * it's done by resetting the chip. To accomplish this we must
 * first cleanup any pending DMA, then restart stuff after a la
 * ath5k_init.
 */
static int
ath5k_chan_set(struct ath5k_softc *sc, struct net80211_channel *chan)
{
	if (chan->center_freq != sc->curchan->center_freq ||
	    chan->hw_value != sc->curchan->hw_value) {
		/*
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		DBG2("ath5k: resetting for channel change (%d -> %d MHz)\n",
		     sc->curchan->center_freq, chan->center_freq);
		return ath5k_reset(sc, chan);
	}

	return 0;
}
static void
ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
{
	sc->curmode = mode;

	if (mode == AR5K_MODE_11A) {
		sc->curband = NET80211_BAND_5GHZ;
	} else {
		sc->curband = NET80211_BAND_2GHZ;
	}
}
static void
ath5k_mode_setup(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	u32 rfilt;

	/* configure rx filter */
	rfilt = sc->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);

	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, sc->bssidmask);

	/* configure operational mode */
	ath5k_hw_set_opmode(ah);

	ath5k_hw_set_mcast_filter(ah, 0, 0);
}
int
ath5k_hw_rix_to_bitrate(int hw_rix)
{
	int i;

	for (i = 0; i < ATH5K_NR_RATES; i++) {
		if (ath5k_rates[i].hw_code == hw_rix)
			return ath5k_rates[i].bitrate;
	}

	DBG("ath5k: invalid rix %02x\n", hw_rix);
	return 10; /* use lowest rate */
}
int ath5k_bitrate_to_hw_rix(int bitrate)
{
	int i;

	for (i = 0; i < ATH5K_NR_RATES; i++) {
		if (ath5k_rates[i].bitrate == bitrate)
			return ath5k_rates[i].hw_code;
	}

	DBG("ath5k: invalid bitrate %d\n", bitrate);
	return ATH5K_RATE_CODE_1M; /* use lowest rate */
}
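/*
 * Round-trip example for the two helpers above:
 * ath5k_bitrate_to_hw_rix(110) returns ATH5K_RATE_CODE_11M (the first
 * matching table entry), and ath5k_hw_rix_to_bitrate() maps that code
 * back to 110, i.e. 11 Mbps in 100 kbps units.
 */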
static struct io_buffer *
ath5k_rx_iob_alloc(struct ath5k_softc *sc, u32 *iob_addr)
{
	struct io_buffer *iob;
	unsigned int off;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	iob = alloc_iob(sc->rxbufsize + sc->cachelsz - 1);

	if (!iob) {
		DBG("ath5k: can't alloc iobuf of size %d\n",
		    sc->rxbufsize + sc->cachelsz - 1);
		return NULL;
	}

	*iob_addr = virt_to_bus(iob->data);

	/*
	 * Cache-line-align. This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */
	off = *iob_addr % sc->cachelsz;
	if (off != 0) {
		iob_reserve(iob, sc->cachelsz - off);
		*iob_addr += sc->cachelsz - off;
	}

	return iob;
}
static int
ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct io_buffer *iob = bf->iob;
	struct ath5k_desc *ds;

	if (!iob) {
		iob = ath5k_rx_iob_alloc(sc, &bf->iobaddr);
		if (!iob)
			return -ENOMEM;
		bf->iob = iob;
	}

	/*
	 * Setup descriptors. For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end. As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally. This should be safe even
	 * if DMA is happening. When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list. This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->iobaddr;
	if (ah->ah_setup_rx_desc(ah, ds,
				 iob_tailroom(iob),	/* buffer size */
				 0) != 0) {
		DBG("ath5k: error setting up RX descriptor for %zd bytes\n",
		    iob_tailroom(iob));
		return -EINVAL;
	}

	if (sc->rxlink != NULL)
		*sc->rxlink = bf->daddr;
	sc->rxlink = &ds->ds_link;

	return 0;
}
static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq *txq = &sc->txq;
	struct ath5k_desc *ds = bf->desc;
	struct io_buffer *iob = bf->iob;
	unsigned int pktlen, flags;
	int ret;
	u16 duration = 0;
	u8 cts_rate = 0;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
	bf->iobaddr = virt_to_bus(iob->data);
	pktlen = iob_len(iob);

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (sc->dev->phy_flags & NET80211_PHY_USE_PROTECTION) {
		struct net80211_device *dev = sc->dev;

		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = sc->hw_rtscts_rate;
		duration = net80211_cts_duration(dev, pktlen);
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
				   IEEE80211_TYP_FRAME_HEADER_LEN,
				   AR5K_PKT_TYPE_NORMAL, sc->power_level * 2,
				   sc->hw_rate, ATH5K_RETRIES,
				   AR5K_TXKEYIX_INVALID, 0, flags,
				   cts_rate, duration);
	if (ret)
		return ret;

	ds->ds_link = 0;
	ds->ds_data = bf->iobaddr;

	list_add_tail(&bf->list, &txq->q);
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);

	return 0;
}
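/*
 * Queue chaining above: the first buffer programs the hardware TX
 * descriptor pointer (TXDP) directly, while later buffers are
 * appended by patching the previous descriptor's ds_link, so the
 * hardware walks the chain without further register writes.
 */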
/*******************\
* Descriptors setup *
\*******************/
static int
ath5k_desc_alloc(struct ath5k_softc *sc)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	u32 da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	sc->desc_len = sizeof(struct ath5k_desc) * (ATH_TXBUF + ATH_RXBUF + 1);
	sc->desc = malloc_dma(sc->desc_len, ATH5K_DESC_ALIGN);
	if (sc->desc == NULL) {
		DBG("ath5k: can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	memset(sc->desc, 0, sc->desc_len);
	sc->desc_daddr = virt_to_bus(sc->desc);

	ds = sc->desc;
	da = sc->desc_daddr;

	bf = calloc(ATH_TXBUF + ATH_RXBUF + 1, sizeof(struct ath5k_buf));
	if (bf == NULL) {
		DBG("ath5k: can't allocate buffer pointers\n");
		ret = -ENOMEM;
		goto err_free;
	}
	sc->bufptr = bf;

	INIT_LIST_HEAD(&sc->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->rxbuf);
	}

	INIT_LIST_HEAD(&sc->txbuf);
	sc->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->txbuf);
	}

	return 0;

err_free:
	free_dma(sc->desc, sc->desc_len);
err:
	sc->desc = NULL;
	return ret;
}
static void
ath5k_desc_free(struct ath5k_softc *sc)
{
	struct ath5k_buf *bf;

	list_for_each_entry(bf, &sc->txbuf, list)
		ath5k_txbuf_free(sc, bf);
	list_for_each_entry(bf, &sc->rxbuf, list)
		ath5k_rxbuf_free(sc, bf);

	/* Free memory associated with all descriptors */
	free_dma(sc->desc, sc->desc_len);

	free(sc->bufptr);
	sc->bufptr = NULL;
}
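/*
 * Layout note for the allocation above: one contiguous DMA block
 * holds ATH_RXBUF receive descriptors, then ATH_TXBUF transmit
 * descriptors, plus one spare entry (the counts are defined in the
 * driver headers). Each ath5k_buf caches its descriptor's virtual
 * (desc) and bus (daddr) addresses so ring chaining needs no further
 * address translation.
 */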
/**************\
* Queues setup *
\**************/

static int
ath5k_txq_setup(struct ath5k_softc *sc, int qtype, int subtype)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		.tqi_aifs = AR5K_TXQ_USEDEFAULT,
		.tqi_cw_min = AR5K_TXQ_USEDEFAULT,
		.tqi_cw_max = AR5K_TXQ_USEDEFAULT
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
		       AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		DBG("ath5k: can't set up a TX queue\n");
		return -EIO;
	}

	txq = &sc->txq;
	txq->qnum = qnum;
	txq->link = NULL;
	INIT_LIST_HEAD(&txq->q);
	txq->setup = 1;

	return 0;
}
static void
ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
{
	struct ath5k_buf *bf, *bf0;

	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
		ath5k_txbuf_free(sc, bf);

		list_del(&bf->list);
		list_add_tail(&bf->list, &sc->txbuf);
	}

	txq->link = NULL;
}
/*
 * Drain the transmit queues and reclaim resources.
 */
static void
ath5k_txq_cleanup(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	if (!(sc->status & ATH_STAT_INVALID)) {
		/* don't touch the hardware if marked invalid */
		if (sc->txq.setup) {
			ath5k_hw_stop_tx_dma(ah, sc->txq.qnum);
			DBG("ath5k: txq [%d] %x, link %p\n",
			    sc->txq.qnum,
			    ath5k_hw_get_txdp(ah, sc->txq.qnum),
			    sc->txq.link);
		}
	}

	ath5k_txq_drainq(sc, &sc->txq);
}
static void
ath5k_txq_release(struct ath5k_softc *sc)
{
	if (sc->txq.setup) {
		ath5k_hw_release_tx_queue(sc->ah);
		sc->txq.setup = 0;
	}
}
/*************\
* RX Handling *
\*************/

/*
 * Enable the receive h/w following a reset.
 */
static int
ath5k_rx_start(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_buf *bf;
	int ret;

	sc->rxbufsize = IEEE80211_MAX_LEN;
	if (sc->rxbufsize % sc->cachelsz != 0)
		sc->rxbufsize += sc->cachelsz - (sc->rxbufsize % sc->cachelsz);

	sc->rxlink = NULL;

	list_for_each_entry(bf, &sc->rxbuf, list) {
		ret = ath5k_rxbuf_setup(sc, bf);
		if (ret != 0)
			return ret;
	}

	bf = list_entry(sc->rxbuf.next, struct ath5k_buf, list);

	ath5k_hw_set_rxdp(ah, bf->daddr);
	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_mode_setup(sc);		/* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
}
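/*
 * Example of the rxbufsize rounding above: if IEEE80211_MAX_LEN were
 * 2352 bytes and the cache line size 64 bytes, 2352 % 64 = 48, so the
 * buffer grows by 16 bytes to a cache-aligned 2368.
 */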
/*
 * Disable the receive h/w in preparation for a reset.
 */
static void
ath5k_rx_stop(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */
	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_dma(ah);	/* disable DMA engine */

	sc->rxlink = NULL;		/* just in case */
}
static void
ath5k_handle_rx(struct ath5k_softc *sc)
{
	struct ath5k_rx_status rs;
	struct io_buffer *iob, *next_iob;
	u32 next_iob_addr;
	struct ath5k_buf *bf, *bf_last;
	struct ath5k_desc *ds;
	int ret;

	memset(&rs, 0, sizeof(rs));

	if (list_empty(&sc->rxbuf)) {
		DBG("ath5k: empty rx buf pool\n");
		return;
	}

	bf_last = list_entry(sc->rxbuf.prev, struct ath5k_buf, list);

	do {
		bf = list_entry(sc->rxbuf.next, struct ath5k_buf, list);
		assert(bf->iob != NULL);
		iob = bf->iob;
		ds = bf->desc;

		/*
		 * The last buffer must not be freed to ensure proper
		 * hardware function. Once the hardware has also finished
		 * the packet after it, we know it is no longer in use and
		 * we can go on.
		 */
		if (bf == bf_last) {
			struct ath5k_buf *bf_next = list_entry(bf->list.next,
							       struct ath5k_buf,
							       list);
			ret = sc->ah->ah_proc_rx_desc(sc->ah, bf_next->desc,
						      &rs);
			if (ret)
				break;
			/* skip the overwritten one (even status is martian) */
			goto next;
		}

		ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
		if (ret) {
			if (ret != -EINPROGRESS) {
				DBG("ath5k: error in processing rx desc: %s\n",
				    strerror(ret));
				net80211_rx_err(sc->dev, NULL, -ret);
			}
			/* normal return, reached end of
			   available descriptors */
			return;
		}

		if (rs.rs_more) {
			DBG("ath5k: unsupported fragmented rx\n");
			goto next;
		}

		if (rs.rs_status) {
			if (rs.rs_status & AR5K_RXERR_PHY) {
				/* These are uncommon, and may indicate a real problem. */
				net80211_rx_err(sc->dev, NULL, EIO);
				goto next;
			}
			if (rs.rs_status & AR5K_RXERR_CRC) {
				/* These occur *all the time*. */
				goto next;
			}
			if (rs.rs_status & AR5K_RXERR_DECRYPT) {
				/*
				 * Decrypt error. If the error occurred
				 * because there was no hardware key, then
				 * let the frame through so the upper layers
				 * can process it. This is necessary for 5210
				 * parts which have no way to setup a ``clear''
				 * key cache entry.
				 *
				 * XXX do key cache faulting
				 */
				if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
				    !(rs.rs_status & AR5K_RXERR_CRC))
					goto accept;
			}

			/* any other error, unhandled */
			DBG("ath5k: packet rx status %x\n", rs.rs_status);
			goto next;
		}
accept:
		next_iob = ath5k_rx_iob_alloc(sc, &next_iob_addr);

		/*
		 * If we can't replace bf->iob with a new iob under memory
		 * pressure, just skip this packet
		 */
		if (!next_iob) {
			DBG("ath5k: dropping packet under memory pressure\n");
			goto next;
		}

		iob_put(iob, rs.rs_datalen);

		/* The MAC header is padded to have 32-bit boundary if the
		 * packet payload is non-zero. However, gPXE only
		 * supports standard 802.11 packets with 24-byte
		 * header, so no padding correction should be needed. */

		DBG2("ath5k: rx %d bytes, signal %d\n", rs.rs_datalen,
		     rs.rs_rssi);

		net80211_rx(sc->dev, iob, rs.rs_rssi,
			    ath5k_hw_rix_to_bitrate(rs.rs_rate));

		bf->iob = next_iob;
		bf->iobaddr = next_iob_addr;
next:
		list_del(&bf->list);
		list_add_tail(&bf->list, &sc->rxbuf);
	} while (ath5k_rxbuf_setup(sc, bf) == 0);
}
/*************\
* TX Handling *
\*************/

static void
ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts;
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct io_buffer *iob;
	int ret;

	memset(&ts, 0, sizeof(ts));

	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
		ds = bf->desc;

		ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
		if (ret) {
			if (ret != -EINPROGRESS)
				DBG("ath5k: error in processing tx desc: %s\n",
				    strerror(ret));
			/* normal return, reached end of tx completions */
			break;
		}

		iob = bf->iob;
		bf->iob = NULL;

		DBG2("ath5k: tx %zd bytes complete, %d retries\n",
		     iob_len(iob), ts.ts_retry[0]);

		net80211_tx_complete(sc->dev, iob, ts.ts_retry[0],
				     ts.ts_status ? EIO : 0);

		list_del(&bf->list);
		list_add_tail(&bf->list, &sc->txbuf);
	}

	if (list_empty(&txq->q))
		txq->link = NULL;
}

static void
ath5k_handle_tx(struct ath5k_softc *sc)
{
	ath5k_tx_processq(sc, &sc->txq);
}
/********************\
* Interrupt handling *
\********************/
static void
ath5k_irq(struct net80211_device *dev, int enable)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;

	sc->irq_ena = enable;
	ah->ah_ier = enable ? AR5K_IER_ENABLE : AR5K_IER_DISABLE;

	ath5k_hw_reg_write(ah, ah->ah_ier, AR5K_IER);
	ath5k_hw_set_imr(ah, sc->imask);
}
static int
ath5k_init(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	int ret, i;

	/*
	 * Stop anything previously setup. This is safe
	 * no matter this is the first time through or not.
	 */
	ath5k_stop_hw(sc);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	sc->curchan = sc->dev->channels + sc->dev->channel;
	sc->curband = sc->curchan->band;
	sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
		    AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
		    AR5K_INT_FATAL | AR5K_INT_GLOBAL;
	ret = ath5k_reset(sc, NULL);
	if (ret)
		goto done;

	ath5k_rfkill_hw_start(ah);

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
		ath5k_hw_reset_key(ah, i);

	/* Set ack to be sent at low bit-rates */
	ath5k_hw_set_ack_bitrate_high(ah, 0);

	ret = 0;
done:
	return ret;
}
static int
ath5k_stop_hw(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	if (!(sc->status & ATH_STAT_INVALID)) {
		ath5k_hw_set_imr(ah, 0);
	}
	ath5k_txq_cleanup(sc);
	if (!(sc->status & ATH_STAT_INVALID)) {
		ath5k_rx_stop(sc);
		ath5k_hw_phy_disable(ah);
	} else
		sc->rxlink = NULL;

	ath5k_rfkill_hw_stop(sc->ah);

	return 0;
}
static void
ath5k_poll(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	enum ath5k_int status;
	unsigned int counter = 1000;

	if (currticks() - sc->last_calib_ticks >
	    ATH5K_CALIB_INTERVAL * ticks_per_sec()) {
		ath5k_calibrate(sc);
		sc->last_calib_ticks = currticks();
	}

	if ((sc->status & ATH_STAT_INVALID) ||
	    (sc->irq_ena && !ath5k_hw_is_intr_pending(ah)))
		return;

	do {
		ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
		DBGP("ath5k: status %#x/%#x\n", status, sc->imask);
		if (status & AR5K_INT_FATAL) {
			/*
			 * Fatal errors are unrecoverable.
			 * Typically these are caused by DMA errors.
			 */
			DBG("ath5k: fatal error, resetting\n");
			ath5k_reset_wake(sc);
		} else if (status & AR5K_INT_RXORN) {
			DBG("ath5k: rx overrun, resetting\n");
			ath5k_reset_wake(sc);
		} else {
			if (status & AR5K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 *     RXE bit is written, but it doesn't work at
				 *     least on older hardware revs.
				 */
				DBG("ath5k: rx EOL\n");
				sc->rxlink = NULL;
			}
			if (status & AR5K_INT_TXURN) {
				/* bump tx trigger level */
				DBG("ath5k: tx underrun\n");
				ath5k_hw_update_tx_triglevel(ah, 1);
			}
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
				ath5k_handle_rx(sc);
			if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
				      | AR5K_INT_TXERR | AR5K_INT_TXEOL))
				ath5k_handle_tx(sc);
		}
	} while (ath5k_hw_is_intr_pending(ah) && counter-- > 0);

	if (!counter)
		DBG("ath5k: too many interrupts, giving up for now\n");
}
/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
ath5k_calibrate(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
		/*
		 * Rfgain is out of bounds, reset the chip
		 * to load new gain values.
		 */
		DBG("ath5k: resetting for calibration\n");
		ath5k_reset_wake(sc);
	}
	if (ath5k_hw_phy_calibrate(ah, sc->curchan))
		DBG("ath5k: calibration of channel %d failed\n",
		    sc->curchan->channel_nr);
}
/********************\
* Net80211 functions *
\********************/
static int
ath5k_tx(struct net80211_device *dev, struct io_buffer *iob)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_buf *bf;
	int rc;

	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * gPXE only ever sends 24-byte headers, so no action necessary.
	 */

	if (list_empty(&sc->txbuf)) {
		DBG("ath5k: dropping packet because no tx bufs available\n");
		return -ENOBUFS;
	}

	bf = list_entry(sc->txbuf.next, struct ath5k_buf, list);
	list_del(&bf->list);
	sc->txbuf_len--;

	bf->iob = iob;

	if ((rc = ath5k_txbuf_setup(sc, bf)) != 0) {
		bf->iob = NULL;
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
		return rc;
	}

	return 0;
}
/*
 * Reset the hardware. If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
 */
static int
ath5k_reset(struct ath5k_softc *sc, struct net80211_channel *chan)
{
	struct ath5k_hw *ah = sc->ah;
	int ret;

	if (chan) {
		ath5k_hw_set_imr(ah, 0);
		ath5k_txq_cleanup(sc);
		ath5k_rx_stop(sc);

		sc->curchan = chan;
		sc->curband = chan->band;
	}

	ret = ath5k_hw_reset(ah, sc->curchan, 1);
	if (ret) {
		DBG("ath5k: can't reset hardware: %s\n", strerror(ret));
		goto err;
	}

	ret = ath5k_rx_start(sc);
	if (ret) {
		DBG("ath5k: can't start rx logic: %s\n", strerror(ret));
		goto err;
	}

	/*
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
	 */
	/* ath5k_chan_change(sc, c); */

	/* Reenable interrupts if necessary */
	ath5k_irq(sc->dev, sc->irq_ena);

	return 0;
err:
	return ret;
}
static int ath5k_reset_wake(struct ath5k_softc *sc)
{
	return ath5k_reset(sc, sc->curchan);
}
static int ath5k_start(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	int ret;

	if ((ret = ath5k_init(sc)) != 0)
		return ret;

	sc->assoc = 0;
	ath5k_configure_filter(sc);
	ath5k_hw_set_lladdr(sc->ah, dev->netdev->ll_addr);

	return 0;
}
static void ath5k_stop(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	u8 mac[ETH_ALEN] = {};

	ath5k_hw_set_lladdr(sc->ah, mac);

	ath5k_stop_hw(sc);
}
static int
ath5k_config(struct net80211_device *dev, int changed)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	struct net80211_channel *chan = &dev->channels[dev->channel];
	int ret;

	if (changed & NET80211_CFG_CHANNEL) {
		sc->power_level = chan->maxpower;
		if ((ret = ath5k_chan_set(sc, chan)) != 0)
			return ret;
	}

	if ((changed & NET80211_CFG_RATE) ||
	    (changed & NET80211_CFG_PHY_PARAMS)) {
		int spmbl = ATH5K_SPMBL_NO;
		u16 rate = dev->rates[dev->rate];
		u16 slowrate = dev->rates[dev->rtscts_rate];
		int i;

		if (dev->phy_flags & NET80211_PHY_USE_SHORT_PREAMBLE)
			spmbl = ATH5K_SPMBL_YES;

		for (i = 0; i < ATH5K_NR_RATES; i++) {
			if (ath5k_rates[i].bitrate == rate &&
			    (ath5k_rates[i].short_pmbl & spmbl))
				sc->hw_rate = ath5k_rates[i].hw_code;

			if (ath5k_rates[i].bitrate == slowrate &&
			    (ath5k_rates[i].short_pmbl & spmbl))
				sc->hw_rtscts_rate = ath5k_rates[i].hw_code;
		}
	}

	if (changed & NET80211_CFG_ASSOC) {
		sc->assoc = !!(dev->state & NET80211_ASSOCIATED);
		if (sc->assoc)
			memcpy(ah->ah_bssid, dev->bssid, ETH_ALEN);
		else
			memset(ah->ah_bssid, 0xff, ETH_ALEN);
		ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
	}

	return 0;
}
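/*
 * Example of the preamble matching above: with short preambles in use
 * (spmbl == ATH5K_SPMBL_YES) and rate == 110, the plain 11M entry is
 * flagged ATH5K_SPMBL_NO (1 & 2 == 0) and is skipped, while the entry
 * carrying ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE matches;
 * OFDM rates are flagged ATH5K_SPMBL_BOTH and match either setting.
 */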
/*
 * o always accept unicast, broadcast, and multicast traffic
 * o multicast traffic for all BSSIDs will be enabled if mac80211
 *   says it should be
 * o maintain current state of phy ofdm or phy cck error reception.
 *   If the hardware detects any of these types of errors then
 *   ath5k_hw_get_rx_filter() will pass to us the respective
 *   hardware filters to be able to receive these types of frames.
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when scanning
 */
static void ath5k_configure_filter(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	u32 mfilt[2], rfilt;

	/* Enable all multicast */
	mfilt[0] = ~0;
	mfilt[1] = ~0;

	/* Enable data frames and beacons */
	rfilt = (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
		 AR5K_RX_FILTER_MCAST | AR5K_RX_FILTER_BEACON);

	/* Set filters */
	ath5k_hw_set_rx_filter(ah, rfilt);

	/* Set multicast bits */
	ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);

	/* Set the cached hw filter flags; these will later actually
	 * be set in HW */
	sc->filter_flags = rfilt;
}