/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
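/*
 * Pick the mac80211 hw a received frame belongs to: if addr1 matches the
 * permanent address of one of the secondary (virtual) wiphys, return that
 * wiphy's hw, otherwise fall back to the primary wiphy's hw.
 */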
static struct ieee80211_hw *ath_get_virt_hw(struct ath_softc *sc,
                                            struct ieee80211_hdr *hdr)
{
        struct ieee80211_hw *hw = sc->pri_wiphy->hw;
        int i;

        spin_lock_bh(&sc->wiphy_lock);
        for (i = 0; i < sc->num_sec_wiphy; i++) {
                struct ath_wiphy *aphy = sc->sec_wiphy[i];
                if (aphy == NULL)
                        continue;
                if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
                    == 0) {
                        hw = aphy->hw;
                        break;
                }
        }
        spin_unlock_bh(&sc->wiphy_lock);

        return hw;
}
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_desc *ds;
        struct sk_buff *skb;

        ds = bf->bf_desc;
        ds->ds_link = 0; /* link to null */
        ds->ds_data = bf->bf_buf_addr;

        /* virtual addr of the beginning of the buffer. */
        skb = bf->bf_mpdu;
        ds->ds_vdata = skb->data;

        /* setup rx descriptors. The rx.bufsize here tells the hardware
         * how much data it can DMA to us and that we are prepared
         * to process */
        ath9k_hw_setuprxdesc(ah, ds,
                             sc->rx.bufsize,
                             0);

        if (sc->rx.rxlink == NULL)
                ath9k_hw_putrxbuf(ah, bf->bf_daddr);
        else
                *sc->rx.rxlink = bf->bf_daddr;
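        /*
         * Remember the address of this descriptor's link field: the next
         * buffer that gets queued is appended by writing its DMA address
         * through rx.rxlink, so rx.rxlink always points at the tail of the
         * hardware RX chain.
         */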
        sc->rx.rxlink = &ds->ds_link;
}
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
        /* XXX block beacon interrupts */
        ath9k_hw_setantenna(sc->sc_ah, antenna);
        sc->rx.defant = antenna;
        sc->rx.rxotherant = 0;
}
/*
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */
static u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
        u64 tsf;

        tsf = ath9k_hw_gettsf64(sc->sc_ah);
        if ((tsf & 0x7fff) < rstamp)
                tsf -= 0x8000;
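        /*
         * Illustrative example: if the TSF read above is 0x108002, its low
         * 15 bits (0x0002) are smaller than an rstamp of 0x7ffe, i.e. the
         * 15-bit counter wrapped after the frame was stamped; backing up by
         * 0x8000 gives 0x100002, and splicing in rstamp below yields
         * 0x107ffe, 4us before the TSF read.
         */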
        return (tsf & ~0x7fff) | rstamp;
}
/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This lets us keep statistics there.
 */
static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
                          struct ieee80211_rx_status *rx_status,
                          bool *decrypt_error, struct ath_softc *sc)
{
        struct ieee80211_hdr *hdr;
        struct ieee80211_hw *hw;
        struct ieee80211_sta *sta;
        struct ath_node *an;
        u8 ratecode;
        __le16 fc;
        int last_rssi = ATH_RSSI_DUMMY_MARKER;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = hdr->frame_control;
        memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
        hw = ath_get_virt_hw(sc, hdr);
        if (ds->ds_rxstat.rs_more) {
                /*
                 * Frame spans multiple descriptors; this cannot happen yet
                 * as we don't support jumbograms. If not in monitor mode,
                 * discard the frame. Enable this if you want to see
                 * error frames in Monitor mode.
                 */
                if (sc->sc_ah->opmode != NL80211_IFTYPE_MONITOR)
                        goto rx_next;
        } else if (ds->ds_rxstat.rs_status != 0) {
                if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
                        rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
                if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY)
                        goto rx_next;

                if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
                        *decrypt_error = true;
                } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
                        if (ieee80211_is_ctl(fc))
                                /*
                                 * Sometimes, we get invalid
                                 * MIC failures on valid control frames.
                                 * Remove these mic errors.
                                 */
                                ds->ds_rxstat.rs_status &= ~ATH9K_RXERR_MIC;
                        else
                                rx_status->flag |= RX_FLAG_MMIC_ERROR;
                }

                /*
                 * Reject error frames with the exception of
                 * decryption and MIC failures. For monitor mode,
                 * we also ignore the CRC error.
                 */
                if (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR) {
                        if (ds->ds_rxstat.rs_status &
                            ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
                              ATH9K_RXERR_CRC))
                                goto rx_next;
                } else {
                        if (ds->ds_rxstat.rs_status &
                            ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
                                goto rx_next;
                        }
                }
        }
        ratecode = ds->ds_rxstat.rs_rate;

        if (ratecode & 0x80) {
                /* HT rate: the low 7 bits are the MCS index */
                rx_status->flag |= RX_FLAG_HT;
                if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040)
                        rx_status->flag |= RX_FLAG_40MHZ;
                if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
                        rx_status->flag |= RX_FLAG_SHORT_GI;
                rx_status->rate_idx = ratecode & 0x7f;
        } else {
                /* Legacy rate: map the hardware rate code to the index of
                 * the matching bitrate in the current band, also checking
                 * the short-preamble code. */
                int i = 0, cur_band, n_rates;

                cur_band = hw->conf.channel->band;
                n_rates = sc->sbands[cur_band].n_bitrates;

                for (i = 0; i < n_rates; i++) {
                        if (sc->sbands[cur_band].bitrates[i].hw_value ==
                            ratecode) {
                                rx_status->rate_idx = i;
                                break;
                        }

                        if (sc->sbands[cur_band].bitrates[i].hw_value_short ==
                            ratecode) {
                                rx_status->rate_idx = i;
                                rx_status->flag |= RX_FLAG_SHORTPRE;
                                break;
                        }
                }
        }
        rcu_read_lock();
        sta = ieee80211_find_sta(sc->hw, hdr->addr2);
        if (sta) {
                an = (struct ath_node *) sta->drv_priv;
                if (ds->ds_rxstat.rs_rssi != ATH9K_RSSI_BAD &&
                    !ds->ds_rxstat.rs_moreaggr)
                        ATH_RSSI_LPF(an->last_rssi, ds->ds_rxstat.rs_rssi);
                last_rssi = an->last_rssi;
        }
        rcu_read_unlock();

        if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
                ds->ds_rxstat.rs_rssi = ATH_EP_RND(last_rssi,
                                                   ATH_RSSI_EP_MULTIPLIER);
        if (ds->ds_rxstat.rs_rssi < 0)
                ds->ds_rxstat.rs_rssi = 0;
        else if (ds->ds_rxstat.rs_rssi > 127)
                ds->ds_rxstat.rs_rssi = 127;
        /* Update Beacon RSSI, this is used by ANI. */
        if (ieee80211_is_beacon(fc))
                sc->sc_ah->stats.avgbrssi = ds->ds_rxstat.rs_rssi;

        rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
        rx_status->band = hw->conf.channel->band;
        rx_status->freq = hw->conf.channel->center_freq;
        rx_status->noise = sc->ani.noise_floor;
        rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + ds->ds_rxstat.rs_rssi;
        rx_status->antenna = ds->ds_rxstat.rs_antenna;
        /*
         * Theory for reporting quality:
         *
         * At a hardware RSSI of 45 you will be able to use MCS 7  reliably.
         * At a hardware RSSI of 45 you will be able to use MCS 15 reliably.
         * At a hardware RSSI of 35 you should be able to use 54 Mbps reliably.
         *
         * MCS 7  is the highest MCS index usable by a 1-stream device.
         * MCS 15 is the highest MCS index usable by a 2-stream device.
         *
         * All ath9k devices are either 1-stream or 2-stream.
         *
         * How many bars you see is derived from the qual reporting.
         *
         * A more elaborate scheme can be used here but it requires tables
         * of SNR/throughput for each possible mode used. For the MCS table
         * you can refer to the wireless wiki:
         *
         * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
         */
        if (conf_is_ht(&hw->conf))
                rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 45;
        else
                rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 35;

        /* rssi can be more than 45 though, anything above that
         * should be considered at 100% */
        if (rx_status->qual > 100)
                rx_status->qual = 100;
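        /*
         * Example of the scaling above: an RSSI of 36 reports
         * qual = 36 * 100 / 45 = 80 on an HT channel, while on a legacy
         * channel 36 * 100 / 35 = 102 and is clamped to 100.
         */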
        rx_status->flag |= RX_FLAG_TSFT;

        return 1;
rx_next:
        return 0;
}
static void ath_opmode_init(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);

        u32 rfilt, mfilt[2];

        /* configure rx filter */
        rfilt = ath_calcrxfilter(sc);
        ath9k_hw_setrxfilter(ah, rfilt);

        /* configure bssid mask */
        if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
                ath_hw_setbssidmask(common);

        /* configure operational mode */
        ath9k_hw_setopmode(ah);

        /* Handle any link-level address change. */
        ath9k_hw_setmac(ah, common->macaddr);

        /* calculate and install multicast filter */
        mfilt[0] = mfilt[1] = ~0;
        ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct sk_buff *skb;
        struct ath_buf *bf;
        int error = 0;

        spin_lock_init(&sc->rx.rxflushlock);
        sc->sc_flags &= ~SC_OP_RXFLUSH;
        spin_lock_init(&sc->rx.rxbuflock);

        sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
                                 min(common->cachelsz, (u16)64));

        ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
                  common->cachelsz, sc->rx.bufsize);

        /* Initialize rx descriptors */

        error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
                                  "rx", nbufs, 1);
        if (error != 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "failed to allocate rx descriptors: %d\n", error);
                goto err;
        }

        list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                skb = ath_rxbuf_alloc(common, sc->rx.bufsize, GFP_KERNEL);
                if (skb == NULL) {
                        error = -ENOMEM;
                        goto err;
                }

                bf->bf_mpdu = skb;
                bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
                                                 sc->rx.bufsize,
                                                 DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(sc->dev,
                                               bf->bf_buf_addr))) {
                        dev_kfree_skb_any(skb);
                        bf->bf_mpdu = NULL;
                        ath_print(common, ATH_DBG_FATAL,
                                  "dma_mapping_error() on RX init\n");
                        error = -ENOMEM;
                        goto err;
                }
                bf->bf_dmacontext = bf->bf_buf_addr;
        }
        sc->rx.rxlink = NULL;

err:
        if (error)
                ath_rx_cleanup(sc);

        return error;
}
void ath_rx_cleanup(struct ath_softc *sc)
{
        struct sk_buff *skb;
        struct ath_buf *bf;

        list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                skb = bf->bf_mpdu;
                if (skb) {
                        dma_unmap_single(sc->dev, bf->bf_buf_addr,
                                         sc->rx.bufsize, DMA_FROM_DEVICE);
                        dev_kfree_skb(skb);
                }
        }

        if (sc->rx.rxdma.dd_desc_len != 0)
                ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
}
/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

        u32 rfilt;

        rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
                | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
                | ATH9K_RX_FILTER_MCAST;

        /* If not a STA, enable processing of Probe Requests */
        if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
                rfilt |= ATH9K_RX_FILTER_PROBEREQ;

        /*
         * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
         * mode interface or when in monitor mode. AP mode does not need this
         * since it receives all in-BSS frames anyway.
         */
        if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
             (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
            (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
                rfilt |= ATH9K_RX_FILTER_PROM;

        if (sc->rx.rxfilter & FIF_CONTROL)
                rfilt |= ATH9K_RX_FILTER_CONTROL;

        if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
            !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
                rfilt |= ATH9K_RX_FILTER_MYBEACON;
        else
                rfilt |= ATH9K_RX_FILTER_BEACON;

        if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) ||
             AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
            (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
            (sc->rx.rxfilter & FIF_PSPOLL))
                rfilt |= ATH9K_RX_FILTER_PSPOLL;

        if (conf_is_ht(&sc->hw->conf))
                rfilt |= ATH9K_RX_FILTER_COMP_BAR;

        if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
                /* TODO: only needed if more than one BSSID is in use in
                 * station/adhoc mode */
                /* The following may also be needed for other older chips */
                if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
                        rfilt |= ATH9K_RX_FILTER_PROM;
                rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
        }

        return rfilt;

#undef RX_FILTER_PRESERVE
}
int ath_startrecv(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_buf *bf, *tbf;

        spin_lock_bh(&sc->rx.rxbuflock);
        if (list_empty(&sc->rx.rxbuf))
                goto start_recv;

        sc->rx.rxlink = NULL;
        list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
                ath_rx_buf_link(sc, bf);
        }

        /* We could have deleted elements so the list may be empty now */
        if (list_empty(&sc->rx.rxbuf))
                goto start_recv;

        bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
        ath9k_hw_putrxbuf(ah, bf->bf_daddr);

start_recv:
        spin_unlock_bh(&sc->rx.rxbuflock);
        ath9k_hw_startpcureceive(ah);

        return 0;
}
bool ath_stoprecv(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        bool stopped;

        ath9k_hw_stoppcurecv(ah);
        ath9k_hw_setrxfilter(ah, 0);
        stopped = ath9k_hw_stopdmarecv(ah);
        sc->rx.rxlink = NULL;

        return stopped;
}
void ath_flushrecv(struct ath_softc *sc)
{
        spin_lock_bh(&sc->rx.rxflushlock);
        sc->sc_flags |= SC_OP_RXFLUSH;
        ath_rx_tasklet(sc, 1);
        sc->sc_flags &= ~SC_OP_RXFLUSH;
        spin_unlock_bh(&sc->rx.rxflushlock);
}
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
        /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
        struct ieee80211_mgmt *mgmt;
        u8 *pos, *end, id, elen;
        struct ieee80211_tim_ie *tim;

        mgmt = (struct ieee80211_mgmt *)skb->data;
        pos = mgmt->u.beacon.variable;
        end = skb->data + skb->len;

        while (pos + 2 < end) {
                id = *pos++;
                elen = *pos++;
                if (pos + elen > end)
                        break;

                if (id == WLAN_EID_TIM) {
                        if (elen < sizeof(*tim))
                                break;
                        tim = (struct ieee80211_tim_ie *) pos;
                        if (tim->dtim_count != 0)
                                break;
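                        /*
                         * Per IEEE 802.11, bit 0 of the TIM Bitmap Control
                         * field on a DTIM beacon (dtim_count == 0) is the
                         * traffic indicator for buffered multicast/broadcast
                         * frames, so this single bit is the answer.
                         */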
                        return tim->bitmap_ctrl & 0x01;
                }

                pos += elen;
        }

        return false;
}
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
        struct ieee80211_mgmt *mgmt;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        /* 24-byte header + timestamp(8) + beacon interval(2) + capability(2) */
        if (skb->len < 24 + 8 + 2 + 2)
                return;

        mgmt = (struct ieee80211_mgmt *)skb->data;
        if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
                return; /* not from our current AP */

        sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;

        if (sc->sc_flags & SC_OP_BEACON_SYNC) {
                sc->sc_flags &= ~SC_OP_BEACON_SYNC;
                ath_print(common, ATH_DBG_PS,
                          "Reconfigure Beacon timers based on "
                          "timestamp from the AP\n");
                ath_beacon_config(sc, NULL);
        }

        if (ath_beacon_dtim_pending_cab(skb)) {
                /*
                 * Remain awake waiting for buffered broadcast/multicast
                 * frames. If the last broadcast/multicast frame is not
                 * received properly, the next beacon frame will work as
                 * a backup trigger for returning into NETWORK SLEEP state,
                 * so we are waiting for it as well.
                 */
                ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
                          "buffered broadcast/multicast frame(s)\n");
                sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
                return;
        }

        if (sc->sc_flags & SC_OP_WAIT_FOR_CAB) {
                /*
                 * This can happen if a broadcast frame is dropped or the AP
                 * fails to send a frame indicating that all CAB frames have
                 * been delivered.
                 */
                sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
                ath_print(common, ATH_DBG_PS,
                          "PS wait for CAB frames timed out\n");
        }
}
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        hdr = (struct ieee80211_hdr *)skb->data;

        /* Process Beacon and CAB receive in PS state */
        if ((sc->sc_flags & SC_OP_WAIT_FOR_BEACON) &&
            ieee80211_is_beacon(hdr->frame_control))
                ath_rx_ps_beacon(sc, skb);
        else if ((sc->sc_flags & SC_OP_WAIT_FOR_CAB) &&
                 (ieee80211_is_data(hdr->frame_control) ||
                  ieee80211_is_action(hdr->frame_control)) &&
                 is_multicast_ether_addr(hdr->addr1) &&
                 !ieee80211_has_moredata(hdr->frame_control)) {
                /*
                 * No more broadcast/multicast frames to be received at this
                 * point.
                 */
                sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
                ath_print(common, ATH_DBG_PS,
                          "All PS CAB frames received, back to sleep\n");
        } else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) &&
                   !is_multicast_ether_addr(hdr->addr1) &&
                   !ieee80211_has_morefrags(hdr->frame_control)) {
                sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA;
                ath_print(common, ATH_DBG_PS,
                          "Going back to sleep after having received "
                          "PS-Poll data (0x%x)\n",
                          sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
                                          SC_OP_WAIT_FOR_CAB |
                                          SC_OP_WAIT_FOR_PSPOLL_DATA |
                                          SC_OP_WAIT_FOR_TX_ACK));
        }
}
static void ath_rx_send_to_mac80211(struct ath_softc *sc, struct sk_buff *skb,
                                    struct ieee80211_rx_status *rx_status)
{
        struct ieee80211_hdr *hdr;

        hdr = (struct ieee80211_hdr *)skb->data;

        /* Send the frame to mac80211 */
        if (is_multicast_ether_addr(hdr->addr1)) {
                int i;
                /*
                 * Deliver broadcast/multicast frames to all suitable
                 * virtual wiphys.
                 */
                /* TODO: filter based on channel configuration */
                for (i = 0; i < sc->num_sec_wiphy; i++) {
                        struct ath_wiphy *aphy = sc->sec_wiphy[i];
                        struct sk_buff *nskb;
                        if (aphy == NULL)
                                continue;
                        nskb = skb_copy(skb, GFP_ATOMIC);
                        if (nskb) {
                                memcpy(IEEE80211_SKB_RXCB(nskb), rx_status,
                                       sizeof(*rx_status));
                                ieee80211_rx(aphy->hw, nskb);
                        }
                }
                memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
                ieee80211_rx(sc->hw, skb);
        } else {
                /* Deliver unicast frames based on receiver address */
                memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
                ieee80211_rx(ath_get_virt_hw(sc, hdr), skb);
        }
}
int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa)                                               \
        ((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc +        \
                             ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))

        struct ath_buf *bf;
        struct ath_desc *ds;
        struct sk_buff *skb = NULL, *requeue_skb;
        struct ieee80211_rx_status rx_status;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_hdr *hdr;
        int hdrlen, padsize, retval;
        bool decrypt_error = false;
        u8 keyix;
        __le16 fc;

        spin_lock_bh(&sc->rx.rxbuflock);

        do {
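                /*
                 * Each iteration takes the buffer at the head of rx.rxbuf,
                 * checks whether hardware has finished DMA-ing into it,
                 * hands the completed frame to mac80211 and immediately
                 * requeues a fresh (or recycled) buffer so the RX chain
                 * never runs dry.
                 */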
                /* If handling rx interrupt and flush is in progress => exit */
                if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
                        break;

                if (list_empty(&sc->rx.rxbuf)) {
                        sc->rx.rxlink = NULL;
                        break;
                }

                bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
                ds = bf->bf_desc;

                /*
                 * Must provide the virtual address of the current
                 * descriptor, the physical address, and the virtual
                 * address of the next descriptor in the h/w chain.
                 * This allows the HAL to look ahead to see if the
                 * hardware is done with a descriptor by checking the
                 * done bit in the following descriptor and the address
                 * of the current descriptor the DMA engine is working
                 * on. All this is necessary because of our use of
                 * a self-linked list to avoid rx overruns.
                 */
                retval = ath9k_hw_rxprocdesc(ah, ds,
                                             bf->bf_daddr,
                                             PA2DESC(sc, ds->ds_link),
                                             0);
                if (retval == -EINPROGRESS) {
                        struct ath_buf *tbf;
                        struct ath_desc *tds;

                        if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
                                sc->rx.rxlink = NULL;
                                break;
                        }

                        tbf = list_entry(bf->list.next, struct ath_buf, list);

                        /*
                         * On some hardware the descriptor status words could
                         * get corrupted, including the done bit. Because of
                         * this, check if the next descriptor's done bit is
                         * set or not.
                         *
                         * If the next descriptor's done bit is set, the current
                         * descriptor has been corrupted. Force s/w to discard
                         * this descriptor and continue...
                         */

                        tds = tbf->bf_desc;
                        retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
                                             PA2DESC(sc, tds->ds_link), 0);
                        if (retval == -EINPROGRESS) {
                                break;
                        }
                }
                skb = bf->bf_mpdu;
                if (!skb)
                        continue;

                /*
                 * Synchronize the DMA transfer with CPU before
                 * 1. accessing the frame
                 * 2. requeueing the same buffer to h/w
                 */
                dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
                                        sc->rx.bufsize,
                                        DMA_FROM_DEVICE);

                /*
                 * If we're asked to flush receive queue, directly
                 * chain it back at the queue without processing it.
                 */
                if (flush)
                        goto requeue;

                if (!ds->ds_rxstat.rs_datalen)
                        goto requeue;

                /* The status portion of the descriptor could get corrupted. */
                if (sc->rx.bufsize < ds->ds_rxstat.rs_datalen)
                        goto requeue;

                if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc))
                        goto requeue;

                /* Ensure we always have an skb to requeue once we are done
                 * processing the current buffer's skb */
                requeue_skb = ath_rxbuf_alloc(common, sc->rx.bufsize,
                                              GFP_ATOMIC);
                /* If there is no memory we ignore the current RX'd frame,
                 * tell hardware it can give us a new frame using the old
                 * skb and put it at the tail of the sc->rx.rxbuf list for
                 * processing. */
                if (!requeue_skb)
                        goto requeue;

                /* Unmap the frame */
                dma_unmap_single(sc->dev, bf->bf_buf_addr,
                                 sc->rx.bufsize,
                                 DMA_FROM_DEVICE);

                skb_put(skb, ds->ds_rxstat.rs_datalen);

                /* see if any padding is done by the hw and remove it */
                hdr = (struct ieee80211_hdr *)skb->data;
                hdrlen = ieee80211_get_hdrlen_from_skb(skb);
                fc = hdr->frame_control;

                /* The MAC header is padded to have 32-bit boundary if the
                 * packet payload is non-zero. The general calculation for
                 * padsize would take into account odd header lengths:
                 * padsize = (4 - hdrlen % 4) % 4; However, since only
                 * even-length headers are used, padding can only be 0 or 2
                 * bytes and we can optimize this a bit. In addition, we must
                 * not try to remove padding from short control frames that do
                 * not have payload. */
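                /*
                 * Worked example (illustrative): a QoS data frame has a
                 * 26-byte header, so padsize = 26 & 3 = 2 and the hardware
                 * left 2 pad bytes between header and payload; the memmove
                 * plus skb_pull below shifts the header forward over the pad
                 * so header and payload become contiguous again.
                 */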
                padsize = hdrlen & 3;
                if (padsize && hdrlen >= 24) {
                        memmove(skb->data + padsize, skb->data, hdrlen);
                        skb_pull(skb, padsize);
                }
                keyix = ds->ds_rxstat.rs_keyix;

                if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
                        rx_status.flag |= RX_FLAG_DECRYPTED;
                } else if (ieee80211_has_protected(fc)
                           && !decrypt_error && skb->len >= hdrlen + 4) {
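                        /*
                         * Hardware did not report a usable key index:
                         * recover the Key ID from bits 6-7 of the fourth
                         * IV byte (common to WEP, TKIP and CCMP headers)
                         * and treat the frame as hardware-decrypted only
                         * if that key slot is programmed.
                         */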
                        keyix = skb->data[hdrlen + 3] >> 6;

                        if (test_bit(keyix, sc->keymap))
                                rx_status.flag |= RX_FLAG_DECRYPTED;
                }
                if (ah->sw_mgmt_crypto &&
                    (rx_status.flag & RX_FLAG_DECRYPTED) &&
                    ieee80211_is_mgmt(fc)) {
                        /* Use software decrypt for management frames. */
                        rx_status.flag &= ~RX_FLAG_DECRYPTED;
                }
                /* We will now give hardware our shiny new allocated skb */
                bf->bf_mpdu = requeue_skb;
                bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
                                                 sc->rx.bufsize,
                                                 DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(sc->dev,
                                               bf->bf_buf_addr))) {
                        dev_kfree_skb_any(requeue_skb);
                        bf->bf_mpdu = NULL;
                        ath_print(common, ATH_DBG_FATAL,
                                  "dma_mapping_error() on RX\n");
                        ath_rx_send_to_mac80211(sc, skb, &rx_status);
                        break;
                }
                bf->bf_dmacontext = bf->bf_buf_addr;
                /*
                 * change the default rx antenna if rx diversity chooses the
                 * other antenna 3 times in a row.
                 */
                if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
                        if (++sc->rx.rxotherant >= 3)
                                ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna);
                } else {
                        sc->rx.rxotherant = 0;
                }

                if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
                                             SC_OP_WAIT_FOR_CAB |
                                             SC_OP_WAIT_FOR_PSPOLL_DATA)))
                        ath_rx_ps(sc, skb);

                ath_rx_send_to_mac80211(sc, skb, &rx_status);

requeue:
                list_move_tail(&bf->list, &sc->rx.rxbuf);
                ath_rx_buf_link(sc, bf);
        } while (1);

        spin_unlock_bh(&sc->rx.rxbuflock);

        return 0;
#undef PA2DESC
}