net/mac80211/status.c

/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2008-2010 Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "rate.h"
#include "mesh.h"
#include "led.h"
#include "wme.h"

void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
				 struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int tmp;

	skb->pkt_type = IEEE80211_TX_STATUS_MSG;
	skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
		       &local->skb_queue : &local->skb_queue_unreliable, skb);
	tmp = skb_queue_len(&local->skb_queue) +
		skb_queue_len(&local->skb_queue_unreliable);
	while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
		ieee80211_free_txskb(hw, skb);
		tmp--;
		I802_DEBUG_INC(local->tx_status_drop);
	}
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);
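
/*
 * Example (illustrative sketch only, not part of this file): a driver whose
 * TX-complete handler runs in hard-IRQ context can hand the original skb
 * back through this helper instead of ieee80211_tx_status().  The names
 * mydrv_priv and mydrv_irq_tx_done below are hypothetical.
 *
 *	static void mydrv_irq_tx_done(struct mydrv_priv *priv,
 *				      struct sk_buff *skb, bool acked)
 *	{
 *		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 *
 *		ieee80211_tx_info_clear_status(info);
 *		if (acked)
 *			info->flags |= IEEE80211_TX_STAT_ACK;
 *		ieee80211_tx_status_irqsafe(priv->hw, skb);
 *	}
 */
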
static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
					    struct sta_info *sta,
					    struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	int ac;

	if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
		ieee80211_free_txskb(&local->hw, skb);
		return;
	}

	/*
	 * This skb 'survived' a round-trip through the driver, and
	 * hopefully the driver didn't mangle it too badly. However,
	 * we can definitely not rely on the control information
	 * being correct. Clear it so we don't get junk there, and
	 * indicate that it needs new processing, but must not be
	 * modified/encrypted again.
	 */
	memset(&info->control, 0, sizeof(info->control));

	info->control.jiffies = jiffies;
	info->control.vif = &sta->sdata->vif;
	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING |
		       IEEE80211_TX_INTFL_RETRANSMISSION;
	info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;

	sta->status_stats.filtered++;

	/*
	 * Clear more-data bit on filtered frames, it might be set
	 * but later frames might time out so it might have to be
	 * cleared again ... It's all rather unlikely (this frame
	 * should time out first, right?) but let's not confuse
	 * peers unnecessarily.
	 */
	if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA))
		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *p = ieee80211_get_qos_ctl(hdr);
		int tid = *p & IEEE80211_QOS_CTL_TID_MASK;

		/*
		 * Clear EOSP if set, this could happen e.g.
		 * if an absence period (us being a P2P GO)
		 * shortens the SP.
		 */
		if (*p & IEEE80211_QOS_CTL_EOSP)
			*p &= ~IEEE80211_QOS_CTL_EOSP;
		ac = ieee802_1d_to_ac[tid & 7];
	} else {
		ac = IEEE80211_AC_BE;
	}

	/*
	 * Clear the TX filter mask for this STA when sending the next
	 * packet. If the STA went to power save mode, this will happen
	 * when it wakes up for the next time.
	 */
	set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT);
	ieee80211_clear_fast_xmit(sta);

	/*
	 * This code races in the following way:
	 *
	 *  (1) STA sends frame indicating it will go to sleep and does so
	 *  (2) hardware/firmware adds STA to filter list, passes frame up
	 *  (3) hardware/firmware processes TX fifo and suppresses a frame
	 *  (4) we get TX status before having processed the frame and
	 *	knowing that the STA has gone to sleep.
	 *
	 * This is actually quite unlikely even when both those events are
	 * processed from interrupts coming in quickly after one another or
	 * even at the same time because we queue both TX status events and
	 * RX frames to be processed by a tasklet and process them in the
	 * same order that they were received, or TX status last. Hence, there
	 * is no race as long as the frame RX is processed before the next TX
	 * status, which drivers can ensure, see below.
	 *
	 * Note that this can only happen if the hardware or firmware can
	 * actually add STAs to the filter list; if this is done by the
	 * driver in response to set_tim() (which will only reduce the race
	 * this whole filtering tries to solve, not completely solve it)
	 * this situation cannot happen.
	 *
	 * To completely solve this race drivers need to make sure that they
	 *  (a) don't mix the irq-safe/not irq-safe TX status/RX processing
	 *	functions and
	 *  (b) always process RX events before TX status events if ordering
	 *	can be unknown, for example with different interrupt status
	 *	bits.
	 *  (c) if PS mode transitions are manual (i.e. the flag
	 *	%IEEE80211_HW_AP_LINK_PS is set), always process PS state
	 *	changes before calling TX status events if ordering can be
	 *	unknown.
	 */
	if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
	    skb_queue_len(&sta->tx_filtered[ac]) < STA_MAX_TX_BUFFER) {
		skb_queue_tail(&sta->tx_filtered[ac], skb);
		sta_info_recalc_tim(sta);

		if (!timer_pending(&local->sta_cleanup))
			mod_timer(&local->sta_cleanup,
				  round_jiffies(jiffies +
						STA_INFO_CLEANUP_INTERVAL));
		return;
	}

	if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
	    !(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
		/* Software retry the packet once */
		info->flags |= IEEE80211_TX_INTFL_RETRIED;
		ieee80211_add_pending_skb(local, skb);
		return;
	}

	ps_dbg_ratelimited(sta->sdata,
			   "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
			   skb_queue_len(&sta->tx_filtered[ac]),
			   !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
	ieee80211_free_txskb(&local->hw, skb);
}
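
/*
 * Illustrative sketch only (the mydrv_* names are hypothetical, not a
 * mac80211 API): a driver whose interrupt reports RX and TX completions
 * through one status register should drain RX before TX status, per
 * point (b) in the comment above.
 *
 *	static irqreturn_t mydrv_isr(int irq, void *data)
 *	{
 *		struct mydrv_priv *priv = data;
 *		u32 status = mydrv_read_irq_status(priv);
 *
 *		if (status & MYDRV_IRQ_RX)
 *			mydrv_process_rx(priv);		-> ieee80211_rx_irqsafe()
 *		if (status & MYDRV_IRQ_TX_DONE)
 *			mydrv_process_tx_status(priv);	-> ieee80211_tx_status_irqsafe()
 *		return IRQ_HANDLED;
 *	}
 */
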
static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid)
{
	struct tid_ampdu_tx *tid_tx;

	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx || !tid_tx->bar_pending)
		return;

	tid_tx->bar_pending = false;
	ieee80211_send_bar(&sta->sdata->vif, addr, tid, tid_tx->failed_bar_ssn);
}

static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt = (void *) skb->data;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;

	if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
		sta->status_stats.last_ack = jiffies;

	if (ieee80211_is_data_qos(mgmt->frame_control)) {
		struct ieee80211_hdr *hdr = (void *) skb->data;
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		u16 tid = qc[0] & 0xf;

		ieee80211_check_pending_bar(sta, hdr->addr1, tid);
	}

	if (ieee80211_is_action(mgmt->frame_control) &&
	    mgmt->u.action.category == WLAN_CATEGORY_HT &&
	    mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS &&
	    ieee80211_sdata_running(sdata)) {
		enum ieee80211_smps_mode smps_mode;

		switch (mgmt->u.action.u.ht_smps.smps_control) {
		case WLAN_HT_SMPS_CONTROL_DYNAMIC:
			smps_mode = IEEE80211_SMPS_DYNAMIC;
			break;
		case WLAN_HT_SMPS_CONTROL_STATIC:
			smps_mode = IEEE80211_SMPS_STATIC;
			break;
		case WLAN_HT_SMPS_CONTROL_DISABLED:
		default: /* shouldn't happen since we don't send that */
			smps_mode = IEEE80211_SMPS_OFF;
			break;
		}

		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
			/*
			 * This update looks racy, but isn't -- if we come
			 * here we've definitely got a station that we're
			 * talking to, and on a managed interface that can
			 * only be the AP. And the only other place updating
			 * this variable in managed mode is before association.
			 */
			sdata->smps_mode = smps_mode;
			ieee80211_queue_work(&local->hw, &sdata->recalc_smps);
		} else if (sdata->vif.type == NL80211_IFTYPE_AP ||
			   sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
			sta->known_smps_mode = smps_mode;
		}
	}
}

static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn)
{
	struct tid_ampdu_tx *tid_tx;

	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx)
		return;

	tid_tx->failed_bar_ssn = ssn;
	tid_tx->bar_pending = true;
}

static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
{
	int len = sizeof(struct ieee80211_radiotap_header);

	/* IEEE80211_RADIOTAP_RATE rate */
	if (info->status.rates[0].idx >= 0 &&
	    !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
					     IEEE80211_TX_RC_VHT_MCS)))
		len += 2;

	/* IEEE80211_RADIOTAP_TX_FLAGS */
	len += 2;

	/* IEEE80211_RADIOTAP_DATA_RETRIES */
	len += 1;

	/* IEEE80211_RADIOTAP_MCS
	 * IEEE80211_RADIOTAP_VHT */
	if (info->status.rates[0].idx >= 0) {
		if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
			len += 3;
		else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS)
			len = ALIGN(len, 2) + 12;
	}

	return len;
}

static void
ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
				 struct ieee80211_supported_band *sband,
				 struct sk_buff *skb, int retry_count,
				 int rtap_len, int shift)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	u16 txflags;

	rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);

	memset(rthdr, 0, rtap_len);
	rthdr->it_len = cpu_to_le16(rtap_len);
	rthdr->it_present =
		cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
			    (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
	pos = (unsigned char *)(rthdr + 1);

	/*
	 * XXX: Once radiotap gets the bitmap reset thing the vendor
	 *	extensions proposal contains, we can actually report
	 *	the whole set of tries we did.
	 */

	/* IEEE80211_RADIOTAP_RATE */
	if (info->status.rates[0].idx >= 0 &&
	    !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
					     IEEE80211_TX_RC_VHT_MCS))) {
		u16 rate;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		rate = sband->bitrates[info->status.rates[0].idx].bitrate;
		*pos = DIV_ROUND_UP(rate, 5 * (1 << shift));
		/* padding for tx flags */
		pos += 2;
	}

	/* IEEE80211_RADIOTAP_TX_FLAGS */
	txflags = 0;
	if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
	    !is_multicast_ether_addr(hdr->addr1))
		txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;

	if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
	if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
		txflags |= IEEE80211_RADIOTAP_F_TX_RTS;

	put_unaligned_le16(txflags, pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DATA_RETRIES */
	/* for now report the total retry_count */
	*pos = retry_count;
	pos++;

	if (info->status.rates[0].idx < 0)
		return;

	/* IEEE80211_RADIOTAP_MCS
	 * IEEE80211_RADIOTAP_VHT */
	if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
		pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
			 IEEE80211_RADIOTAP_MCS_HAVE_GI |
			 IEEE80211_RADIOTAP_MCS_HAVE_BW;
		if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
			pos[1] |= IEEE80211_RADIOTAP_MCS_SGI;
		if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40;
		if (info->status.rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)
			pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF;
		pos[2] = info->status.rates[0].idx;
		pos += 3;
	} else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) {
		u16 known = local->hw.radiotap_vht_details &
			    (IEEE80211_RADIOTAP_VHT_KNOWN_GI |
			     IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH);

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);

		/* required alignment from rthdr */
		pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2);

		/* u16 known - IEEE80211_RADIOTAP_VHT_KNOWN_* */
		put_unaligned_le16(known, pos);
		pos += 2;

		/* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* */
		if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
		pos++;

		/* u8 bandwidth */
		if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*pos = 1;
		else if (info->status.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			*pos = 4;
		else if (info->status.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
			*pos = 11;
		else /* IEEE80211_TX_RC_{20_MHZ_WIDTH,FIXME:DUP_DATA} */
			*pos = 0;
		pos++;

		/* u8 mcs_nss[4] */
		*pos = (ieee80211_rate_get_vht_mcs(&info->status.rates[0]) << 4) |
		       ieee80211_rate_get_vht_nss(&info->status.rates[0]);
		pos += 4;

		/* u8 coding */
		pos++;
		/* u8 group_id */
		pos++;
		/* u16 partial_aid */
		pos += 2;
	}
}
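
/*
 * For reference, the layout the two functions above produce for a frame
 * sent at a legacy (non-MCS/VHT) rate:
 *
 *	offset  0- 7	struct ieee80211_radiotap_header (it_len = 13)
 *	offset  8	RATE (in 500 kbit/s units, adjusted by 'shift')
 *	offset  9	padding so TX_FLAGS stays 2-byte aligned
 *	offset 10-11	TX_FLAGS
 *	offset 12	DATA_RETRIES (total retry_count)
 *
 * which matches the 8 + 2 + 2 + 1 bytes counted in
 * ieee80211_tx_radiotap_len().
 */
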
/*
 * Handles the tx for TDLS teardown frames.
 * If the frame wasn't ACKed by the peer - it will be re-sent through the AP
 */
static void ieee80211_tdls_td_tx_handle(struct ieee80211_local *local,
					struct ieee80211_sub_if_data *sdata,
					struct sk_buff *skb, u32 flags)
{
	struct sk_buff *teardown_skb;
	struct sk_buff *orig_teardown_skb;
	bool is_teardown = false;

	/* Get the teardown data we need and free the lock */
	spin_lock(&sdata->u.mgd.teardown_lock);
	teardown_skb = sdata->u.mgd.teardown_skb;
	orig_teardown_skb = sdata->u.mgd.orig_teardown_skb;
	if ((skb == orig_teardown_skb) && teardown_skb) {
		sdata->u.mgd.teardown_skb = NULL;
		sdata->u.mgd.orig_teardown_skb = NULL;
		is_teardown = true;
	}
	spin_unlock(&sdata->u.mgd.teardown_lock);

	if (is_teardown) {
		/* This mechanism relies on being able to get ACKs */
		WARN_ON(!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS));

		/* Check if peer has ACKed */
		if (flags & IEEE80211_TX_STAT_ACK) {
			dev_kfree_skb_any(teardown_skb);
		} else {
			tdls_dbg(sdata,
				 "TDLS Resending teardown through AP\n");

			ieee80211_subif_start_xmit(teardown_skb, skb->dev);
		}
	}
}

static struct ieee80211_sub_if_data *
ieee80211_sdata_from_skb(struct ieee80211_local *local, struct sk_buff *skb)
{
	struct ieee80211_sub_if_data *sdata;

	if (skb->dev) {
		list_for_each_entry_rcu(sdata, &local->interfaces, list) {
			if (!sdata->dev)
				continue;

			if (skb->dev == sdata->dev)
				return sdata;
		}

		return NULL;
	}

	return rcu_dereference(local->p2p_sdata);
}

static void ieee80211_report_ack_skb(struct ieee80211_local *local,
				     struct ieee80211_tx_info *info,
				     bool acked, bool dropped)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&local->ack_status_lock, flags);
	skb = idr_find(&local->ack_status_frames, info->ack_frame_id);
	if (skb)
		idr_remove(&local->ack_status_frames, info->ack_frame_id);
	spin_unlock_irqrestore(&local->ack_status_lock, flags);

	if (!skb)
		return;

	if (dropped) {
		dev_kfree_skb_any(skb);
		return;
	}

	if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
		u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
		struct ieee80211_sub_if_data *sdata;
		struct ieee80211_hdr *hdr = (void *)skb->data;

		rcu_read_lock();
		sdata = ieee80211_sdata_from_skb(local, skb);
		if (sdata) {
			if (ieee80211_is_nullfunc(hdr->frame_control) ||
			    ieee80211_is_qos_nullfunc(hdr->frame_control))
				cfg80211_probe_status(sdata->dev, hdr->addr1,
						      cookie, acked,
						      GFP_ATOMIC);
			else
				cfg80211_mgmt_tx_status(&sdata->wdev, cookie,
							skb->data, skb->len,
							acked, GFP_ATOMIC);
		}
		rcu_read_unlock();

		dev_kfree_skb_any(skb);
	} else {
		/* consumes skb */
		skb_complete_wifi_ack(skb, acked);
	}
}

static void ieee80211_report_used_skb(struct ieee80211_local *local,
				      struct sk_buff *skb, bool dropped)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	bool acked = info->flags & IEEE80211_TX_STAT_ACK;

	if (dropped)
		acked = false;

	if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
		struct ieee80211_sub_if_data *sdata;

		rcu_read_lock();

		sdata = ieee80211_sdata_from_skb(local, skb);

		if (!sdata) {
			skb->dev = NULL;
		} else {
			unsigned int hdr_size =
				ieee80211_hdrlen(hdr->frame_control);

			/* Check to see if packet is a TDLS teardown packet */
			if (ieee80211_is_data(hdr->frame_control) &&
			    (ieee80211_get_tdls_action(skb, hdr_size) ==
			     WLAN_TDLS_TEARDOWN))
				ieee80211_tdls_td_tx_handle(local, sdata, skb,
							    info->flags);
			else
				ieee80211_mgd_conn_tx_status(sdata,
							     hdr->frame_control,
							     acked);
		}

		rcu_read_unlock();
	} else if (info->ack_frame_id) {
		ieee80211_report_ack_skb(local, info, acked, dropped);
	}
}

/*
 * Use a static threshold for now, best value to be determined
 * by testing ...
 * Should it depend on:
 *  - on # of retransmissions
 *  - current throughput (higher value for higher tpt)?
 */
#define STA_LOST_PKT_THRESHOLD		50
#define STA_LOST_TDLS_PKT_THRESHOLD	10
#define STA_LOST_TDLS_PKT_TIME		(10*HZ) /* 10secs since last ACK */

static void ieee80211_lost_packet(struct sta_info *sta,
				  struct ieee80211_tx_info *info)
{
	/* This packet was aggregated but doesn't carry status info */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
		return;

	sta->status_stats.lost_packets++;
	if (!sta->sta.tdls &&
	    sta->status_stats.lost_packets < STA_LOST_PKT_THRESHOLD)
		return;

	/*
	 * If we're in TDLS mode, make sure that all STA_LOST_TDLS_PKT_THRESHOLD
	 * of the last packets were lost, and that no ACK was received during
	 * the last STA_LOST_TDLS_PKT_TIME, before triggering the CQM
	 * packet-loss mechanism.
	 */
	if (sta->sta.tdls &&
	    (sta->status_stats.lost_packets < STA_LOST_TDLS_PKT_THRESHOLD ||
	     time_before(jiffies,
			 sta->status_stats.last_tdls_pkt_time +
			 STA_LOST_TDLS_PKT_TIME)))
		return;

	cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
				    sta->status_stats.lost_packets, GFP_ATOMIC);
	sta->status_stats.lost_packets = 0;
}
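
/*
 * Worked example (derived from the checks above): for a TDLS peer,
 * cfg80211_cqm_pktloss_notify() only fires once at least
 * STA_LOST_TDLS_PKT_THRESHOLD (10) packets were lost without an
 * intervening ACK *and* more than STA_LOST_TDLS_PKT_TIME (10 s) passed
 * since the last ACKed TDLS packet; for a regular peer the counter
 * alone reaching STA_LOST_PKT_THRESHOLD (50) is enough.
 */
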
static int ieee80211_tx_get_rates(struct ieee80211_hw *hw,
				  struct ieee80211_tx_info *info,
				  int *retry_count)
{
	int rates_idx = -1;
	int count = -1;
	int i;

	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
		    !(info->flags & IEEE80211_TX_STAT_AMPDU)) {
			/* just the first aggr frame carries status info */
			info->status.rates[i].idx = -1;
			info->status.rates[i].count = 0;
			break;
		} else if (info->status.rates[i].idx < 0) {
			break;
		} else if (i >= hw->max_report_rates) {
			/* the HW cannot have attempted that rate */
			info->status.rates[i].idx = -1;
			info->status.rates[i].count = 0;
			break;
		}

		count += info->status.rates[i].count;
	}
	rates_idx = i - 1;

	if (count < 0)
		count = 0;

	*retry_count = count;
	return rates_idx;
}
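
/*
 * Worked example (illustrative values, derived from the loop above): with
 * info->status.rates[] = { {idx 7, count 2}, {idx 5, count 1}, {idx -1} }
 * the loop sums 2 + 1 starting from -1, so *retry_count = 2 (three
 * transmissions, two of them retries) and the returned rates_idx is 1,
 * i.e. the last rate actually used.
 */
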
void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
			       struct ieee80211_sta *pubsta,
			       struct ieee80211_tx_info *info)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_supported_band *sband;
	int retry_count;
	int rates_idx;
	bool acked, noack_success;

	rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);

	sband = hw->wiphy->bands[info->band];

	acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
	noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED);

	if (pubsta) {
		struct sta_info *sta;

		sta = container_of(pubsta, struct sta_info, sta);

		if (!acked)
			sta->status_stats.retry_failed++;
		sta->status_stats.retry_count += retry_count;

		if (acked) {
			sta->status_stats.last_ack = jiffies;

			if (sta->status_stats.lost_packets)
				sta->status_stats.lost_packets = 0;

			/* Track when last TDLS packet was ACKed */
			if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
				sta->status_stats.last_tdls_pkt_time = jiffies;
		} else {
			ieee80211_lost_packet(sta, info);
		}

		rate_control_tx_status_noskb(local, sband, sta, info);
	}

	if (acked || noack_success) {
		I802_DEBUG_INC(local->dot11TransmittedFrameCount);
		if (!pubsta)
			I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount);
		if (retry_count > 0)
			I802_DEBUG_INC(local->dot11RetryCount);
		if (retry_count > 1)
			I802_DEBUG_INC(local->dot11MultipleRetryCount);
	} else {
		I802_DEBUG_INC(local->dot11FailedCount);
	}
}
EXPORT_SYMBOL(ieee80211_tx_status_noskb);
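
/*
 * Example (illustrative sketch only; priv, pubsta and mydrv_band are
 * hypothetical driver-side names): a driver that only gets per-station
 * completion flags from its firmware, and therefore has no skb to hand
 * back, can report status through this path instead of
 * ieee80211_tx_status():
 *
 *	struct ieee80211_tx_info info = {};
 *
 *	info.band = mydrv_band(priv);
 *	info.status.rates[0].idx = -1;	// rate not reported by firmware
 *	if (acked)
 *		info.flags |= IEEE80211_TX_STAT_ACK;
 *	ieee80211_tx_status_noskb(priv->hw, pubsta, &info);
 */
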
void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
			  struct ieee80211_supported_band *sband,
			  int retry_count, int shift, bool send_to_cooked)
{
	struct sk_buff *skb2;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sub_if_data *sdata;
	struct net_device *prev_dev = NULL;
	int rtap_len;

	/* send frame to monitor interfaces now */
	rtap_len = ieee80211_tx_radiotap_len(info);
	if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
		pr_err("ieee80211_tx_status: headroom too small\n");
		dev_kfree_skb(skb);
		return;
	}
	ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count,
					 rtap_len, shift);

	/* XXX: is this sufficient for BPF? */
	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);
	memset(skb->cb, 0, sizeof(skb->cb));

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
			if (!ieee80211_sdata_running(sdata))
				continue;

			if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
			    !send_to_cooked)
				continue;

			if (prev_dev) {
				skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2) {
					skb2->dev = prev_dev;
					netif_rx(skb2);
				}
			}

			prev_dev = sdata->dev;
		}
	}
	if (prev_dev) {
		skb->dev = prev_dev;
		netif_rx(skb);
		skb = NULL;
	}
	rcu_read_unlock();
	dev_kfree_skb(skb);
}

void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	__le16 fc;
	struct ieee80211_supported_band *sband;
	struct sta_info *sta;
	struct rhash_head *tmp;
	int retry_count;
	int rates_idx;
	bool send_to_cooked;
	bool acked;
	struct ieee80211_bar *bar;
	int shift = 0;
	int tid = IEEE80211_NUM_TIDS;
	const struct bucket_table *tbl;

	rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);

	rcu_read_lock();

	sband = local->hw.wiphy->bands[info->band];
	fc = hdr->frame_control;

	tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);

	for_each_sta_info(local, tbl, hdr->addr1, sta, tmp) {
		/* skip wrong virtual interface */
		if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr))
			continue;

		shift = ieee80211_vif_get_shift(&sta->sdata->vif);

		if (info->flags & IEEE80211_TX_STATUS_EOSP)
			clear_sta_flag(sta, WLAN_STA_SP);

		acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
		if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) {
			/*
			 * The STA is in power save mode, so assume
			 * that this TX packet failed because of that.
			 */
			ieee80211_handle_filtered_frame(local, sta, skb);
			rcu_read_unlock();
			return;
		}

		/* mesh Peer Service Period support */
		if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
		    ieee80211_is_data_qos(fc))
			ieee80211_mpsp_trigger_process(
				ieee80211_get_qos_ctl(hdr),
				sta, true, acked);

		if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) &&
		    (ieee80211_is_data(hdr->frame_control)) &&
		    (rates_idx != -1))
			sta->tx_stats.last_rate =
				info->status.rates[rates_idx];

		if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
		    (ieee80211_is_data_qos(fc))) {
			u16 ssn;
			u8 *qc;

			qc = ieee80211_get_qos_ctl(hdr);
			tid = qc[0] & 0xf;
			ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
						& IEEE80211_SCTL_SEQ);
			ieee80211_send_bar(&sta->sdata->vif, hdr->addr1,
					   tid, ssn);
		} else if (ieee80211_is_data_qos(fc)) {
			u8 *qc = ieee80211_get_qos_ctl(hdr);

			tid = qc[0] & 0xf;
		}

		if (!acked && ieee80211_is_back_req(fc)) {
			u16 control;

			/*
			 * BAR failed, store the last SSN and retry sending
			 * the BAR when the next unicast transmission on the
			 * same TID succeeds.
			 */
			bar = (struct ieee80211_bar *) skb->data;
			control = le16_to_cpu(bar->control);
			if (!(control & IEEE80211_BAR_CTRL_MULTI_TID)) {
				u16 ssn = le16_to_cpu(bar->start_seq_num);

				tid = (control &
				       IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
				      IEEE80211_BAR_CTRL_TID_INFO_SHIFT;

				ieee80211_set_bar_pending(sta, tid, ssn);
			}
		}

		if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
			ieee80211_handle_filtered_frame(local, sta, skb);
			rcu_read_unlock();
			return;
		} else {
			if (!acked)
				sta->status_stats.retry_failed++;
			sta->status_stats.retry_count += retry_count;

			if (ieee80211_is_data_present(fc)) {
				if (!acked)
					sta->status_stats.msdu_failed[tid]++;

				sta->status_stats.msdu_retries[tid] +=
					retry_count;
			}
		}

		rate_control_tx_status(local, sband, sta, skb);
		if (ieee80211_vif_is_mesh(&sta->sdata->vif))
			ieee80211s_update_metric(local, sta, skb);

		if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
			ieee80211_frame_acked(sta, skb);

		if ((sta->sdata->vif.type == NL80211_IFTYPE_STATION) &&
		    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
			ieee80211_sta_tx_notify(sta->sdata, (void *) skb->data,
						acked, info->status.tx_time);

		if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
			if (info->flags & IEEE80211_TX_STAT_ACK) {
				if (sta->status_stats.lost_packets)
					sta->status_stats.lost_packets = 0;

				/* Track when last TDLS packet was ACKed */
				if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
					sta->status_stats.last_tdls_pkt_time =
						jiffies;
			} else {
				ieee80211_lost_packet(sta, info);
			}
		}
	}

	rcu_read_unlock();

	ieee80211_led_tx(local);

	/* SNMP counters
	 * Fragments are passed to low-level drivers as separate skbs, so these
	 * are actually fragments, not frames. Update frame counters only for
	 * the first fragment of the frame. */
	if ((info->flags & IEEE80211_TX_STAT_ACK) ||
	    (info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED)) {
		if (ieee80211_is_first_frag(hdr->seq_ctrl)) {
			I802_DEBUG_INC(local->dot11TransmittedFrameCount);
			if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
				I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount);
			if (retry_count > 0)
				I802_DEBUG_INC(local->dot11RetryCount);
			if (retry_count > 1)
				I802_DEBUG_INC(local->dot11MultipleRetryCount);
		}

		/* This counter shall be incremented for an acknowledged MPDU
		 * with an individual address in the address 1 field or an MPDU
		 * with a multicast address in the address 1 field of type Data
		 * or Management. */
		if (!is_multicast_ether_addr(hdr->addr1) ||
		    ieee80211_is_data(fc) ||
		    ieee80211_is_mgmt(fc))
			I802_DEBUG_INC(local->dot11TransmittedFragmentCount);
	} else {
		if (ieee80211_is_first_frag(hdr->seq_ctrl))
			I802_DEBUG_INC(local->dot11FailedCount);
	}

	if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
	    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) &&
	    !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
	    local->ps_sdata && !(local->scanning)) {
		if (info->flags & IEEE80211_TX_STAT_ACK) {
			local->ps_sdata->u.mgd.flags |=
					IEEE80211_STA_NULLFUNC_ACKED;
		} else
			mod_timer(&local->dynamic_ps_timer, jiffies +
					msecs_to_jiffies(10));
	}

	ieee80211_report_used_skb(local, skb, false);

	/* this was a transmitted frame, but now we want to reuse it */
	skb_orphan(skb);

	/* Need to make a copy before skb->cb gets cleared */
	send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) ||
			 !(ieee80211_is_data(fc));

	/*
	 * This is a bit racy but we can avoid a lot of work
	 * with this test...
	 */
	if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) {
		dev_kfree_skb(skb);
		return;
	}

	/* send to monitor interfaces */
	ieee80211_tx_monitor(local, skb, sband, retry_count, shift, send_to_cooked);
}
EXPORT_SYMBOL(ieee80211_tx_status);

void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
				    num_packets, GFP_ATOMIC);
}
EXPORT_SYMBOL(ieee80211_report_low_ack);

void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);

	ieee80211_report_used_skb(local, skb, true);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ieee80211_free_txskb);

void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
			      struct sk_buff_head *skbs)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(skbs)))
		ieee80211_free_txskb(hw, skb);
}