gpio: rcar: Fix runtime PM imbalance on error
[linux/fpc-iii.git] / drivers / net / wireless / ath / ath9k / dynack.c
blobfbeb4a739d3211820023b74115000643775d375e
1 /*
2 * Copyright (c) 2014, Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 #include "ath9k.h"
18 #include "hw.h"
19 #include "dynack.h"
/* min interval (in jiffies) between two ACK-timeout recomputations */
#define COMPUTE_TO (5 * HZ)
/* hold-off (in jiffies) applied after a "late ack" before the ACK timeout
 * is recomputed again
 */
#define LATEACK_DELAY (10 * HZ)
/* EWMA weight of the previous estimate: EWMA_LEVEL/EWMA_DIV = 96/128 = 75% */
#define EWMA_LEVEL 96
#define EWMA_DIV 128
26 /**
27 * ath_dynack_get_max_to - set max timeout according to channel width
28 * @ah: ath hw
31 static u32 ath_dynack_get_max_to(struct ath_hw *ah)
33 const struct ath9k_channel *chan = ah->curchan;
35 if (!chan)
36 return 300;
38 if (IS_CHAN_HT40(chan))
39 return 300;
40 if (IS_CHAN_HALF_RATE(chan))
41 return 750;
42 if (IS_CHAN_QUARTER_RATE(chan))
43 return 1500;
44 return 600;
47 /**
48 * ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation
51 static inline int ath_dynack_ewma(int old, int new)
53 if (old > 0)
54 return (new * (EWMA_DIV - EWMA_LEVEL) +
55 old * EWMA_LEVEL) / EWMA_DIV;
56 else
57 return new;
60 /**
61 * ath_dynack_get_sifs - get sifs time based on phy used
62 * @ah: ath hw
63 * @phy: phy used
66 static inline u32 ath_dynack_get_sifs(struct ath_hw *ah, int phy)
68 u32 sifs = CCK_SIFS_TIME;
70 if (phy == WLAN_RC_PHY_OFDM) {
71 if (IS_CHAN_QUARTER_RATE(ah->curchan))
72 sifs = OFDM_SIFS_TIME_QUARTER;
73 else if (IS_CHAN_HALF_RATE(ah->curchan))
74 sifs = OFDM_SIFS_TIME_HALF;
75 else
76 sifs = OFDM_SIFS_TIME;
78 return sifs;
81 /**
82 * ath_dynack_bssidmask - filter out ACK frames based on BSSID mask
83 * @ah: ath hw
84 * @mac: receiver address
86 static inline bool ath_dynack_bssidmask(struct ath_hw *ah, const u8 *mac)
88 int i;
89 struct ath_common *common = ath9k_hw_common(ah);
91 for (i = 0; i < ETH_ALEN; i++) {
92 if ((common->macaddr[i] & common->bssidmask[i]) !=
93 (mac[i] & common->bssidmask[i]))
94 return false;
97 return true;
101 * ath_dynack_set_timeout - configure timeouts/slottime registers
102 * @ah: ath hw
103 * @to: timeout value
106 static void ath_dynack_set_timeout(struct ath_hw *ah, int to)
108 struct ath_common *common = ath9k_hw_common(ah);
109 int slottime = (to - 3) / 2;
111 ath_dbg(common, DYNACK, "ACK timeout %u slottime %u\n",
112 to, slottime);
113 ath9k_hw_setslottime(ah, slottime);
114 ath9k_hw_set_ack_timeout(ah, to);
115 ath9k_hw_set_cts_timeout(ah, to);
119 * ath_dynack_compute_ackto - compute ACK timeout as the maximum STA timeout
120 * @ah: ath hw
122 * should be called while holding qlock
124 static void ath_dynack_compute_ackto(struct ath_hw *ah)
126 struct ath_dynack *da = &ah->dynack;
127 struct ath_node *an;
128 int to = 0;
130 list_for_each_entry(an, &da->nodes, list)
131 if (an->ackto > to)
132 to = an->ackto;
134 if (to && da->ackto != to) {
135 ath_dynack_set_timeout(ah, to);
136 da->ackto = to;
/**
 * ath_dynack_compute_to - compute STA ACK timeout
 * @ah: ath hw
 *
 * should be called while holding qlock
 */
static void ath_dynack_compute_to(struct ath_hw *ah)
{
	struct ath_dynack *da = &ah->dynack;
	u32 ackto, ack_ts, max_to;
	struct ieee80211_sta *sta;
	struct ts_info *st_ts;
	struct ath_node *an;
	u8 *dst, *src;

	rcu_read_lock();

	max_to = ath_dynack_get_max_to(ah);
	/* pair tx-status samples (head of st_rbf) with received ACK
	 * timestamps (head of ack_rbf) until either ring runs empty
	 */
	while (da->st_rbf.h_rb != da->st_rbf.t_rb &&
	       da->ack_rbf.h_rb != da->ack_rbf.t_rb) {
		ack_ts = da->ack_rbf.tstamp[da->ack_rbf.h_rb];
		st_ts = &da->st_rbf.ts[da->st_rbf.h_rb];
		dst = da->st_rbf.addr[da->st_rbf.h_rb].h_dest;
		src = da->st_rbf.addr[da->st_rbf.h_rb].h_src;

		ath_dbg(ath9k_hw_common(ah), DYNACK,
			"ack_ts %u st_ts %u st_dur %u [%u-%u]\n",
			ack_ts, st_ts->tstamp, st_ts->dur,
			da->ack_rbf.h_rb, da->st_rbf.h_rb);

		if (ack_ts > st_ts->tstamp + st_ts->dur) {
			/* ACK arrived after the tx frame finished: the gap
			 * is a propagation-delay sample for this station
			 */
			ackto = ack_ts - st_ts->tstamp - st_ts->dur;

			/* discard implausibly large samples (> channel max) */
			if (ackto < max_to) {
				sta = ieee80211_find_sta_by_ifaddr(ah->hw, dst,
								   src);
				if (sta) {
					an = (struct ath_node *)sta->drv_priv;
					/* smooth the sample into the
					 * per-station estimate
					 */
					an->ackto = ath_dynack_ewma(an->ackto,
								    ackto);
					ath_dbg(ath9k_hw_common(ah), DYNACK,
						"%pM to %d [%u]\n", dst,
						an->ackto, ackto);
					/* rate-limit hw reprogramming to
					 * once per COMPUTE_TO interval
					 */
					if (time_is_before_jiffies(da->lto)) {
						ath_dynack_compute_ackto(ah);
						da->lto = jiffies + COMPUTE_TO;
					}
				}
				/* ACK consumed only for in-range samples */
				INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);
			}
			INCR(da->st_rbf.h_rb, ATH_DYN_BUF);
		} else {
			/* ACK predates this tx sample: drop the stale ACK
			 * and retry the same tx sample on the next pass
			 */
			INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);
		}
	}

	rcu_read_unlock();
}
/**
 * ath_dynack_sample_tx_ts - status timestamp sampling method
 * @ah: ath hw
 * @skb: socket buffer
 * @ts: tx status info
 * @sta: station pointer
 *
 * Records a tx completion sample in the status ring buffer and triggers
 * the ACK-timeout estimation; handles the "late ack" fallback for
 * auth/assoc frames that exhausted their retries.
 */
void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
			     struct ath_tx_status *ts,
			     struct ieee80211_sta *sta)
{
	struct ieee80211_hdr *hdr;
	struct ath_dynack *da = &ah->dynack;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u32 dur = ts->duration;
	u8 ridx;

	/* nothing to do if dynack is off or the frame expects no ACK */
	if (!da->enabled || (info->flags & IEEE80211_TX_CTL_NO_ACK))
		return;

	spin_lock_bh(&da->qlock);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* late ACK */
	if (ts->ts_status & ATH9K_TXERR_XRETRY) {
		if (ieee80211_is_assoc_req(hdr->frame_control) ||
		    ieee80211_is_assoc_resp(hdr->frame_control) ||
		    ieee80211_is_auth(hdr->frame_control)) {
			u32 max_to = ath_dynack_get_max_to(ah);

			/* a handshake frame ran out of retries: fall back
			 * to the widest timeout and pause estimation
			 */
			ath_dbg(common, DYNACK, "late ack\n");

			ath_dynack_set_timeout(ah, max_to);
			if (sta) {
				struct ath_node *an;

				an = (struct ath_node *)sta->drv_priv;
				/* invalidate the per-station estimate so the
				 * EWMA restarts from the next sample
				 */
				an->ackto = -1;
			}
			da->lto = jiffies + LATEACK_DELAY;
		}

		spin_unlock_bh(&da->qlock);
		return;
	}

	ridx = ts->ts_rateindex;

	/* push timestamp and addresses onto the status ring buffer */
	da->st_rbf.ts[da->st_rbf.t_rb].tstamp = ts->ts_tstamp;
	ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_dest, hdr->addr1);
	ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_src, hdr->addr2);

	if (!(info->status.rates[ridx].flags & IEEE80211_TX_RC_MCS)) {
		const struct ieee80211_rate *rate;
		struct ieee80211_tx_rate *rates = info->status.rates;
		u32 phy;

		rate = &common->sbands[info->band].bitrates[rates[ridx].idx];
		if (info->band == NL80211_BAND_2GHZ &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		/* legacy rates: strip SIFS from the reported duration */
		dur -= ath_dynack_get_sifs(ah, phy);
	}
	da->st_rbf.ts[da->st_rbf.t_rb].dur = dur;

	/* advance tail; when the ring is full drop the oldest sample */
	INCR(da->st_rbf.t_rb, ATH_DYN_BUF);
	if (da->st_rbf.t_rb == da->st_rbf.h_rb)
		INCR(da->st_rbf.h_rb, ATH_DYN_BUF);

	ath_dbg(common, DYNACK, "{%pM} tx sample %u [dur %u][h %u-t %u]\n",
		hdr->addr1, ts->ts_tstamp, dur, da->st_rbf.h_rb,
		da->st_rbf.t_rb);

	ath_dynack_compute_to(ah);

	spin_unlock_bh(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_sample_tx_ts);
284 * ath_dynack_sample_ack_ts - ACK timestamp sampling method
285 * @ah: ath hw
286 * @skb: socket buffer
287 * @ts: rx timestamp
290 void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb,
291 u32 ts)
293 struct ath_dynack *da = &ah->dynack;
294 struct ath_common *common = ath9k_hw_common(ah);
295 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
297 if (!da->enabled || !ath_dynack_bssidmask(ah, hdr->addr1))
298 return;
300 spin_lock_bh(&da->qlock);
301 da->ack_rbf.tstamp[da->ack_rbf.t_rb] = ts;
303 INCR(da->ack_rbf.t_rb, ATH_DYN_BUF);
304 if (da->ack_rbf.t_rb == da->ack_rbf.h_rb)
305 INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);
307 ath_dbg(common, DYNACK, "rx sample %u [h %u-t %u]\n",
308 ts, da->ack_rbf.h_rb, da->ack_rbf.t_rb);
310 ath_dynack_compute_to(ah);
312 spin_unlock_bh(&da->qlock);
314 EXPORT_SYMBOL(ath_dynack_sample_ack_ts);
317 * ath_dynack_node_init - init ath_node related info
318 * @ah: ath hw
319 * @an: ath node
322 void ath_dynack_node_init(struct ath_hw *ah, struct ath_node *an)
324 struct ath_dynack *da = &ah->dynack;
326 an->ackto = da->ackto;
328 spin_lock_bh(&da->qlock);
329 list_add_tail(&an->list, &da->nodes);
330 spin_unlock_bh(&da->qlock);
332 EXPORT_SYMBOL(ath_dynack_node_init);
335 * ath_dynack_node_deinit - deinit ath_node related info
336 * @ah: ath hw
337 * @an: ath node
340 void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an)
342 struct ath_dynack *da = &ah->dynack;
344 spin_lock_bh(&da->qlock);
345 list_del(&an->list);
346 spin_unlock_bh(&da->qlock);
348 EXPORT_SYMBOL(ath_dynack_node_deinit);
351 * ath_dynack_reset - reset dynack processing
352 * @ah: ath hw
355 void ath_dynack_reset(struct ath_hw *ah)
357 struct ath_dynack *da = &ah->dynack;
358 struct ath_node *an;
360 spin_lock_bh(&da->qlock);
362 da->lto = jiffies + COMPUTE_TO;
364 da->st_rbf.t_rb = 0;
365 da->st_rbf.h_rb = 0;
366 da->ack_rbf.t_rb = 0;
367 da->ack_rbf.h_rb = 0;
369 da->ackto = ath_dynack_get_max_to(ah);
370 list_for_each_entry(an, &da->nodes, list)
371 an->ackto = da->ackto;
373 /* init acktimeout */
374 ath_dynack_set_timeout(ah, da->ackto);
376 spin_unlock_bh(&da->qlock);
378 EXPORT_SYMBOL(ath_dynack_reset);
381 * ath_dynack_init - init dynack data structure
382 * @ah: ath hw
385 void ath_dynack_init(struct ath_hw *ah)
387 struct ath_dynack *da = &ah->dynack;
389 memset(da, 0, sizeof(struct ath_dynack));
391 spin_lock_init(&da->qlock);
392 INIT_LIST_HEAD(&da->nodes);
393 /* ackto = slottime + sifs + air delay */
394 da->ackto = 9 + 16 + 64;
396 ah->hw->wiphy->features |= NL80211_FEATURE_ACKTO_ESTIMATION;