treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers / net / wireless / mediatek / mt76 / agg-rx.c
blob59c187898132abcf6deae4580767fbbd5cbcc2ce
1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
4 */
5 #include "mt76.h"
7 #define REORDER_TIMEOUT (HZ / 10)
9 static void
10 mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
12 struct sk_buff *skb;
14 tid->head = ieee80211_sn_inc(tid->head);
16 skb = tid->reorder_buf[idx];
17 if (!skb)
18 return;
20 tid->reorder_buf[idx] = NULL;
21 tid->nframes--;
22 __skb_queue_tail(frames, skb);
25 static void
26 mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
27 struct sk_buff_head *frames,
28 u16 head)
30 int idx;
32 while (ieee80211_sn_less(tid->head, head)) {
33 idx = tid->head % tid->size;
34 mt76_aggr_release(tid, frames, idx);
38 static void
39 mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
41 int idx = tid->head % tid->size;
43 while (tid->reorder_buf[idx]) {
44 mt76_aggr_release(tid, frames, idx);
45 idx = tid->head % tid->size;
/* Release frames whose reorder timeout has expired.
 * Caller holds tid->lock (see mt76_rx_aggr_reorder_work).
 */
static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	if (!tid->nframes)
		return;

	/* First flush whatever is already in-order at the head */
	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	/* Walk the ring starting one past the head; nframes bounds the scan
	 * to the number of buffered frames still outstanding.  For every
	 * buffered frame older than REORDER_TIMEOUT, force-release all
	 * frames up to its sequence number (skipping the gap before it).
	 */
	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;
	     idx = (idx + 1) % tid->size) {
		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		status = (struct mt76_rx_status *)skb->cb;
		if (!time_after(jiffies,
				status->reorder_time + REORDER_TIMEOUT))
			continue;

		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	/* The forced release may have closed gaps; flush the head again */
	mt76_rx_aggr_release_head(tid, frames);
}
83 static void
84 mt76_rx_aggr_reorder_work(struct work_struct *work)
86 struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
87 reorder_work.work);
88 struct mt76_dev *dev = tid->dev;
89 struct sk_buff_head frames;
90 int nframes;
92 __skb_queue_head_init(&frames);
94 local_bh_disable();
95 rcu_read_lock();
97 spin_lock(&tid->lock);
98 mt76_rx_aggr_check_release(tid, &frames);
99 nframes = tid->nframes;
100 spin_unlock(&tid->lock);
102 if (nframes)
103 ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
104 REORDER_TIMEOUT);
105 mt76_rx_complete(dev, &frames, NULL);
107 rcu_read_unlock();
108 local_bh_enable();
111 static void
112 mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
114 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
115 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
116 struct mt76_wcid *wcid = status->wcid;
117 struct mt76_rx_tid *tid;
118 u16 seqno;
120 if (!ieee80211_is_ctl(bar->frame_control))
121 return;
123 if (!ieee80211_is_back_req(bar->frame_control))
124 return;
126 status->tid = le16_to_cpu(bar->control) >> 12;
127 seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
128 tid = rcu_dereference(wcid->aggr[status->tid]);
129 if (!tid)
130 return;
132 spin_lock_bh(&tid->lock);
133 if (!tid->stopped) {
134 mt76_rx_aggr_release_frames(tid, frames, seqno);
135 mt76_rx_aggr_release_head(tid, frames);
137 spin_unlock_bh(&tid->lock);
/* Insert a received frame into the per-TID reorder buffer.  In-order frames
 * (and any buffered frames they unblock) are left on/released to @frames;
 * out-of-order frames are parked in tid->reorder_buf until the sequence gap
 * closes or the reorder timeout work fires.
 */
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_sta *sta;
	struct mt76_rx_tid *tid;
	bool sn_less;
	u16 seqno, head, size;
	u8 ackp, idx;

	/* Queue first; the frame is unlinked again below if it gets buffered
	 * or dropped.
	 */
	__skb_queue_tail(frames, skb);

	sta = wcid_to_sta(wcid);
	if (!sta)
		return;

	if (!status->aggr) {
		/* Not an aggregated data frame; may still be a BAR that
		 * moves the reorder window.
		 */
		mt76_rx_aggr_check_ctl(skb, frames);
		return;
	}

	/* not part of a BA session */
	ackp = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
	    ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
		return;

	tid = rcu_dereference(wcid->aggr[status->tid]);
	if (!tid)
		return;

	status->flag |= RX_FLAG_DUP_VALIDATED;
	spin_lock_bh(&tid->lock);

	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;
	size = tid->size;
	sn_less = ieee80211_sn_less(seqno, head);

	if (!tid->started) {
		/* Ignore pre-window frames until the session has seen its
		 * first in-window frame.
		 */
		if (sn_less)
			goto out;

		tid->started = true;
	}

	if (sn_less) {
		/* Duplicate or outdated frame: drop it */
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}

	if (seqno == head) {
		/* Exactly in-order: advance the window and release any
		 * buffered frames that are now contiguous.
		 */
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	/* Out of order: take the frame off the release queue for buffering */
	__skb_unlink(skb, frames);

	/*
	 * Frame sequence number exceeds buffering window, free up some space
	 * by releasing previous frames
	 */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;

	/* Discard if the current slot is already in use */
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);
		goto out;
	}

	/* Buffer the frame and arm the reorder timeout */
	status->reorder_time = jiffies;
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     REORDER_TIMEOUT);

out:
	spin_unlock_bh(&tid->lock);
}
234 int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
235 u16 ssn, u8 size)
237 struct mt76_rx_tid *tid;
239 mt76_rx_aggr_stop(dev, wcid, tidno);
241 tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
242 if (!tid)
243 return -ENOMEM;
245 tid->dev = dev;
246 tid->head = ssn;
247 tid->size = size;
248 INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
249 spin_lock_init(&tid->lock);
251 rcu_assign_pointer(wcid->aggr[tidno], tid);
253 return 0;
255 EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
257 static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
259 u8 size = tid->size;
260 int i;
262 spin_lock_bh(&tid->lock);
264 tid->stopped = true;
265 for (i = 0; tid->nframes && i < size; i++) {
266 struct sk_buff *skb = tid->reorder_buf[i];
268 if (!skb)
269 continue;
271 tid->nframes--;
272 dev_kfree_skb(skb);
275 spin_unlock_bh(&tid->lock);
277 cancel_delayed_work_sync(&tid->reorder_work);
280 void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
282 struct mt76_rx_tid *tid = NULL;
284 tid = rcu_replace_pointer(wcid->aggr[tidno], tid,
285 lockdep_is_held(&dev->mutex));
286 if (tid) {
287 mt76_rx_aggr_shutdown(dev, tid);
288 kfree_rcu(tid, rcu_head);
291 EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);