// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
 */

/* How long an out-of-order frame may sit in the reorder buffer before the
 * delayed work force-releases it (100 ms at HZ ticks per second).
 */
#define REORDER_TIMEOUT (HZ / 10)
10 mt76_aggr_release(struct mt76_rx_tid
*tid
, struct sk_buff_head
*frames
, int idx
)
14 tid
->head
= ieee80211_sn_inc(tid
->head
);
16 skb
= tid
->reorder_buf
[idx
];
20 tid
->reorder_buf
[idx
] = NULL
;
22 __skb_queue_tail(frames
, skb
);
26 mt76_rx_aggr_release_frames(struct mt76_rx_tid
*tid
,
27 struct sk_buff_head
*frames
,
32 while (ieee80211_sn_less(tid
->head
, head
)) {
33 idx
= tid
->head
% tid
->size
;
34 mt76_aggr_release(tid
, frames
, idx
);
39 mt76_rx_aggr_release_head(struct mt76_rx_tid
*tid
, struct sk_buff_head
*frames
)
41 int idx
= tid
->head
% tid
->size
;
43 while (tid
->reorder_buf
[idx
]) {
44 mt76_aggr_release(tid
, frames
, idx
);
45 idx
= tid
->head
% tid
->size
;
50 mt76_rx_aggr_check_release(struct mt76_rx_tid
*tid
, struct sk_buff_head
*frames
)
52 struct mt76_rx_status
*status
;
54 int start
, idx
, nframes
;
59 mt76_rx_aggr_release_head(tid
, frames
);
61 start
= tid
->head
% tid
->size
;
62 nframes
= tid
->nframes
;
64 for (idx
= (tid
->head
+ 1) % tid
->size
;
65 idx
!= start
&& nframes
;
66 idx
= (idx
+ 1) % tid
->size
) {
67 skb
= tid
->reorder_buf
[idx
];
72 status
= (struct mt76_rx_status
*)skb
->cb
;
73 if (!time_after(jiffies
,
74 status
->reorder_time
+ REORDER_TIMEOUT
))
77 mt76_rx_aggr_release_frames(tid
, frames
, status
->seqno
);
80 mt76_rx_aggr_release_head(tid
, frames
);
84 mt76_rx_aggr_reorder_work(struct work_struct
*work
)
86 struct mt76_rx_tid
*tid
= container_of(work
, struct mt76_rx_tid
,
88 struct mt76_dev
*dev
= tid
->dev
;
89 struct sk_buff_head frames
;
92 __skb_queue_head_init(&frames
);
97 spin_lock(&tid
->lock
);
98 mt76_rx_aggr_check_release(tid
, &frames
);
99 nframes
= tid
->nframes
;
100 spin_unlock(&tid
->lock
);
103 ieee80211_queue_delayed_work(tid
->dev
->hw
, &tid
->reorder_work
,
105 mt76_rx_complete(dev
, &frames
, NULL
);
112 mt76_rx_aggr_check_ctl(struct sk_buff
*skb
, struct sk_buff_head
*frames
)
114 struct mt76_rx_status
*status
= (struct mt76_rx_status
*)skb
->cb
;
115 struct ieee80211_bar
*bar
= (struct ieee80211_bar
*)skb
->data
;
116 struct mt76_wcid
*wcid
= status
->wcid
;
117 struct mt76_rx_tid
*tid
;
120 if (!ieee80211_is_ctl(bar
->frame_control
))
123 if (!ieee80211_is_back_req(bar
->frame_control
))
126 status
->tid
= le16_to_cpu(bar
->control
) >> 12;
127 seqno
= IEEE80211_SEQ_TO_SN(le16_to_cpu(bar
->start_seq_num
));
128 tid
= rcu_dereference(wcid
->aggr
[status
->tid
]);
132 spin_lock_bh(&tid
->lock
);
134 mt76_rx_aggr_release_frames(tid
, frames
, seqno
);
135 mt76_rx_aggr_release_head(tid
, frames
);
137 spin_unlock_bh(&tid
->lock
);
140 void mt76_rx_aggr_reorder(struct sk_buff
*skb
, struct sk_buff_head
*frames
)
142 struct mt76_rx_status
*status
= (struct mt76_rx_status
*)skb
->cb
;
143 struct ieee80211_hdr
*hdr
= (struct ieee80211_hdr
*)skb
->data
;
144 struct mt76_wcid
*wcid
= status
->wcid
;
145 struct ieee80211_sta
*sta
;
146 struct mt76_rx_tid
*tid
;
148 u16 seqno
, head
, size
;
151 __skb_queue_tail(frames
, skb
);
153 sta
= wcid_to_sta(wcid
);
158 mt76_rx_aggr_check_ctl(skb
, frames
);
162 /* not part of a BA session */
163 ackp
= *ieee80211_get_qos_ctl(hdr
) & IEEE80211_QOS_CTL_ACK_POLICY_MASK
;
164 if (ackp
!= IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK
&&
165 ackp
!= IEEE80211_QOS_CTL_ACK_POLICY_NORMAL
)
168 tid
= rcu_dereference(wcid
->aggr
[status
->tid
]);
172 status
->flag
|= RX_FLAG_DUP_VALIDATED
;
173 spin_lock_bh(&tid
->lock
);
179 seqno
= status
->seqno
;
181 sn_less
= ieee80211_sn_less(seqno
, head
);
191 __skb_unlink(skb
, frames
);
197 tid
->head
= ieee80211_sn_inc(head
);
199 mt76_rx_aggr_release_head(tid
, frames
);
203 __skb_unlink(skb
, frames
);
206 * Frame sequence number exceeds buffering window, free up some space
207 * by releasing previous frames
209 if (!ieee80211_sn_less(seqno
, head
+ size
)) {
210 head
= ieee80211_sn_inc(ieee80211_sn_sub(seqno
, size
));
211 mt76_rx_aggr_release_frames(tid
, frames
, head
);
216 /* Discard if the current slot is already in use */
217 if (tid
->reorder_buf
[idx
]) {
222 status
->reorder_time
= jiffies
;
223 tid
->reorder_buf
[idx
] = skb
;
225 mt76_rx_aggr_release_head(tid
, frames
);
227 ieee80211_queue_delayed_work(tid
->dev
->hw
, &tid
->reorder_work
,
231 spin_unlock_bh(&tid
->lock
);
234 int mt76_rx_aggr_start(struct mt76_dev
*dev
, struct mt76_wcid
*wcid
, u8 tidno
,
237 struct mt76_rx_tid
*tid
;
239 mt76_rx_aggr_stop(dev
, wcid
, tidno
);
241 tid
= kzalloc(struct_size(tid
, reorder_buf
, size
), GFP_KERNEL
);
248 INIT_DELAYED_WORK(&tid
->reorder_work
, mt76_rx_aggr_reorder_work
);
249 spin_lock_init(&tid
->lock
);
251 rcu_assign_pointer(wcid
->aggr
[tidno
], tid
);
255 EXPORT_SYMBOL_GPL(mt76_rx_aggr_start
);
257 static void mt76_rx_aggr_shutdown(struct mt76_dev
*dev
, struct mt76_rx_tid
*tid
)
262 spin_lock_bh(&tid
->lock
);
265 for (i
= 0; tid
->nframes
&& i
< size
; i
++) {
266 struct sk_buff
*skb
= tid
->reorder_buf
[i
];
275 spin_unlock_bh(&tid
->lock
);
277 cancel_delayed_work_sync(&tid
->reorder_work
);
280 void mt76_rx_aggr_stop(struct mt76_dev
*dev
, struct mt76_wcid
*wcid
, u8 tidno
)
282 struct mt76_rx_tid
*tid
= NULL
;
284 tid
= rcu_replace_pointer(wcid
->aggr
[tidno
], tid
,
285 lockdep_is_held(&dev
->mutex
));
287 mt76_rx_aggr_shutdown(dev
, tid
);
288 kfree_rcu(tid
, rcu_head
);
291 EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop
);