// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 */
10 #define SEQ_MODULO 0x1000
11 #define SEQ_MASK 0xfff
13 static inline int seq_less(u16 sq1
, u16 sq2
)
15 return ((sq1
- sq2
) & SEQ_MASK
) > (SEQ_MODULO
>> 1);
18 static inline u16
seq_inc(u16 sq
)
20 return (sq
+ 1) & SEQ_MASK
;
23 static inline u16
seq_sub(u16 sq1
, u16 sq2
)
25 return (sq1
- sq2
) & SEQ_MASK
;
28 static inline int reorder_index(struct wil_tid_ampdu_rx
*r
, u16 seq
)
30 return seq_sub(seq
, r
->ssn
) % r
->buf_size
;
33 static void wil_release_reorder_frame(struct net_device
*ndev
,
34 struct wil_tid_ampdu_rx
*r
,
37 struct sk_buff
*skb
= r
->reorder_buf
[index
];
42 /* release the frame from the reorder ring buffer */
44 r
->reorder_buf
[index
] = NULL
;
45 wil_netif_rx_any(skb
, ndev
);
48 r
->head_seq_num
= seq_inc(r
->head_seq_num
);
51 static void wil_release_reorder_frames(struct net_device
*ndev
,
52 struct wil_tid_ampdu_rx
*r
,
57 /* note: this function is never called with
58 * hseq preceding r->head_seq_num, i.e it is always true
59 * !seq_less(hseq, r->head_seq_num)
60 * and thus on loop exit it should be
61 * r->head_seq_num == hseq
63 while (seq_less(r
->head_seq_num
, hseq
) && r
->stored_mpdu_num
) {
64 index
= reorder_index(r
, r
->head_seq_num
);
65 wil_release_reorder_frame(ndev
, r
, index
);
67 r
->head_seq_num
= hseq
;
70 static void wil_reorder_release(struct net_device
*ndev
,
71 struct wil_tid_ampdu_rx
*r
)
73 int index
= reorder_index(r
, r
->head_seq_num
);
75 while (r
->reorder_buf
[index
]) {
76 wil_release_reorder_frame(ndev
, r
, index
);
77 index
= reorder_index(r
, r
->head_seq_num
);
81 /* called in NAPI context */
82 void wil_rx_reorder(struct wil6210_priv
*wil
, struct sk_buff
*skb
)
83 __acquires(&sta
->tid_rx_lock
) __releases(&sta
->tid_rx_lock
)
85 struct wil6210_vif
*vif
;
86 struct net_device
*ndev
;
87 int tid
, cid
, mid
, mcast
, retry
;
89 struct wil_sta_info
*sta
;
90 struct wil_tid_ampdu_rx
*r
;
94 wil
->txrx_ops
.get_reorder_params(wil
, skb
, &tid
, &cid
, &mid
, &seq
,
98 wil_dbg_txrx(wil
, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
99 mid
, cid
, tid
, seq
, mcast
);
101 vif
= wil
->vifs
[mid
];
102 if (unlikely(!vif
)) {
103 wil_dbg_txrx(wil
, "invalid VIF, mid %d\n", mid
);
107 ndev
= vif_to_ndev(vif
);
109 spin_lock(&sta
->tid_rx_lock
);
111 r
= sta
->tid_rx
[tid
];
113 wil_netif_rx_any(skb
, ndev
);
117 if (unlikely(mcast
)) {
118 if (retry
&& seq
== r
->mcast_last_seq
) {
120 wil_dbg_txrx(wil
, "Rx drop: dup mcast seq 0x%03x\n",
125 r
->mcast_last_seq
= seq
;
126 wil_netif_rx_any(skb
, ndev
);
131 hseq
= r
->head_seq_num
;
133 /** Due to the race between WMI events, where BACK establishment
134 * reported, and data Rx, few packets may be pass up before reorder
135 * buffer get allocated. Catch up by pretending SSN is what we
136 * see in the 1-st Rx packet
138 * Another scenario, Rx get delayed and we got packet from before
139 * BACK. Pass it to the stack and wait.
142 r
->first_time
= false;
143 if (seq
!= r
->head_seq_num
) {
144 if (seq_less(seq
, r
->head_seq_num
)) {
146 "Error: frame with early sequence 0x%03x, should be 0x%03x. Waiting...\n",
147 seq
, r
->head_seq_num
);
148 r
->first_time
= true;
149 wil_netif_rx_any(skb
, ndev
);
153 "Error: 1-st frame with wrong sequence 0x%03x, should be 0x%03x. Fixing...\n",
154 seq
, r
->head_seq_num
);
155 r
->head_seq_num
= seq
;
160 /* frame with out of date sequence number */
161 if (seq_less(seq
, r
->head_seq_num
)) {
162 r
->ssn_last_drop
= seq
;
164 wil_dbg_txrx(wil
, "Rx drop: old seq 0x%03x head 0x%03x\n",
165 seq
, r
->head_seq_num
);
171 * If frame the sequence number exceeds our buffering window
172 * size release some previous frames to make room for this one.
174 if (!seq_less(seq
, r
->head_seq_num
+ r
->buf_size
)) {
175 hseq
= seq_inc(seq_sub(seq
, r
->buf_size
));
176 /* release stored frames up to new head to stack */
177 wil_release_reorder_frames(ndev
, r
, hseq
);
180 /* Now the new frame is always in the range of the reordering buffer */
182 index
= reorder_index(r
, seq
);
184 /* check if we already stored this frame */
185 if (r
->reorder_buf
[index
]) {
187 wil_dbg_txrx(wil
, "Rx drop: dup seq 0x%03x\n", seq
);
193 * If the current MPDU is in the right order and nothing else
194 * is stored we can process it directly, no need to buffer it.
195 * If it is first but there's something stored, we may be able
196 * to release frames after this one.
198 if (seq
== r
->head_seq_num
&& r
->stored_mpdu_num
== 0) {
199 r
->head_seq_num
= seq_inc(r
->head_seq_num
);
200 wil_netif_rx_any(skb
, ndev
);
204 /* put the frame in the reordering buffer */
205 r
->reorder_buf
[index
] = skb
;
206 r
->stored_mpdu_num
++;
207 wil_reorder_release(ndev
, r
);
210 spin_unlock(&sta
->tid_rx_lock
);
213 /* process BAR frame, called in NAPI context */
214 void wil_rx_bar(struct wil6210_priv
*wil
, struct wil6210_vif
*vif
,
215 u8 cid
, u8 tid
, u16 seq
)
217 struct wil_sta_info
*sta
= &wil
->sta
[cid
];
218 struct net_device
*ndev
= vif_to_ndev(vif
);
219 struct wil_tid_ampdu_rx
*r
;
221 spin_lock(&sta
->tid_rx_lock
);
223 r
= sta
->tid_rx
[tid
];
225 wil_err(wil
, "BAR for non-existing CID %d TID %d\n", cid
, tid
);
228 if (seq_less(seq
, r
->head_seq_num
)) {
229 wil_err(wil
, "BAR Seq 0x%03x preceding head 0x%03x\n",
230 seq
, r
->head_seq_num
);
233 wil_dbg_txrx(wil
, "BAR: CID %d MID %d TID %d Seq 0x%03x head 0x%03x\n",
234 cid
, vif
->mid
, tid
, seq
, r
->head_seq_num
);
235 wil_release_reorder_frames(ndev
, r
, seq
);
238 spin_unlock(&sta
->tid_rx_lock
);
241 struct wil_tid_ampdu_rx
*wil_tid_ampdu_rx_alloc(struct wil6210_priv
*wil
,
244 struct wil_tid_ampdu_rx
*r
= kzalloc(sizeof(*r
), GFP_KERNEL
);
250 kcalloc(size
, sizeof(struct sk_buff
*), GFP_KERNEL
);
251 if (!r
->reorder_buf
) {
257 r
->head_seq_num
= ssn
;
259 r
->stored_mpdu_num
= 0;
260 r
->first_time
= true;
261 r
->mcast_last_seq
= U16_MAX
;
265 void wil_tid_ampdu_rx_free(struct wil6210_priv
*wil
,
266 struct wil_tid_ampdu_rx
*r
)
273 /* Do not pass remaining frames to the network stack - it may be
274 * not expecting to get any more Rx. Rx from here may lead to
275 * kernel OOPS since some per-socket accounting info was already
278 for (i
= 0; i
< r
->buf_size
; i
++)
279 kfree_skb(r
->reorder_buf
[i
]);
281 kfree(r
->reorder_buf
);
285 /* ADDBA processing */
286 static u16
wil_agg_size(struct wil6210_priv
*wil
, u16 req_agg_wsize
)
288 u16 max_agg_size
= min_t(u16
, wil
->max_agg_wsize
, wil
->max_ampdu_size
/
289 (mtu_max
+ WIL_MAX_MPDU_OVERHEAD
));
294 return min(max_agg_size
, req_agg_wsize
);
297 /* Block Ack - Rx side (recipient) */
298 int wil_addba_rx_request(struct wil6210_priv
*wil
, u8 mid
, u8 cid
, u8 tid
,
299 u8 dialog_token
, __le16 ba_param_set
,
300 __le16 ba_timeout
, __le16 ba_seq_ctrl
)
301 __acquires(&sta
->tid_rx_lock
) __releases(&sta
->tid_rx_lock
)
303 u16 param_set
= le16_to_cpu(ba_param_set
);
304 u16 agg_timeout
= le16_to_cpu(ba_timeout
);
305 u16 seq_ctrl
= le16_to_cpu(ba_seq_ctrl
);
306 struct wil_sta_info
*sta
;
308 /* bit 0: A-MSDU supported
309 * bit 1: policy (should be 0 for us)
311 * bits 6..15: buffer size
313 u16 req_agg_wsize
= WIL_GET_BITS(param_set
, 6, 15);
314 bool agg_amsdu
= wil
->use_enhanced_dma_hw
&&
315 wil
->use_rx_hw_reordering
&&
316 test_bit(WMI_FW_CAPABILITY_AMSDU
, wil
->fw_capabilities
) &&
317 wil
->amsdu_en
&& (param_set
& BIT(0));
318 int ba_policy
= param_set
& BIT(1);
319 u16 ssn
= seq_ctrl
>> 4;
320 struct wil_tid_ampdu_rx
*r
;
326 if (cid
>= wil
->max_assoc_sta
) {
327 wil_err(wil
, "BACK: invalid CID %d\n", cid
);
332 sta
= &wil
->sta
[cid
];
333 if (sta
->status
!= wil_sta_connected
) {
334 wil_err(wil
, "BACK: CID %d not connected\n", cid
);
340 "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
341 cid
, sta
->addr
, tid
, req_agg_wsize
, agg_timeout
,
342 agg_amsdu
? "+" : "-", !!ba_policy
, dialog_token
, ssn
);
345 if (req_agg_wsize
== 0) {
346 wil_dbg_misc(wil
, "Suggest BACK wsize %d\n",
348 agg_wsize
= wil
->max_agg_wsize
;
350 agg_wsize
= min_t(u16
, wil
->max_agg_wsize
, req_agg_wsize
);
353 rc
= wil
->txrx_ops
.wmi_addba_rx_resp(wil
, mid
, cid
, tid
, dialog_token
,
354 WLAN_STATUS_SUCCESS
, agg_amsdu
,
355 agg_wsize
, agg_timeout
);
357 wil_err(wil
, "do not apply ba, rc(%d)\n", rc
);
362 if (!wil
->use_rx_hw_reordering
) {
363 r
= wil_tid_ampdu_rx_alloc(wil
, agg_wsize
, ssn
);
364 spin_lock_bh(&sta
->tid_rx_lock
);
365 wil_tid_ampdu_rx_free(wil
, sta
->tid_rx
[tid
]);
366 sta
->tid_rx
[tid
] = r
;
367 spin_unlock_bh(&sta
->tid_rx_lock
);
374 /* BACK - Tx side (originator) */
375 int wil_addba_tx_request(struct wil6210_priv
*wil
, u8 ringid
, u16 wsize
)
377 u8 agg_wsize
= wil_agg_size(wil
, wsize
);
379 struct wil_ring_tx_data
*txdata
= &wil
->ring_tx_data
[ringid
];
382 if (txdata
->addba_in_progress
) {
383 wil_dbg_misc(wil
, "ADDBA for vring[%d] already in progress\n",
387 if (txdata
->agg_wsize
) {
389 "ADDBA for vring[%d] already done for wsize %d\n",
390 ringid
, txdata
->agg_wsize
);
393 txdata
->addba_in_progress
= true;
394 rc
= wmi_addba(wil
, txdata
->mid
, ringid
, agg_wsize
, agg_timeout
);
396 wil_err(wil
, "wmi_addba failed, rc (%d)", rc
);
397 txdata
->addba_in_progress
= false;