/*
 * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
20 #define SEQ_MODULO 0x1000
21 #define SEQ_MASK 0xfff
23 static inline int seq_less(u16 sq1
, u16 sq2
)
25 return ((sq1
- sq2
) & SEQ_MASK
) > (SEQ_MODULO
>> 1);
28 static inline u16
seq_inc(u16 sq
)
30 return (sq
+ 1) & SEQ_MASK
;
33 static inline u16
seq_sub(u16 sq1
, u16 sq2
)
35 return (sq1
- sq2
) & SEQ_MASK
;
38 static inline int reorder_index(struct wil_tid_ampdu_rx
*r
, u16 seq
)
40 return seq_sub(seq
, r
->ssn
) % r
->buf_size
;
43 static void wil_release_reorder_frame(struct wil6210_priv
*wil
,
44 struct wil_tid_ampdu_rx
*r
,
47 struct net_device
*ndev
= wil_to_ndev(wil
);
48 struct sk_buff
*skb
= r
->reorder_buf
[index
];
53 /* release the frame from the reorder ring buffer */
55 r
->reorder_buf
[index
] = NULL
;
56 wil_netif_rx_any(skb
, ndev
);
59 r
->head_seq_num
= seq_inc(r
->head_seq_num
);
62 static void wil_release_reorder_frames(struct wil6210_priv
*wil
,
63 struct wil_tid_ampdu_rx
*r
,
68 /* note: this function is never called with
69 * hseq preceding r->head_seq_num, i.e it is always true
70 * !seq_less(hseq, r->head_seq_num)
71 * and thus on loop exit it should be
72 * r->head_seq_num == hseq
74 while (seq_less(r
->head_seq_num
, hseq
) && r
->stored_mpdu_num
) {
75 index
= reorder_index(r
, r
->head_seq_num
);
76 wil_release_reorder_frame(wil
, r
, index
);
78 r
->head_seq_num
= hseq
;
81 static void wil_reorder_release(struct wil6210_priv
*wil
,
82 struct wil_tid_ampdu_rx
*r
)
84 int index
= reorder_index(r
, r
->head_seq_num
);
86 while (r
->reorder_buf
[index
]) {
87 wil_release_reorder_frame(wil
, r
, index
);
88 index
= reorder_index(r
, r
->head_seq_num
);
92 /* called in NAPI context */
93 void wil_rx_reorder(struct wil6210_priv
*wil
, struct sk_buff
*skb
)
94 __acquires(&sta
->tid_rx_lock
) __releases(&sta
->tid_rx_lock
)
96 struct net_device
*ndev
= wil_to_ndev(wil
);
97 struct vring_rx_desc
*d
= wil_skb_rxdesc(skb
);
98 int tid
= wil_rxdesc_tid(d
);
99 int cid
= wil_rxdesc_cid(d
);
100 int mid
= wil_rxdesc_mid(d
);
101 u16 seq
= wil_rxdesc_seq(d
);
102 int mcast
= wil_rxdesc_mcast(d
);
103 struct wil_sta_info
*sta
= &wil
->sta
[cid
];
104 struct wil_tid_ampdu_rx
*r
;
108 wil_dbg_txrx(wil
, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
109 mid
, cid
, tid
, seq
, mcast
);
111 if (unlikely(mcast
)) {
112 wil_netif_rx_any(skb
, ndev
);
116 spin_lock(&sta
->tid_rx_lock
);
118 r
= sta
->tid_rx
[tid
];
120 wil_netif_rx_any(skb
, ndev
);
125 hseq
= r
->head_seq_num
;
127 /** Due to the race between WMI events, where BACK establishment
128 * reported, and data Rx, few packets may be pass up before reorder
129 * buffer get allocated. Catch up by pretending SSN is what we
130 * see in the 1-st Rx packet
132 * Another scenario, Rx get delayed and we got packet from before
133 * BACK. Pass it to the stack and wait.
136 r
->first_time
= false;
137 if (seq
!= r
->head_seq_num
) {
138 if (seq_less(seq
, r
->head_seq_num
)) {
140 "Error: frame with early sequence 0x%03x, should be 0x%03x. Waiting...\n",
141 seq
, r
->head_seq_num
);
142 r
->first_time
= true;
143 wil_netif_rx_any(skb
, ndev
);
147 "Error: 1-st frame with wrong sequence 0x%03x, should be 0x%03x. Fixing...\n",
148 seq
, r
->head_seq_num
);
149 r
->head_seq_num
= seq
;
154 /* frame with out of date sequence number */
155 if (seq_less(seq
, r
->head_seq_num
)) {
156 r
->ssn_last_drop
= seq
;
158 wil_dbg_txrx(wil
, "Rx drop: old seq 0x%03x head 0x%03x\n",
159 seq
, r
->head_seq_num
);
165 * If frame the sequence number exceeds our buffering window
166 * size release some previous frames to make room for this one.
168 if (!seq_less(seq
, r
->head_seq_num
+ r
->buf_size
)) {
169 hseq
= seq_inc(seq_sub(seq
, r
->buf_size
));
170 /* release stored frames up to new head to stack */
171 wil_release_reorder_frames(wil
, r
, hseq
);
174 /* Now the new frame is always in the range of the reordering buffer */
176 index
= reorder_index(r
, seq
);
178 /* check if we already stored this frame */
179 if (r
->reorder_buf
[index
]) {
181 wil_dbg_txrx(wil
, "Rx drop: dup seq 0x%03x\n", seq
);
187 * If the current MPDU is in the right order and nothing else
188 * is stored we can process it directly, no need to buffer it.
189 * If it is first but there's something stored, we may be able
190 * to release frames after this one.
192 if (seq
== r
->head_seq_num
&& r
->stored_mpdu_num
== 0) {
193 r
->head_seq_num
= seq_inc(r
->head_seq_num
);
194 wil_netif_rx_any(skb
, ndev
);
198 /* put the frame in the reordering buffer */
199 r
->reorder_buf
[index
] = skb
;
200 r
->reorder_time
[index
] = jiffies
;
201 r
->stored_mpdu_num
++;
202 wil_reorder_release(wil
, r
);
205 spin_unlock(&sta
->tid_rx_lock
);
208 /* process BAR frame, called in NAPI context */
209 void wil_rx_bar(struct wil6210_priv
*wil
, u8 cid
, u8 tid
, u16 seq
)
211 struct wil_sta_info
*sta
= &wil
->sta
[cid
];
212 struct wil_tid_ampdu_rx
*r
;
214 spin_lock(&sta
->tid_rx_lock
);
216 r
= sta
->tid_rx
[tid
];
218 wil_err(wil
, "BAR for non-existing CID %d TID %d\n", cid
, tid
);
221 if (seq_less(seq
, r
->head_seq_num
)) {
222 wil_err(wil
, "BAR Seq 0x%03x preceding head 0x%03x\n",
223 seq
, r
->head_seq_num
);
226 wil_dbg_txrx(wil
, "BAR: CID %d TID %d Seq 0x%03x head 0x%03x\n",
227 cid
, tid
, seq
, r
->head_seq_num
);
228 wil_release_reorder_frames(wil
, r
, seq
);
231 spin_unlock(&sta
->tid_rx_lock
);
234 struct wil_tid_ampdu_rx
*wil_tid_ampdu_rx_alloc(struct wil6210_priv
*wil
,
237 struct wil_tid_ampdu_rx
*r
= kzalloc(sizeof(*r
), GFP_KERNEL
);
243 kcalloc(size
, sizeof(struct sk_buff
*), GFP_KERNEL
);
245 kcalloc(size
, sizeof(unsigned long), GFP_KERNEL
);
246 if (!r
->reorder_buf
|| !r
->reorder_time
) {
247 kfree(r
->reorder_buf
);
248 kfree(r
->reorder_time
);
254 r
->head_seq_num
= ssn
;
256 r
->stored_mpdu_num
= 0;
257 r
->first_time
= true;
261 void wil_tid_ampdu_rx_free(struct wil6210_priv
*wil
,
262 struct wil_tid_ampdu_rx
*r
)
266 wil_release_reorder_frames(wil
, r
, r
->head_seq_num
+ r
->buf_size
);
267 kfree(r
->reorder_buf
);
268 kfree(r
->reorder_time
);
272 /* ADDBA processing */
273 static u16
wil_agg_size(struct wil6210_priv
*wil
, u16 req_agg_wsize
)
275 u16 max_agg_size
= min_t(u16
, WIL_MAX_AGG_WSIZE
, WIL_MAX_AMPDU_SIZE
/
276 (mtu_max
+ WIL_MAX_MPDU_OVERHEAD
));
281 return min(max_agg_size
, req_agg_wsize
);
284 /* Block Ack - Rx side (recipient */
285 int wil_addba_rx_request(struct wil6210_priv
*wil
, u8 cidxtid
,
286 u8 dialog_token
, __le16 ba_param_set
,
287 __le16 ba_timeout
, __le16 ba_seq_ctrl
)
289 struct wil_back_rx
*req
= kzalloc(sizeof(*req
), GFP_KERNEL
);
294 req
->cidxtid
= cidxtid
;
295 req
->dialog_token
= dialog_token
;
296 req
->ba_param_set
= le16_to_cpu(ba_param_set
);
297 req
->ba_timeout
= le16_to_cpu(ba_timeout
);
298 req
->ba_seq_ctrl
= le16_to_cpu(ba_seq_ctrl
);
300 mutex_lock(&wil
->back_rx_mutex
);
301 list_add_tail(&req
->list
, &wil
->back_rx_pending
);
302 mutex_unlock(&wil
->back_rx_mutex
);
304 queue_work(wil
->wq_service
, &wil
->back_rx_worker
);
309 static void wil_back_rx_handle(struct wil6210_priv
*wil
,
310 struct wil_back_rx
*req
)
311 __acquires(&sta
->tid_rx_lock
) __releases(&sta
->tid_rx_lock
)
313 struct wil_sta_info
*sta
;
316 /* bit 0: A-MSDU supported
317 * bit 1: policy (should be 0 for us)
319 * bits 6..15: buffer size
321 u16 req_agg_wsize
= WIL_GET_BITS(req
->ba_param_set
, 6, 15);
322 bool agg_amsdu
= !!(req
->ba_param_set
& BIT(0));
323 int ba_policy
= req
->ba_param_set
& BIT(1);
324 u16 agg_timeout
= req
->ba_timeout
;
325 u16 status
= WLAN_STATUS_SUCCESS
;
326 u16 ssn
= req
->ba_seq_ctrl
>> 4;
327 struct wil_tid_ampdu_rx
*r
;
331 parse_cidxtid(req
->cidxtid
, &cid
, &tid
);
334 if (cid
>= WIL6210_MAX_CID
) {
335 wil_err(wil
, "BACK: invalid CID %d\n", cid
);
339 sta
= &wil
->sta
[cid
];
340 if (sta
->status
!= wil_sta_connected
) {
341 wil_err(wil
, "BACK: CID %d not connected\n", cid
);
346 "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
347 cid
, sta
->addr
, tid
, req_agg_wsize
, req
->ba_timeout
,
348 agg_amsdu
? "+" : "-", !!ba_policy
, req
->dialog_token
, ssn
);
352 wil_err(wil
, "BACK requested unsupported ba_policy == 1\n");
353 status
= WLAN_STATUS_INVALID_QOS_PARAM
;
355 if (status
== WLAN_STATUS_SUCCESS
)
356 agg_wsize
= wil_agg_size(wil
, req_agg_wsize
);
358 rc
= wmi_addba_rx_resp(wil
, cid
, tid
, req
->dialog_token
, status
,
359 agg_amsdu
, agg_wsize
, agg_timeout
);
360 if (rc
|| (status
!= WLAN_STATUS_SUCCESS
))
364 r
= wil_tid_ampdu_rx_alloc(wil
, agg_wsize
, ssn
);
365 spin_lock_bh(&sta
->tid_rx_lock
);
366 wil_tid_ampdu_rx_free(wil
, sta
->tid_rx
[tid
]);
367 sta
->tid_rx
[tid
] = r
;
368 spin_unlock_bh(&sta
->tid_rx_lock
);
371 void wil_back_rx_flush(struct wil6210_priv
*wil
)
373 struct wil_back_rx
*evt
, *t
;
375 wil_dbg_misc(wil
, "%s()\n", __func__
);
377 mutex_lock(&wil
->back_rx_mutex
);
379 list_for_each_entry_safe(evt
, t
, &wil
->back_rx_pending
, list
) {
380 list_del(&evt
->list
);
384 mutex_unlock(&wil
->back_rx_mutex
);
387 /* Retrieve next ADDBA request from the pending list */
388 static struct list_head
*next_back_rx(struct wil6210_priv
*wil
)
390 struct list_head
*ret
= NULL
;
392 mutex_lock(&wil
->back_rx_mutex
);
394 if (!list_empty(&wil
->back_rx_pending
)) {
395 ret
= wil
->back_rx_pending
.next
;
399 mutex_unlock(&wil
->back_rx_mutex
);
404 void wil_back_rx_worker(struct work_struct
*work
)
406 struct wil6210_priv
*wil
= container_of(work
, struct wil6210_priv
,
408 struct wil_back_rx
*evt
;
409 struct list_head
*lh
;
411 while ((lh
= next_back_rx(wil
)) != NULL
) {
412 evt
= list_entry(lh
, struct wil_back_rx
, list
);
414 wil_back_rx_handle(wil
, evt
);
419 /* BACK - Tx (originator) side */
420 static void wil_back_tx_handle(struct wil6210_priv
*wil
,
421 struct wil_back_tx
*req
)
423 struct vring_tx_data
*txdata
= &wil
->vring_tx_data
[req
->ringid
];
426 if (txdata
->addba_in_progress
) {
427 wil_dbg_misc(wil
, "ADDBA for vring[%d] already in progress\n",
431 if (txdata
->agg_wsize
) {
433 "ADDBA for vring[%d] already established wsize %d\n",
434 req
->ringid
, txdata
->agg_wsize
);
437 txdata
->addba_in_progress
= true;
438 rc
= wmi_addba(wil
, req
->ringid
, req
->agg_wsize
, req
->agg_timeout
);
440 txdata
->addba_in_progress
= false;
443 static struct list_head
*next_back_tx(struct wil6210_priv
*wil
)
445 struct list_head
*ret
= NULL
;
447 mutex_lock(&wil
->back_tx_mutex
);
449 if (!list_empty(&wil
->back_tx_pending
)) {
450 ret
= wil
->back_tx_pending
.next
;
454 mutex_unlock(&wil
->back_tx_mutex
);
459 void wil_back_tx_worker(struct work_struct
*work
)
461 struct wil6210_priv
*wil
= container_of(work
, struct wil6210_priv
,
463 struct wil_back_tx
*evt
;
464 struct list_head
*lh
;
466 while ((lh
= next_back_tx(wil
)) != NULL
) {
467 evt
= list_entry(lh
, struct wil_back_tx
, list
);
469 wil_back_tx_handle(wil
, evt
);
474 void wil_back_tx_flush(struct wil6210_priv
*wil
)
476 struct wil_back_tx
*evt
, *t
;
478 wil_dbg_misc(wil
, "%s()\n", __func__
);
480 mutex_lock(&wil
->back_tx_mutex
);
482 list_for_each_entry_safe(evt
, t
, &wil
->back_tx_pending
, list
) {
483 list_del(&evt
->list
);
487 mutex_unlock(&wil
->back_tx_mutex
);
490 int wil_addba_tx_request(struct wil6210_priv
*wil
, u8 ringid
, u16 wsize
)
492 struct wil_back_tx
*req
= kzalloc(sizeof(*req
), GFP_KERNEL
);
497 req
->ringid
= ringid
;
498 req
->agg_wsize
= wil_agg_size(wil
, wsize
);
499 req
->agg_timeout
= 0;
501 mutex_lock(&wil
->back_tx_mutex
);
502 list_add_tail(&req
->list
, &wil
->back_tx_pending
);
503 mutex_unlock(&wil
->back_tx_mutex
);
505 queue_work(wil
->wq_service
, &wil
->back_tx_worker
);