/*
 * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
 *
 * Copyright (C) 2011, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License").  You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
 * this warranty disclaimer.
 */
#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"
#include "11n_rxreorder.h"
/*
 * This function dispatches all packets in the Rx reorder table.
 *
 * There could be holes in the buffer, which are skipped by the function.
 * Since the buffer is linear, the function uses rotation to simulate
 * circular buffer.
 */
static int
mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
					 struct mwifiex_rx_reorder_tbl
					 *rx_reor_tbl_ptr, int start_win)
{
	int no_pkt_to_send, i;
	void *rx_tmp_ptr;
	unsigned long flags;

	no_pkt_to_send = (start_win > rx_reor_tbl_ptr->start_win) ?
		min((start_win - rx_reor_tbl_ptr->start_win),
		    rx_reor_tbl_ptr->win_size) : rx_reor_tbl_ptr->win_size;

	for (i = 0; i < no_pkt_to_send; ++i) {
		spin_lock_irqsave(&priv->rx_pkt_lock, flags);
		rx_tmp_ptr = NULL;
		if (rx_reor_tbl_ptr->rx_reorder_ptr[i]) {
			rx_tmp_ptr = rx_reor_tbl_ptr->rx_reorder_ptr[i];
			rx_reor_tbl_ptr->rx_reorder_ptr[i] = NULL;
		}
		spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
		if (rx_tmp_ptr)
			mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
	}

	spin_lock_irqsave(&priv->rx_pkt_lock, flags);
	/*
	 * We don't have a circular buffer, hence use rotation to simulate
	 * circular buffer.
	 */
	for (i = 0; i < rx_reor_tbl_ptr->win_size - no_pkt_to_send; ++i) {
		rx_reor_tbl_ptr->rx_reorder_ptr[i] =
			rx_reor_tbl_ptr->rx_reorder_ptr[no_pkt_to_send + i];
		rx_reor_tbl_ptr->rx_reorder_ptr[no_pkt_to_send + i] = NULL;
	}

	rx_reor_tbl_ptr->start_win = start_win;
	spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);

	return 0;
}
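
/*
 * Illustrative sketch (not from the driver; names are hypothetical and the
 * block is compiled out): the "rotation" above is simply a left shift of
 * the linear pointer array. Once n_sent leading slots have been
 * dispatched, slot n_sent becomes slot 0 and the vacated tail is cleared,
 * so the window keeps using indices 0..win_size-1.
 */
#if 0	/* example only, not built */
static void example_rotate_left(void *slot[], int win_size, int n_sent)
{
	int i;

	for (i = 0; i < win_size - n_sent; i++) {
		slot[i] = slot[n_sent + i];	/* pull the tail forward */
		slot[n_sent + i] = NULL;	/* old position is now free */
	}
}
#endif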
/*
 * This function dispatches all packets in the Rx reorder table until
 * a hole is found.
 *
 * The start window is adjusted automatically when a hole is located.
 * Since the buffer is linear, the function uses rotation to simulate
 * circular buffer.
 */
static int
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
			      struct mwifiex_rx_reorder_tbl
			      *rx_reor_tbl_ptr)
{
	int i, j, xchg;
	void *rx_tmp_ptr;
	unsigned long flags;

	for (i = 0; i < rx_reor_tbl_ptr->win_size; ++i) {
		spin_lock_irqsave(&priv->rx_pkt_lock, flags);
		if (!rx_reor_tbl_ptr->rx_reorder_ptr[i]) {
			spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
			break;
		}
		rx_tmp_ptr = rx_reor_tbl_ptr->rx_reorder_ptr[i];
		rx_reor_tbl_ptr->rx_reorder_ptr[i] = NULL;
		spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
		mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr);
	}

	spin_lock_irqsave(&priv->rx_pkt_lock, flags);
	/*
	 * We don't have a circular buffer, hence use rotation to simulate
	 * circular buffer.
	 */
	xchg = rx_reor_tbl_ptr->win_size - i;
	for (j = 0; j < xchg; ++j) {
		rx_reor_tbl_ptr->rx_reorder_ptr[j] =
			rx_reor_tbl_ptr->rx_reorder_ptr[i + j];
		rx_reor_tbl_ptr->rx_reorder_ptr[i + j] = NULL;
	}

	rx_reor_tbl_ptr->start_win = (rx_reor_tbl_ptr->start_win + i)
		& (MAX_TID_VALUE - 1);
	spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);

	return 0;
}
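
/*
 * Illustrative sketch (not from the driver; hypothetical helper, compiled
 * out): with win_size = 8 and slots {P0, P1, NULL, P3, ...}, only P0 and
 * P1 can be delivered in order; the first empty slot stops the scan and
 * start_win may then advance by exactly that count (modulo the sequence
 * number space), which is what the loop above does before rotating.
 */
#if 0	/* example only, not built */
static int example_count_deliverable(void *slot[], int win_size)
{
	int i;

	for (i = 0; i < win_size; i++)
		if (!slot[i])
			break;		/* first hole ends in-order delivery */

	return i;			/* how far the window start may move */
}
#endif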
/*
 * This function deletes the Rx reorder table and frees the memory.
 *
 * The function stops the associated timer and dispatches all the
 * pending packets in the Rx reorder table before deletion.
 */
static void
mwifiex_11n_delete_rx_reorder_tbl_entry(struct mwifiex_private *priv,
					struct mwifiex_rx_reorder_tbl
					*rx_reor_tbl_ptr)
{
	unsigned long flags;

	if (!rx_reor_tbl_ptr)
		return;

	mwifiex_11n_dispatch_pkt_until_start_win(priv, rx_reor_tbl_ptr,
						 (rx_reor_tbl_ptr->start_win +
						  rx_reor_tbl_ptr->win_size)
						 & (MAX_TID_VALUE - 1));

	del_timer(&rx_reor_tbl_ptr->timer_context.timer);

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_del(&rx_reor_tbl_ptr->list);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	kfree(rx_reor_tbl_ptr->rx_reorder_ptr);
	kfree(rx_reor_tbl_ptr);
}
/*
 * This function returns the pointer to an entry in Rx reordering
 * table which matches the given TA/TID pair.
 */
static struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
	struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
	unsigned long flags;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_for_each_entry(rx_reor_tbl_ptr, &priv->rx_reorder_tbl_ptr, list) {
		if ((!memcmp(rx_reor_tbl_ptr->ta, ta, ETH_ALEN))
		    && (rx_reor_tbl_ptr->tid == tid)) {
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       flags);
			return rx_reor_tbl_ptr;
		}
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	return NULL;
}
/*
 * This function finds the last sequence number used in the packets
 * buffered in Rx reordering table.
 */
static int
mwifiex_11n_find_last_seq_num(struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr)
{
	int i;

	for (i = (rx_reorder_tbl_ptr->win_size - 1); i >= 0; --i)
		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
			return i;

	return -1;
}
/*
 * This function flushes all the packets in Rx reordering table.
 *
 * The function checks if any packets are currently buffered in the
 * table or not. In case there are packets available, it dispatches
 * them and then dumps the Rx reordering table.
 */
static void
mwifiex_flush_data(unsigned long context)
{
	struct reorder_tmr_cnxt *reorder_cnxt =
		(struct reorder_tmr_cnxt *) context;
	int start_win;

	start_win = mwifiex_11n_find_last_seq_num(reorder_cnxt->ptr);
	if (start_win >= 0) {
		dev_dbg(reorder_cnxt->priv->adapter->dev,
			"info: flush data %d\n", start_win);
		mwifiex_11n_dispatch_pkt_until_start_win(reorder_cnxt->priv,
				reorder_cnxt->ptr,
				((reorder_cnxt->ptr->start_win +
				  start_win + 1) & (MAX_TID_VALUE - 1)));
	}
}
/*
 * This function creates an entry in Rx reordering table for the
 * given TA/TID.
 *
 * The function also initializes the entry with sequence number, window
 * size as well as initializes the timer.
 *
 * If the received TA/TID pair is already present, all the packets are
 * dispatched and the window size is moved until the SSN.
 */
static void
mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
				  int tid, int win_size, int seq_num)
{
	int i;
	struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr, *new_node;
	u16 last_seq = 0;
	unsigned long flags;

	/*
	 * If we get a TID, TA pair which is already present, dispatch all
	 * the packets and move the window start until the SSN.
	 */
	rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
	if (rx_reor_tbl_ptr) {
		mwifiex_11n_dispatch_pkt_until_start_win(priv, rx_reor_tbl_ptr,
							 seq_num);
		return;
	}

	/* if !rx_reor_tbl_ptr then create one */
	new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
	if (!new_node) {
		dev_err(priv->adapter->dev, "%s: failed to alloc new_node\n",
			__func__);
		return;
	}

	INIT_LIST_HEAD(&new_node->list);
	new_node->tid = tid;
	memcpy(new_node->ta, ta, ETH_ALEN);
	new_node->start_win = seq_num;
	if (mwifiex_queuing_ra_based(priv))
		dev_dbg(priv->adapter->dev,
			"info: ADHOC:last_seq=%d start_win=%d\n",
			last_seq, new_node->start_win);
	else
		last_seq = priv->rx_seq[tid];

	if (last_seq >= new_node->start_win)
		new_node->start_win = last_seq + 1;

	new_node->win_size = win_size;

	new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
					   GFP_KERNEL);
	if (!new_node->rx_reorder_ptr) {
		kfree((u8 *) new_node);
		dev_err(priv->adapter->dev,
			"%s: failed to alloc reorder_ptr\n", __func__);
		return;
	}

	new_node->timer_context.ptr = new_node;
	new_node->timer_context.priv = priv;

	init_timer(&new_node->timer_context.timer);
	new_node->timer_context.timer.function = mwifiex_flush_data;
	new_node->timer_context.timer.data =
			(unsigned long) &new_node->timer_context;

	for (i = 0; i < win_size; ++i)
		new_node->rx_reorder_ptr[i] = NULL;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_add_tail(&new_node->list, &priv->rx_reorder_tbl_ptr);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
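
/*
 * Illustrative sketch (not from the driver; names are hypothetical and the
 * block is compiled out): the flush timer above stores a pointer to its
 * reorder_tmr_cnxt in timer.data, so the callback can recover everything
 * it needs from a single unsigned long. The same pattern in miniature,
 * using the legacy timer API that this code is written against:
 */
#if 0	/* example only, not built */
struct example_ctx {
	struct timer_list timer;
	void *owner;			/* whatever the callback must reach */
};

static void example_timeout(unsigned long data)
{
	struct example_ctx *ctx = (struct example_ctx *) data;

	/* ctx->owner is valid here as long as the owner cancels the
	 * timer (del_timer) before freeing the context.
	 */
}

static void example_arm(struct example_ctx *ctx)
{
	init_timer(&ctx->timer);
	ctx->timer.function = example_timeout;
	ctx->timer.data = (unsigned long) ctx;
	mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(100));
}
#endif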
/*
 * This function prepares command for adding a BA request.
 *
 * Preparation includes -
 *      - Setting command ID and proper size
 *      - Setting add BA request buffer
 *      - Ensuring correct endian-ness
 */
int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf)
{
	struct host_cmd_ds_11n_addba_req *add_ba_req =
		(struct host_cmd_ds_11n_addba_req *)
		&cmd->params.add_ba_req;

	cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ);
	cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN);
	memcpy(add_ba_req, data_buf, sizeof(*add_ba_req));

	return 0;
}
/*
 * This function prepares command for adding a BA response.
 *
 * Preparation includes -
 *      - Setting command ID and proper size
 *      - Setting add BA response buffer
 *      - Ensuring correct endian-ness
 */
int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
				  struct host_cmd_ds_command *cmd,
				  void *data_buf)
{
	struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
		(struct host_cmd_ds_11n_addba_rsp *)
		&cmd->params.add_ba_rsp;
	struct host_cmd_ds_11n_addba_req *cmd_addba_req =
		(struct host_cmd_ds_11n_addba_req *) data_buf;
	u8 tid;
	int win_size;
	uint16_t block_ack_param_set;

	cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
	cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN);

	memcpy(add_ba_rsp->peer_mac_addr, cmd_addba_req->peer_mac_addr,
	       ETH_ALEN);
	add_ba_rsp->dialog_token = cmd_addba_req->dialog_token;
	add_ba_rsp->block_ack_tmo = cmd_addba_req->block_ack_tmo;
	add_ba_rsp->ssn = cmd_addba_req->ssn;

	block_ack_param_set = le16_to_cpu(cmd_addba_req->block_ack_param_set);
	tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
		>> BLOCKACKPARAM_TID_POS;
	add_ba_rsp->status_code = cpu_to_le16(ADDBA_RSP_STATUS_ACCEPT);
	block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
	/* We do not support AMSDU inside AMPDU, hence reset the bit */
	block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
	block_ack_param_set |= (priv->add_ba_param.rx_win_size <<
					BLOCKACKPARAM_WINSIZE_POS);
	add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set);
	win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
					& IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
					>> BLOCKACKPARAM_WINSIZE_POS;
	cmd_addba_req->block_ack_param_set = cpu_to_le16(block_ack_param_set);

	mwifiex_11n_create_rx_reorder_tbl(priv, cmd_addba_req->peer_mac_addr,
					  tid, win_size,
					  le16_to_cpu(cmd_addba_req->ssn));

	return 0;
}
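
/*
 * Illustrative sketch (not from the driver; hypothetical helper with raw
 * masks, compiled out): the Block Ack Parameter Set handled above is a
 * 16-bit field -- bit 0 A-MSDU support, bit 1 BA policy, bits 2-5 TID,
 * bits 6-15 buffer size. For instance 0x1002 carries TID 0 and a buffer
 * size of 64 with the A-MSDU bit clear, which is why the code above masks
 * out BLOCKACKPARAM_AMSDU_SUPP_MASK and rewrites only the buffer-size bits.
 */
#if 0	/* example only, not built */
static void example_decode_ba_param(unsigned short param,
				    int *tid, int *buf_size, int *amsdu)
{
	*amsdu    = param & 0x0001;		/* bit 0: A-MSDU supported */
	*tid      = (param & 0x003C) >> 2;	/* bits 2-5: TID */
	*buf_size = (param & 0xFFC0) >> 6;	/* bits 6-15: buffer size */
}
#endif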
/*
 * This function prepares command for deleting a BA request.
 *
 * Preparation includes -
 *      - Setting command ID and proper size
 *      - Setting del BA request buffer
 *      - Ensuring correct endian-ness
 */
int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd, void *data_buf)
{
	struct host_cmd_ds_11n_delba *del_ba = (struct host_cmd_ds_11n_delba *)
		&cmd->params.del_ba;

	cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA);
	cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN);
	memcpy(del_ba, data_buf, sizeof(*del_ba));

	return 0;
}
/*
 * This function identifies if Rx reordering is needed for a received packet.
 *
 * In case reordering is required, the function will do the reordering
 * before sending it to kernel.
 *
 * The Rx reorder table is checked first with the received TID/TA pair. If
 * not found, the received packet is dispatched immediately. But if found,
 * the packet is reordered and all the packets in the updated Rx reordering
 * table are dispatched until a hole is found.
 *
 * For sequence number less than the starting window, the packet is dropped.
 */
int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
			       u16 seq_num, u16 tid,
			       u8 *ta, u8 pkt_type, void *payload)
{
	struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
	int start_win, end_win, win_size, ret;
	u16 pkt_index;

	rx_reor_tbl_ptr =
		mwifiex_11n_get_rx_reorder_tbl((struct mwifiex_private *) priv,
					       tid, ta);
	if (!rx_reor_tbl_ptr) {
		if (pkt_type != PKT_TYPE_BAR)
			mwifiex_process_rx_packet(priv->adapter, payload);
		return 0;
	}

	start_win = rx_reor_tbl_ptr->start_win;
	win_size = rx_reor_tbl_ptr->win_size;
	end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
	del_timer(&rx_reor_tbl_ptr->timer_context.timer);
	mod_timer(&rx_reor_tbl_ptr->timer_context.timer, jiffies
		  + (MIN_FLUSH_TIMER_MS * win_size * HZ) / 1000);

	/*
	 * If seq_num is less than the starting window, ignore and drop
	 * the packet.
	 */
	if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {	/* Wrap */
		if (seq_num >= ((start_win + TWOPOW11) & (MAX_TID_VALUE - 1))
		    && (seq_num < start_win))
			return -1;
	} else if ((seq_num < start_win)
		   || (seq_num > (start_win + TWOPOW11))) {
		return -1;
	}

	/*
	 * If this packet is a BAR we adjust seq_num as
	 * WinStart = seq_num
	 */
	if (pkt_type == PKT_TYPE_BAR)
		seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1);

	if (((end_win < start_win)
	     && (seq_num < (TWOPOW11 - (MAX_TID_VALUE - start_win)))
	     && (seq_num > end_win)) || ((end_win > start_win)
	     && ((seq_num > end_win) || (seq_num < start_win)))) {
		end_win = seq_num;
		if (((seq_num - win_size) + 1) >= 0)
			start_win = (end_win - win_size) + 1;
		else
			start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
		ret = mwifiex_11n_dispatch_pkt_until_start_win(priv,
						rx_reor_tbl_ptr, start_win);
		if (ret)
			return ret;
	}

	if (pkt_type != PKT_TYPE_BAR) {
		if (seq_num >= start_win)
			pkt_index = seq_num - start_win;
		else
			pkt_index = (seq_num + MAX_TID_VALUE) - start_win;

		if (rx_reor_tbl_ptr->rx_reorder_ptr[pkt_index])
			return -1;

		rx_reor_tbl_ptr->rx_reorder_ptr[pkt_index] = payload;
	}

	/*
	 * Dispatch all packets sequentially from start_win until a
	 * hole is found and adjust the start_win appropriately
	 */
	ret = mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);

	return ret;
}
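
/*
 * Illustrative sketch (not from the driver; hypothetical helper, compiled
 * out): the wrap handling above works in a 4096-entry sequence space
 * (MAX_TID_VALUE) and treats anything in the half-space "behind"
 * start_win (TWOPOW11 = 2048) as an old frame to be dropped. The driver
 * keeps the wrap and non-wrap cases explicit; the underlying test can
 * also be written as a modulo distance, give or take the boundary at
 * exactly half the space:
 */
#if 0	/* example only, not built */
#define EX_SEQ_SPACE	4096
#define EX_HALF_SPACE	2048

/* Non-zero when seq lies in the half-space behind start_win. */
static int example_seq_is_old(int seq, int start_win)
{
	int delta = (seq - start_win) & (EX_SEQ_SPACE - 1);

	return delta >= EX_HALF_SPACE;
}
#endif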
/*
 * This function deletes an entry for a given TID/TA pair.
 *
 * The TID/TA are taken from del BA event body.
 */
void
mwifiex_11n_delete_ba_stream_tbl(struct mwifiex_private *priv, int tid,
				 u8 *peer_mac, u8 type, int initiator)
{
	struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
	struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
	u8 cleanup_rx_reorder_tbl;
	unsigned long flags;

	if (type == TYPE_DELBA_RECEIVE)
		cleanup_rx_reorder_tbl = (initiator) ? true : false;
	else
		cleanup_rx_reorder_tbl = (initiator) ? false : true;

	dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d, "
		"initiator=%d\n", peer_mac, tid, initiator);

	if (cleanup_rx_reorder_tbl) {
		rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
								 peer_mac);
		if (!rx_reor_tbl_ptr) {
			dev_dbg(priv->adapter->dev,
				"event: TID, TA not found in table\n");
			return;
		}
		mwifiex_11n_delete_rx_reorder_tbl_entry(priv, rx_reor_tbl_ptr);
	} else {
		ptx_tbl = mwifiex_11n_get_tx_ba_stream_tbl(priv, tid, peer_mac);
		if (!ptx_tbl) {
			dev_dbg(priv->adapter->dev,
				"event: TID, RA not found in table\n");
			return;
		}

		spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
		mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
		spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
	}
}
/*
 * This function handles the command response of an add BA response.
 *
 * Handling includes changing the header fields into CPU format and
 * creating the stream, provided the add BA is accepted.
 */
int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
			       struct host_cmd_ds_command *resp)
{
	struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
		(struct host_cmd_ds_11n_addba_rsp *)
		&resp->params.add_ba_rsp;
	int tid, win_size;
	struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
	uint16_t block_ack_param_set;

	block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);

	tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
		>> BLOCKACKPARAM_TID_POS;
	/*
	 * Check if we had rejected the ADDBA, if yes then do not create
	 * the stream
	 */
	if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) {
		win_size = (block_ack_param_set &
			    IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
			    >> BLOCKACKPARAM_WINSIZE_POS;

		dev_dbg(priv->adapter->dev, "cmd: ADDBA RSP: %pM"
			" tid=%d ssn=%d win_size=%d\n",
			add_ba_rsp->peer_mac_addr,
			tid, add_ba_rsp->ssn, win_size);
	} else {
		dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d\n",
			add_ba_rsp->peer_mac_addr, tid);

		rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv,
					tid, add_ba_rsp->peer_mac_addr);
		if (rx_reor_tbl_ptr)
			mwifiex_11n_delete_rx_reorder_tbl_entry(priv,
								rx_reor_tbl_ptr);
	}

	return 0;
}
/*
 * This function handles BA stream timeout event by preparing and sending
 * a command to the firmware.
 */
void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
				   struct host_cmd_ds_11n_batimeout *event)
{
	struct host_cmd_ds_11n_delba delba;

	memset(&delba, 0, sizeof(struct host_cmd_ds_11n_delba));
	memcpy(delba.peer_mac_addr, event->peer_mac_addr, ETH_ALEN);

	delba.del_ba_param_set |=
		cpu_to_le16((u16) event->tid << DELBA_TID_POS);
	delba.del_ba_param_set |= cpu_to_le16(
			(u16) event->origninator << DELBA_INITIATOR_POS);
	delba.reason_code = cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
	mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba);
}
/*
 * This function cleans up the Rx reorder table by deleting all the entries
 * and re-initializing.
 */
void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
{
	struct mwifiex_rx_reorder_tbl *del_tbl_ptr, *tmp_node;
	unsigned long flags;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_for_each_entry_safe(del_tbl_ptr, tmp_node,
				 &priv->rx_reorder_tbl_ptr, list) {
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
		mwifiex_11n_delete_rx_reorder_tbl_entry(priv, del_tbl_ptr);
		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
	memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
}
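
/*
 * Illustrative sketch (not from the driver; hypothetical names, compiled
 * out): the loop above releases rx_reorder_tbl_lock around each deletion
 * because mwifiex_11n_delete_rx_reorder_tbl_entry() takes the same lock
 * itself. The general shape of that drop-and-reacquire teardown is:
 */
#if 0	/* example only, not built */
struct example_entry {
	struct list_head list;
};

/* Assumed to remove the entry from the list under the lock and free it. */
static void example_delete_entry(spinlock_t *lock, struct example_entry *e);

static void example_teardown_all(spinlock_t *lock, struct list_head *head)
{
	struct example_entry *e, *tmp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(e, tmp, head, list) {
		spin_unlock_irqrestore(lock, flags);
		example_delete_entry(lock, e);
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}
#endif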