// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
 */

#include <linux/etherdevice.h>
#include <linux/moduleparam.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "wil6210.h"
#include "txrx_edma.h"
#include "txrx.h"
#include "trace.h"

/* Max number of entries (packets to complete) to update the hwtail of tx
 * status ring. Should be power of 2
 */
#define WIL_EDMA_TX_SRING_UPDATE_HW_TAIL 128
#define WIL_EDMA_MAX_DATA_OFFSET (2)
/* RX buffer size must be aligned to 4 bytes */
#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)
#define MAX_INVALID_BUFF_ID_RETRY (3)

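/* WIL_EDMA_TX_SRING_UPDATE_HW_TAIL bounds how many Tx completions are
 * processed before the status ring hwtail register is written back (see
 * wil_tx_sring_handler()); MAX_INVALID_BUFF_ID_RETRY bounds how many times a
 * status message whose buff_id still reads 0 is re-read before giving up
 * (see wil_sring_reap_rx_edma()).
 */
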
static void wil_tx_desc_unmap_edma(struct device *dev,
				   union wil_tx_desc *desc,
				   struct wil_ctx *ctx)
{
	struct wil_tx_enhanced_desc *d = (struct wil_tx_enhanced_desc *)desc;
	dma_addr_t pa = wil_tx_desc_get_addr_edma(&d->dma);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}

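/* Note: ctx->mapped_as is recorded at map time - wil_mapped_as_single for
 * buffers mapped with dma_map_single() and wil_mapped_as_page for page
 * fragments mapped with skb_frag_dma_map() - so the matching unmap variant
 * can be chosen above.
 */
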
static int wil_find_free_sring(struct wil6210_priv *wil)
{
	int i;

	for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++) {
		if (!wil->srings[i].va)
			return i;
	}

	return -EINVAL;
}

static void wil_sring_free(struct wil6210_priv *wil,
			   struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz;

	if (!sring || !sring->va)
		return;

	sz = sring->elem_size * sring->size;

	wil_dbg_misc(wil, "status_ring_free, size(bytes)=%zu, 0x%p:%pad\n",
		     sz, sring->va, &sring->pa);

	dma_free_coherent(dev, sz, (void *)sring->va, sring->pa);
	sring->pa = 0;
	sring->va = NULL;
}

static int wil_sring_alloc(struct wil6210_priv *wil,
			   struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = sring->elem_size * sring->size;

	wil_dbg_misc(wil, "status_ring_alloc: size=%zu\n", sz);

	if (sz == 0) {
		wil_err(wil, "Cannot allocate a zero size status ring\n");
		return -EINVAL;
	}

	sring->swhead = 0;

	/* Status messages are allocated and initialized to 0. This is necessary
	 * since DR bit should be initialized to 0.
	 */
	sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
	if (!sring->va)
		return -ENOMEM;

	wil_dbg_misc(wil, "status_ring[%d] 0x%p:%pad\n", sring->size, sring->va,
		     &sring->pa);

	return 0;
}

static int wil_tx_init_edma(struct wil6210_priv *wil)
{
	int ring_id = wil_find_free_sring(wil);
	struct wil_status_ring *sring;
	int rc;
	u16 status_ring_size;

	if (wil->tx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
	    wil->tx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
		wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;

	status_ring_size = 1 << wil->tx_status_ring_order;

	wil_dbg_misc(wil, "init TX sring: size=%u, ring_id=%u\n",
		     status_ring_size, ring_id);

	if (ring_id < 0)
		return ring_id;

	/* Allocate Tx status ring. Tx descriptor rings will be
	 * allocated on WMI connect event
	 */
	sring = &wil->srings[ring_id];

	sring->is_rx = false;
	sring->size = status_ring_size;
	sring->elem_size = sizeof(struct wil_ring_tx_status);
	rc = wil_sring_alloc(wil, sring);
	if (rc)
		return rc;

	rc = wil_wmi_tx_sring_cfg(wil, ring_id);
	if (rc)
		goto out_free;

	sring->desc_rdy_pol = 1;
	wil->tx_sring_idx = ring_id;

	return 0;
out_free:
	wil_sring_free(wil, sring);
	return rc;
}

/**
 * Allocate one skb for Rx descriptor RING
 */
static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
				   struct wil_ring *ring, u32 i)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = wil->rx_buf_len;
	dma_addr_t pa;
	u16 buff_id;
	struct list_head *active = &wil->rx_buff_mgmt.active;
	struct list_head *free = &wil->rx_buff_mgmt.free;
	struct wil_rx_buff *rx_buff;
	struct wil_rx_buff *buff_arr = wil->rx_buff_mgmt.buff_arr;
	struct sk_buff *skb;
	struct wil_rx_enhanced_desc dd, *d = &dd;
	struct wil_rx_enhanced_desc *_d = (struct wil_rx_enhanced_desc *)
		&ring->va[i].rx.enhanced;

	if (unlikely(list_empty(free))) {
		wil->rx_buff_mgmt.free_list_empty_cnt++;
		return -EAGAIN;
	}

	skb = dev_alloc_skb(sz);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, sz);

	/**
	 * Make sure that the network stack calculates checksum for packets
	 * which failed the HW checksum calculation
	 */
	skb->ip_summed = CHECKSUM_NONE;

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	/* Get the buffer ID - the index of the rx buffer in the buff_arr */
	rx_buff = list_first_entry(free, struct wil_rx_buff, list);
	buff_id = rx_buff->id;

	/* Move a buffer from the free list to the active list */
	list_move(&rx_buff->list, active);

	buff_arr[buff_id].skb = skb;

	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
	d->dma.length = cpu_to_le16(sz);
	d->mac.buff_id = cpu_to_le16(buff_id);
	*_d = *d;

	/* Save the physical address in skb->cb for later use in dma_unmap */
	memcpy(skb->cb, &pa, sizeof(pa));

	return 0;
}

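/* Rx buffer lifecycle: a free wil_rx_buff moves to the active list, its index
 * (buff_id) is programmed into the descriptor, and the skb pointer is parked
 * in buff_arr[buff_id] while the DMA address rides in skb->cb. On completion,
 * wil_sring_reap_rx_edma() recovers the skb via the buff_id carried in the
 * status message and returns the wil_rx_buff to the free list.
 */
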
static inline
void wil_get_next_rx_status_msg(struct wil_status_ring *sring, u8 *dr_bit,
				void *msg)
{
	struct wil_rx_status_compressed *_msg;

	_msg = (struct wil_rx_status_compressed *)
		(sring->va + (sring->elem_size * sring->swhead));
	*dr_bit = WIL_GET_BITS(_msg->d0, 31, 31);
	/* make sure dr_bit is read before the rest of status msg */
	rmb();
	memcpy(msg, (void *)_msg, sring->elem_size);
}

static inline void wil_sring_advance_swhead(struct wil_status_ring *sring)
{
	sring->swhead = (sring->swhead + 1) % sring->size;
	if (sring->swhead == 0)
		sring->desc_rdy_pol = 1 - sring->desc_rdy_pol;
}

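/* The descriptor-ready (DR) polarity scheme: status memory is zeroed at
 * allocation and desc_rdy_pol starts at 1, so an entry is "new" exactly when
 * its DR bit equals desc_rdy_pol. Toggling the expected polarity on every
 * swhead wraparound means stale entries from the previous lap are never
 * mistaken for new ones. E.g. for size 4: entries 0..3 are consumed while
 * DR==1 is expected, then after the wrap while DR==0, and so on.
 */
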
static int wil_rx_refill_edma(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;
	u32 next_head;
	int rc = 0;
	ring->swtail = *ring->edma_rx_swtail.va;

	for (; next_head = wil_ring_next_head(ring),
	     (next_head != ring->swtail);
	     ring->swhead = next_head) {
		rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
		if (unlikely(rc)) {
			if (rc == -EAGAIN)
				wil_dbg_txrx(wil, "No free buffer ID found\n");
			else
				wil_err_ratelimited(wil,
						    "Error %d in refill desc[%d]\n",
						    rc, ring->swhead);
			break;
		}
	}

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, ring->hwtail, ring->swhead);

	return rc;
}

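/* ring->edma_rx_swtail.va is the DMA-coherent word allocated in
 * wil_ring_alloc_desc_ring(); the refill loop treats it as the device's
 * current tail (presumably advanced by HW as it consumes descriptors) and
 * allocates skbs until swhead would catch up with it.
 */
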
static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
					      struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	struct list_head *active = &wil->rx_buff_mgmt.active;
	dma_addr_t pa;

	if (!wil->rx_buff_mgmt.buff_arr)
		return;

	while (!list_empty(active)) {
		struct wil_rx_buff *rx_buff =
			list_first_entry(active, struct wil_rx_buff, list);
		struct sk_buff *skb = rx_buff->skb;

		if (unlikely(!skb)) {
			wil_err(wil, "No Rx skb at buff_id %d\n", rx_buff->id);
		} else {
			rx_buff->skb = NULL;
			memcpy(&pa, skb->cb, sizeof(pa));
			dma_unmap_single(dev, pa, wil->rx_buf_len,
					 DMA_FROM_DEVICE);
			kfree_skb(skb);
		}

		/* Move the buffer from the active to the free list */
		list_move(&rx_buff->list, &wil->rx_buff_mgmt.free);
	}
}

static void wil_free_rx_buff_arr(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;

	if (!wil->rx_buff_mgmt.buff_arr)
		return;

	/* Move all the buffers to the free list in case active list is
	 * not empty in order to release all SKBs before deleting the array
	 */
	wil_move_all_rx_buff_to_free_list(wil, ring);

	kfree(wil->rx_buff_mgmt.buff_arr);
	wil->rx_buff_mgmt.buff_arr = NULL;
}

static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
				size_t size)
{
	struct wil_rx_buff *buff_arr;
	struct list_head *active = &wil->rx_buff_mgmt.active;
	struct list_head *free = &wil->rx_buff_mgmt.free;
	int i;

	wil->rx_buff_mgmt.buff_arr = kcalloc(size + 1,
					     sizeof(struct wil_rx_buff),
					     GFP_KERNEL);
	if (!wil->rx_buff_mgmt.buff_arr)
		return -ENOMEM;

	/* Set list heads */
	INIT_LIST_HEAD(active);
	INIT_LIST_HEAD(free);

	/* Linkify the list.
	 * buffer id 0 should not be used (marks invalid id).
	 */
	buff_arr = wil->rx_buff_mgmt.buff_arr;
	for (i = 1; i <= size; i++) {
		list_add(&buff_arr[i].list, free);
		buff_arr[i].id = i;
	}

	wil->rx_buff_mgmt.size = size + 1;

	return 0;
}

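/* buff_arr deliberately holds size + 1 entries: index 0 is never linked into
 * the free list because a buff_id of 0 in a status message marks an entry the
 * HW has not updated yet, so valid buffer IDs are 1..size.
 */
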
static int wil_init_rx_sring(struct wil6210_priv *wil,
			     u16 status_ring_size,
			     size_t elem_size,
			     u16 ring_id)
{
	struct wil_status_ring *sring = &wil->srings[ring_id];
	int rc;

	wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n",
		     status_ring_size, ring_id);

	memset(&sring->rx_data, 0, sizeof(sring->rx_data));

	sring->is_rx = true;
	sring->size = status_ring_size;
	sring->elem_size = elem_size;
	rc = wil_sring_alloc(wil, sring);
	if (rc)
		return rc;

	rc = wil_wmi_rx_sring_add(wil, ring_id);
	if (rc)
		goto out_free;

	sring->desc_rdy_pol = 1;

	return 0;
out_free:
	wil_sring_free(wil, sring);
	return rc;
}

static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
				    struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = ring->size * sizeof(ring->va[0]);

	wil_dbg_misc(wil, "alloc_desc_ring:\n");

	BUILD_BUG_ON(sizeof(ring->va[0]) != 32);

	ring->swhead = 0;
	ring->swtail = 0;
	ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL);
	if (!ring->ctx)
		goto err;

	ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
	if (!ring->va)
		goto err_free_ctx;

	if (ring->is_rx) {
		sz = sizeof(*ring->edma_rx_swtail.va);
		ring->edma_rx_swtail.va =
			dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
					   GFP_KERNEL);
		if (!ring->edma_rx_swtail.va)
			goto err_free_va;
	}

	wil_dbg_misc(wil, "%s ring[%d] 0x%p:%pad 0x%p\n",
		     ring->is_rx ? "RX" : "TX",
		     ring->size, ring->va, &ring->pa, ring->ctx);

	return 0;
err_free_va:
	dma_free_coherent(dev, ring->size * sizeof(ring->va[0]),
			  (void *)ring->va, ring->pa);
	ring->va = NULL;
err_free_ctx:
	kfree(ring->ctx);
	ring->ctx = NULL;
err:
	return -ENOMEM;
}

static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz;
	int ring_index = 0;

	if (!ring->va)
		return;

	sz = ring->size * sizeof(ring->va[0]);

	lockdep_assert_held(&wil->mutex);
	if (ring->is_rx) {
		wil_dbg_misc(wil, "free Rx ring [%d] 0x%p:%pad 0x%p\n",
			     ring->size, ring->va,
			     &ring->pa, ring->ctx);

		wil_move_all_rx_buff_to_free_list(wil, ring);
		dma_free_coherent(dev, sizeof(*ring->edma_rx_swtail.va),
				  ring->edma_rx_swtail.va,
				  ring->edma_rx_swtail.pa);
		goto out;
	}

	/* TX ring */
	ring_index = ring - wil->ring_tx;

	wil_dbg_misc(wil, "free Tx ring %d [%d] 0x%p:%pad 0x%p\n",
		     ring_index, ring->size, ring->va,
		     &ring->pa, ring->ctx);

	while (!wil_ring_is_empty(ring)) {
		struct wil_ctx *ctx;

		struct wil_tx_enhanced_desc dd, *d = &dd;
		struct wil_tx_enhanced_desc *_d =
			(struct wil_tx_enhanced_desc *)
			&ring->va[ring->swtail].tx.enhanced;

		ctx = &ring->ctx[ring->swtail];
		if (!ctx) {
			wil_dbg_txrx(wil,
				     "ctx(%d) was already completed\n",
				     ring->swtail);
			ring->swtail = wil_ring_next_tail(ring);
			continue;
		}
		*d = *_d;
		wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
		if (ctx->skb)
			dev_kfree_skb_any(ctx->skb);
		ring->swtail = wil_ring_next_tail(ring);
	}

out:
	dma_free_coherent(dev, sz, (void *)ring->va, ring->pa);
	kfree(ring->ctx);
	ring->pa = 0;
	ring->va = NULL;
	ring->ctx = NULL;
}

static int wil_init_rx_desc_ring(struct wil6210_priv *wil, u16 desc_ring_size,
				 int status_ring_id)
{
	struct wil_ring *ring = &wil->ring_rx;
	int rc;

	wil_dbg_misc(wil, "init RX desc ring\n");

	ring->size = desc_ring_size;
	ring->is_rx = true;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		return rc;

	rc = wil_wmi_rx_desc_ring_add(wil, status_ring_id);
	if (rc)
		goto out_free;

	return 0;
out_free:
	wil_ring_free_edma(wil, ring);
	return rc;
}

static void wil_get_reorder_params_edma(struct wil6210_priv *wil,
					struct sk_buff *skb, int *tid,
					int *cid, int *mid, u16 *seq,
					int *mcast, int *retry)
{
	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);

	*tid = wil_rx_status_get_tid(s);
	*cid = wil_rx_status_get_cid(s);
	*mid = wil_rx_status_get_mid(s);
	*seq = le16_to_cpu(wil_rx_status_get_seq(wil, s));
	*mcast = wil_rx_status_get_mcast(s);
	*retry = wil_rx_status_get_retry(s);
}

static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid,
					 int *security)
{
	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);

	*cid = wil_rx_status_get_cid(s);
	*security = wil_rx_status_get_security(s);
}

static int wil_rx_crypto_check_edma(struct wil6210_priv *wil,
				    struct sk_buff *skb)
{
	struct wil_rx_status_extended *st;
	int cid, tid, key_id, mc;
	struct wil_sta_info *s;
	struct wil_tid_crypto_rx *c;
	struct wil_tid_crypto_rx_single *cc;
	const u8 *pn;

	/* In HW reorder, HW is responsible for crypto check */
	if (wil->use_rx_hw_reordering)
		return 0;

	st = wil_skb_rxstatus(skb);

	cid = wil_rx_status_get_cid(st);
	tid = wil_rx_status_get_tid(st);
	key_id = wil_rx_status_get_key_id(st);
	mc = wil_rx_status_get_mcast(st);
	s = &wil->sta[cid];
	c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
	cc = &c->key_id[key_id];
	pn = (u8 *)&st->ext.pn_15_0;

	if (!cc->key_set) {
		wil_err_ratelimited(wil,
				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
				    cid, tid, mc, key_id);
		return -EINVAL;
	}

	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
		wil_err_ratelimited(wil,
				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
				    cid, tid, mc, key_id, pn, cc->pn);
		return -EINVAL;
	}
	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);

	return 0;
}

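/* The PN is compared with reverse_memcmp(), i.e. last byte first; the PN
 * bytes are apparently stored least-significant byte first (pn_15_0 before
 * pn_47_16), so a frame is accepted only when its PN is strictly greater
 * than the last PN recorded for this key, rejecting replays.
 */
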
static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
{
	struct wil_status_ring *sring;
	struct wil_rx_status_extended msg1;
	void *msg = &msg1;
	u8 dr_bit;
	int i;

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		sring = &wil->srings[i];
		if (!sring->va)
			continue;

		wil_get_next_rx_status_msg(sring, &dr_bit, msg);

		/* Check if there are unhandled RX status messages */
		if (dr_bit == sring->desc_rdy_pol)
			return false;
	}

	return true;
}

static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
{
	/* RX buffer size must be aligned to 4 bytes */
	wil->rx_buf_len = rx_large_buf ?
		WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT;
}

static int wil_rx_init_edma(struct wil6210_priv *wil, uint desc_ring_order)
{
	u16 status_ring_size, desc_ring_size = 1 << desc_ring_order;
	struct wil_ring *ring = &wil->ring_rx;
	int rc;
	size_t elem_size = wil->use_compressed_rx_status ?
		sizeof(struct wil_rx_status_compressed) :
		sizeof(struct wil_rx_status_extended);
	int i;

	/* In SW reorder one must use extended status messages */
	if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
		wil_err(wil,
			"compressed RX status cannot be used with SW reorder\n");
		return -EINVAL;
	}
	if (wil->rx_status_ring_order <= desc_ring_order)
		/* make sure sring is larger than desc ring */
		wil->rx_status_ring_order = desc_ring_order + 1;
	if (wil->rx_buff_id_count <= desc_ring_size)
		/* make sure we will not run out of buff_ids */
		wil->rx_buff_id_count = desc_ring_size + 512;
	if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
	    wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
		wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;

	status_ring_size = 1 << wil->rx_status_ring_order;

	wil_dbg_misc(wil,
		     "rx_init, desc_ring_size=%u, status_ring_size=%u, elem_size=%zu\n",
		     desc_ring_size, status_ring_size, elem_size);

	wil_rx_buf_len_init_edma(wil);

	/* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
	if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
		wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;

	wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
		     wil->num_rx_status_rings);

	rc = wil_wmi_cfg_def_rx_offload(wil, wil->rx_buf_len);
	if (rc)
		return rc;

	/* Allocate status ring */
	for (i = 0; i < wil->num_rx_status_rings; i++) {
		int sring_id = wil_find_free_sring(wil);

		if (sring_id < 0) {
			rc = -EFAULT;
			goto err_free_status;
		}
		rc = wil_init_rx_sring(wil, status_ring_size, elem_size,
				       sring_id);
		if (rc)
			goto err_free_status;
	}

	/* Allocate descriptor ring */
	rc = wil_init_rx_desc_ring(wil, desc_ring_size,
				   WIL_DEFAULT_RX_STATUS_RING_ID);
	if (rc)
		goto err_free_status;

	if (wil->rx_buff_id_count >= status_ring_size) {
		wil_info(wil,
			 "rx_buff_id_count %d exceeds sring_size %d. set it to %d\n",
			 wil->rx_buff_id_count, status_ring_size,
			 status_ring_size - 1);
		wil->rx_buff_id_count = status_ring_size - 1;
	}

	/* Allocate Rx buffer array */
	rc = wil_init_rx_buff_arr(wil, wil->rx_buff_id_count);
	if (rc)
		goto err_free_desc;

	/* Fill descriptor ring with credits */
	rc = wil_rx_refill_edma(wil);
	if (rc)
		goto err_free_rx_buff_arr;

	return 0;

err_free_rx_buff_arr:
	wil_free_rx_buff_arr(wil);
err_free_desc:
	wil_ring_free_edma(wil, ring);
err_free_status:
	for (i = 0; i < wil->num_rx_status_rings; i++)
		wil_sring_free(wil, &wil->srings[i]);

	return rc;
}

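/* Rx init order: WMI default offload config, then the status rings, then the
 * single Rx descriptor ring bound to WIL_DEFAULT_RX_STATUS_RING_ID, then the
 * buff_id array, and finally an initial refill that hands all credits to the
 * HW; the error labels unwind in reverse order.
 */
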
static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
				 int size, int cid, int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	int rc;
	struct wil_ring *ring = &wil->ring_tx[ring_id];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

	lockdep_assert_held(&wil->mutex);

	wil_dbg_misc(wil,
		     "init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n",
		     ring_id, cid, tid, wil->tx_sring_idx);

	wil_tx_data_init(txdata);
	ring->size = size;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		goto out;

	wil->ring2cid_tid[ring_id][0] = cid;
	wil->ring2cid_tid[ring_id][1] = tid;
	if (!vif->privacy)
		txdata->dot1x_open = true;

	rc = wil_wmi_tx_desc_ring_add(vif, ring_id, cid, tid);
	if (rc) {
		wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed\n");
		goto out_free;
	}

	if (txdata->dot1x_open && agg_wsize >= 0)
		wil_addba_tx_request(wil, ring_id, agg_wsize);

	return 0;
 out_free:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil_ring_free_edma(wil, ring);
	wil->ring2cid_tid[ring_id][0] = wil->max_assoc_sta;
	wil->ring2cid_tid[ring_id][1] = 0;

 out:
	return rc;
}

static int wil_tx_ring_modify_edma(struct wil6210_vif *vif, int ring_id,
				   int cid, int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);

	wil_err(wil, "ring modify is not supported for EDMA\n");

	return -EOPNOTSUPP;
}

/* This function is used only for RX SW reorder */
static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid,
			 struct sk_buff *skb, struct wil_net_stats *stats)
{
	u8 ftype;
	u8 fc1;
	int mid;
	int tid;
	u16 seq;
	struct wil6210_vif *vif;

	ftype = wil_rx_status_get_frame_type(wil, msg);
	if (ftype == IEEE80211_FTYPE_DATA)
		return 0;

	fc1 = wil_rx_status_get_fc1(wil, msg);
	mid = wil_rx_status_get_mid(msg);
	tid = wil_rx_status_get_tid(msg);
	seq = le16_to_cpu(wil_rx_status_get_seq(wil, msg));
	vif = wil->vifs[mid];

	if (unlikely(!vif)) {
		wil_dbg_txrx(wil, "RX descriptor with invalid mid %d", mid);
		return -EAGAIN;
	}

	wil_dbg_txrx(wil,
		     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
		     fc1, mid, cid, tid, seq);
	if (stats)
		stats->rx_non_data_frame++;
	if (wil_is_back_req(fc1)) {
		wil_dbg_txrx(wil,
			     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
			     mid, cid, tid, seq);
		wil_rx_bar(wil, vif, cid, tid, seq);
	} else {
		u32 sz = wil->use_compressed_rx_status ?
			sizeof(struct wil_rx_status_compressed) :
			sizeof(struct wil_rx_status_extended);

		/* print again all info. One can enable only this
		 * without overhead for printing every Rx frame
		 */
		wil_dbg_txrx(wil,
			     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
			     fc1, mid, cid, tid, seq);
		wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)msg, sz, false);
		wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
				  skb->data, skb_headlen(skb), false);
	}

	return -EAGAIN;
}

*wil
,
814 struct wil_net_stats
*stats
)
817 void *msg
= wil_skb_rxstatus(skb
);
819 l2_rx_status
= wil_rx_status_get_l2_rx_status(msg
);
820 if (l2_rx_status
!= 0) {
821 wil_dbg_txrx(wil
, "L2 RX error, l2_rx_status=0x%x\n",
823 /* Due to HW issue, KEY error will trigger a MIC error */
824 if (l2_rx_status
== WIL_RX_EDMA_ERROR_MIC
) {
825 wil_err_ratelimited(wil
,
826 "L2 MIC/KEY error, dropping packet\n");
827 stats
->rx_mic_error
++;
829 if (l2_rx_status
== WIL_RX_EDMA_ERROR_KEY
) {
830 wil_err_ratelimited(wil
,
831 "L2 KEY error, dropping packet\n");
832 stats
->rx_key_error
++;
834 if (l2_rx_status
== WIL_RX_EDMA_ERROR_REPLAY
) {
835 wil_err_ratelimited(wil
,
836 "L2 REPLAY error, dropping packet\n");
839 if (l2_rx_status
== WIL_RX_EDMA_ERROR_AMSDU
) {
840 wil_err_ratelimited(wil
,
841 "L2 AMSDU error, dropping packet\n");
842 stats
->rx_amsdu_error
++;
847 skb
->ip_summed
= wil_rx_status_get_checksum(msg
, stats
);
static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
					      struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	struct wil_rx_status_extended msg1;
	void *msg = &msg1;
	u16 buff_id;
	struct sk_buff *skb;
	dma_addr_t pa;
	struct wil_ring_rx_data *rxdata = &sring->rx_data;
	unsigned int sz = wil->rx_buf_len;
	struct wil_net_stats *stats = NULL;
	u16 dmalen;
	int cid;
	bool eop, headstolen;
	int delta;
	u8 dr_bit;
	u8 data_offset;
	struct wil_rx_status_extended *s;
	u16 sring_idx = sring - wil->srings;
	int invalid_buff_id_retry;

	BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));

again:
	wil_get_next_rx_status_msg(sring, &dr_bit, msg);

	/* Completed handling all the ready status messages */
	if (dr_bit != sring->desc_rdy_pol)
		return NULL;

	/* Extract the buffer ID from the status message */
	buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));

	invalid_buff_id_retry = 0;
	while (!buff_id) {
		struct wil_rx_status_extended *s;

		wil_dbg_txrx(wil,
			     "buff_id is not updated yet by HW, (swhead 0x%x)\n",
			     sring->swhead);
		if (++invalid_buff_id_retry > MAX_INVALID_BUFF_ID_RETRY)
			break;

		/* Read the status message again */
		s = (struct wil_rx_status_extended *)
			(sring->va + (sring->elem_size * sring->swhead));
		*(struct wil_rx_status_extended *)msg = *s;
		buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
	}

	if (unlikely(!wil_val_in_range(buff_id, 1, wil->rx_buff_mgmt.size))) {
		wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
			buff_id, sring->swhead);
		print_hex_dump(KERN_ERR, "RxS ", DUMP_PREFIX_OFFSET, 16, 1,
			       msg, wil->use_compressed_rx_status ?
			       sizeof(struct wil_rx_status_compressed) :
			       sizeof(struct wil_rx_status_extended), false);

		wil_rx_status_reset_buff_id(sring);
		wil_sring_advance_swhead(sring);
		sring->invalid_buff_id_cnt++;
		goto again;
	}

	/* Extract the SKB from the rx_buff management array */
	skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
	wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
	if (!skb) {
		wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
		wil_rx_status_reset_buff_id(sring);
		/* Move the buffer from the active list to the free list */
		list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
			       &wil->rx_buff_mgmt.free);
		wil_sring_advance_swhead(sring);
		sring->invalid_buff_id_cnt++;
		goto again;
	}

	wil_rx_status_reset_buff_id(sring);
	wil_sring_advance_swhead(sring);

	memcpy(&pa, skb->cb, sizeof(pa));
	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(wil_rx_status_get_length(msg));

	trace_wil6210_rx_status(wil, wil->use_compressed_rx_status, buff_id,
				msg);
	wil_dbg_txrx(wil, "Rx, buff_id=%u, sring_idx=%u, dmalen=%u bytes\n",
		     buff_id, sring_idx, dmalen);
	wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)msg, wil->use_compressed_rx_status ?
			  sizeof(struct wil_rx_status_compressed) :
			  sizeof(struct wil_rx_status_extended), false);

	/* Move the buffer from the active list to the free list */
	list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
		       &wil->rx_buff_mgmt.free);

= wil_rx_status_get_eop(msg
);
953 cid
= wil_rx_status_get_cid(msg
);
954 if (unlikely(!wil_val_in_range(cid
, 0, wil
->max_assoc_sta
))) {
955 wil_err(wil
, "Corrupt cid=%d, sring->swhead=%d\n",
957 rxdata
->skipping
= true;
960 stats
= &wil
->sta
[cid
].stats
;
962 if (unlikely(dmalen
< ETH_HLEN
)) {
963 wil_dbg_txrx(wil
, "Short frame, len = %d\n", dmalen
);
964 stats
->rx_short_frame
++;
965 rxdata
->skipping
= true;
969 if (unlikely(dmalen
> sz
)) {
970 wil_err(wil
, "Rx size too large: %d bytes!\n", dmalen
);
971 print_hex_dump(KERN_ERR
, "RxS ", DUMP_PREFIX_OFFSET
, 16, 1,
972 msg
, wil
->use_compressed_rx_status
?
973 sizeof(struct wil_rx_status_compressed
) :
974 sizeof(struct wil_rx_status_extended
), false);
976 stats
->rx_large_frame
++;
977 rxdata
->skipping
= true;
981 /* skipping indicates if a certain SKB should be dropped.
982 * It is set in case there is an error on the current SKB or in case
983 * of RX chaining: as long as we manage to merge the SKBs it will
984 * be false. once we have a bad SKB or we don't manage to merge SKBs
985 * it will be set to the !EOP value of the current SKB.
986 * This guarantees that all the following SKBs until EOP will also
989 if (unlikely(rxdata
->skipping
)) {
992 kfree_skb(rxdata
->skb
);
995 rxdata
->skipping
= !eop
;
	skb_trim(skb, dmalen);

	prefetch(skb->data);

	if (!rxdata->skb) {
		rxdata->skb = skb;
	} else {
		if (likely(skb_try_coalesce(rxdata->skb, skb, &headstolen,
					    &delta))) {
			kfree_skb_partial(skb, headstolen);
		} else {
			wil_err(wil, "failed to merge skbs!\n");
			kfree_skb(skb);
			kfree_skb(rxdata->skb);
			rxdata->skb = NULL;
			rxdata->skipping = !eop;
			goto again;
		}
	}

	if (!eop)
		goto again;

1025 rxdata
->skipping
= false;
1028 stats
->last_mcs_rx
= wil_rx_status_get_mcs(msg
);
1029 if (stats
->last_mcs_rx
< ARRAY_SIZE(stats
->rx_per_mcs
))
1030 stats
->rx_per_mcs
[stats
->last_mcs_rx
]++;
1032 stats
->last_cb_mode_rx
= wil_rx_status_get_cb_mode(msg
);
1035 if (!wil
->use_rx_hw_reordering
&& !wil
->use_compressed_rx_status
&&
1036 wil_check_bar(wil
, msg
, cid
, skb
, stats
) == -EAGAIN
) {
1041 /* Compensate for the HW data alignment according to the status
1044 data_offset
= wil_rx_status_get_data_offset(msg
);
1045 if (data_offset
== 0xFF ||
1046 data_offset
> WIL_EDMA_MAX_DATA_OFFSET
) {
1047 wil_err(wil
, "Unexpected data offset %d\n", data_offset
);
1052 skb_pull(skb
, data_offset
);
1054 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET
, 16, 1,
1055 skb
->data
, skb_headlen(skb
), false);
1057 /* Has to be done after dma_unmap_single as skb->cb is also
1058 * used for holding the pa
1060 s
= wil_skb_rxstatus(skb
);
1061 memcpy(s
, msg
, sring
->elem_size
);
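/* Reap contract, as implemented above: one status message is consumed per
 * iteration, chained fragments are merged into rxdata->skb with
 * skb_try_coalesce(), a complete skb is returned only on EOP, and NULL is
 * returned once the next message's DR bit no longer matches the ring
 * polarity (nothing left to handle).
 */
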
void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev;
	struct wil_ring *ring = &wil->ring_rx;
	struct wil_status_ring *sring;
	struct sk_buff *skb;
	int i;

	if (unlikely(!ring->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "rx_handle\n");

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		sring = &wil->srings[i];
		if (unlikely(!sring->va)) {
			wil_err(wil,
				"Rx IRQ while Rx status ring %d not yet initialized\n",
				i);
			continue;
		}

		while ((*quota > 0) &&
		       (NULL != (skb =
			wil_sring_reap_rx_edma(wil, sring)))) {
			(*quota)--;
			if (wil->use_rx_hw_reordering) {
				void *msg = wil_skb_rxstatus(skb);
				int mid = wil_rx_status_get_mid(msg);
				struct wil6210_vif *vif = wil->vifs[mid];

				if (unlikely(!vif)) {
					wil_dbg_txrx(wil,
						     "RX desc invalid mid %d",
						     mid);
					kfree_skb(skb);
					continue;
				}
				ndev = vif_to_ndev(vif);
				wil_netif_rx_any(skb, ndev);
			} else {
				wil_rx_reorder(wil, skb);
			}
		}

		wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
	}

	wil_rx_refill_edma(wil);
}

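/* wil_rx_handle_edma() is the Rx bottom half; *quota is decremented once per
 * packet handed to the stack, so a NAPI-style caller can honor its budget.
 * A hedged usage sketch (the exact caller is an assumption):
 *
 *	int quota = budget;
 *
 *	wil_rx_handle_edma(wil, &quota);
 *	done = budget - quota;	 (packets delivered in this poll)
 *
 * The hwtail write above publishes swhead - 1, i.e. the last status entry
 * already consumed.
 */
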
static int wil_tx_desc_map_edma(union wil_tx_desc *desc,
				dma_addr_t pa,
				u32 len,
				int ring_index)
{
	struct wil_tx_enhanced_desc *d =
		(struct wil_tx_enhanced_desc *)&desc->enhanced;

	memset(d, 0, sizeof(struct wil_tx_enhanced_desc));

	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);

	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
	d->dma.length = cpu_to_le16((u16)len);
	d->mac.d[0] = (ring_index << WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS);
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi;
	 * 3 - eth mode
	 */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (0x3 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}

static inline void
wil_get_next_tx_status_msg(struct wil_status_ring *sring, u8 *dr_bit,
			   struct wil_ring_tx_status *msg)
{
	struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *)
		(sring->va + (sring->elem_size * sring->swhead));

	*dr_bit = _msg->desc_ready >> TX_STATUS_DESC_READY_POS;
	/* make sure dr_bit is read before the rest of status msg */
	rmb();
	*msg = *_msg;
}

/**
 * Clean up transmitted skb's from the Tx descriptor RING.
 * Return number of descriptors cleared.
 */
int wil_tx_sring_handler(struct wil6210_priv *wil,
			 struct wil_status_ring *sring)
{
	struct net_device *ndev;
	struct device *dev = wil_to_dev(wil);
	struct wil_ring *ring = NULL;
	struct wil_ring_tx_data *txdata;
	/* Total number of completed descriptors in all descriptor rings */
	int desc_cnt = 0;
	int cid;
	struct wil_net_stats *stats;
	struct wil_tx_enhanced_desc *_d;
	unsigned int ring_id;
	unsigned int num_descs, num_statuses = 0;
	int i;
	u8 dr_bit; /* Descriptor Ready bit */
	struct wil_ring_tx_status msg;
	struct wil6210_vif *vif;
	int used_before_complete;
	int used_new;

	wil_get_next_tx_status_msg(sring, &dr_bit, &msg);

	/* Process completion messages while DR bit has the expected polarity */
	while (dr_bit == sring->desc_rdy_pol) {
		num_descs = msg.num_descriptors;
		if (!num_descs) {
			wil_err(wil, "invalid num_descs 0\n");
			goto again;
		}

		/* Find the corresponding descriptor ring */
		ring_id = msg.ring_id;

		if (unlikely(ring_id >= WIL6210_MAX_TX_RINGS)) {
			wil_err(wil, "invalid ring id %d\n", ring_id);
			goto again;
		}
		ring = &wil->ring_tx[ring_id];
		if (unlikely(!ring->va)) {
			wil_err(wil, "Tx irq[%d]: ring not initialized\n",
				ring_id);
			goto again;
		}
		txdata = &wil->ring_tx_data[ring_id];
		if (unlikely(!txdata->enabled)) {
			wil_info(wil, "Tx irq[%d]: ring disabled\n", ring_id);
			goto again;
		}
		vif = wil->vifs[txdata->mid];
		if (unlikely(!vif)) {
			wil_dbg_txrx(wil, "invalid MID %d for ring %d\n",
				     txdata->mid, ring_id);
			goto again;
		}

		ndev = vif_to_ndev(vif);

		cid = wil->ring2cid_tid[ring_id][0];
		stats = (cid < wil->max_assoc_sta) ? &wil->sta[cid].stats :
						     NULL;

		wil_dbg_txrx(wil,
			     "tx_status: completed desc_ring (%d), num_descs (%d)\n",
			     ring_id, num_descs);

		used_before_complete = wil_ring_used_tx(ring);

		for (i = 0 ; i < num_descs; ++i) {
			struct wil_ctx *ctx = &ring->ctx[ring->swtail];
			struct wil_tx_enhanced_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb = ctx->skb;

			_d = (struct wil_tx_enhanced_desc *)
				&ring->va[ring->swtail].tx.enhanced;
			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_status(&msg, ring->swtail, dmalen);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x\n",
				     ring_id, ring->swtail, dmalen,
				     msg.status);
			wil_hex_dump_txrx("TxS ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)&msg, sizeof(msg),
					  false);

			wil_tx_desc_unmap_edma(dev,
					       (union wil_tx_desc *)d,
					       ctx);

			if (skb) {
				if (likely(msg.status == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;

						wil_tx_latency_calc(wil, skb,
							&wil->sta[cid]);
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}

				if (skb->protocol == cpu_to_be16(ETH_P_PAE))
					wil_tx_complete_handle_eapol(vif, skb);

				wil_consume_skb(skb, msg.status == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the tail
			 * to prevent a case where wil_tx_ring will see
			 * this descriptor as used and handle it before ctx zero
			 * is completed.
			 */
			wmb();

			ring->swtail = wil_ring_next_tail(ring);

			desc_cnt++;
		}

		/* performance monitoring */
		used_new = wil_ring_used_tx(ring);
		if (wil_val_in_range(wil->ring_idle_trsh,
				     used_new, used_before_complete)) {
			wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
				     ring_id, used_before_complete, used_new);
			txdata->last_idle = get_cycles();
		}

again:
		num_statuses++;
		if (num_statuses % WIL_EDMA_TX_SRING_UPDATE_HW_TAIL == 0)
			/* update HW tail to allow HW to push new statuses */
			wil_w(wil, sring->hwtail, sring->swhead);

		wil_sring_advance_swhead(sring);

		wil_get_next_tx_status_msg(sring, &dr_bit, &msg);
	}

	/* shall we wake net queues? */
	if (desc_cnt)
		wil_update_net_queues(wil, vif, NULL, false);

	if (num_statuses % WIL_EDMA_TX_SRING_UPDATE_HW_TAIL != 0)
		/* Update the HW tail ptr (RD ptr) */
		wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);

	return desc_cnt;
}

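/* hwtail write-back above is batched: once every
 * WIL_EDMA_TX_SRING_UPDATE_HW_TAIL statuses inside the loop, plus a final
 * write after the loop when the count did not land on a multiple, so the HW
 * read pointer is always left up to date without a register write per status.
 */
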
/**
 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */
static void wil_tx_desc_offload_setup_tso_edma(struct wil_tx_enhanced_desc *d,
					       int tso_desc_type, bool is_ipv4,
					       int tcp_hdr_len,
					       int skb_net_hdr_len,
					       int mss)
{
	/* Number of descriptors */
	d->mac.d[2] |= 1;
	/* Maximum Segment Size */
	d->mac.tso_mss |= cpu_to_le16(mss >> 2);
	/* L4 header len: TCP header length */
	d->dma.l4_hdr_len |= tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK;
	/* EOP, TSO desc type, Segmentation enable,
	 * Insert IPv4 and TCP / UDP Checksum
	 */
	d->dma.cmd |= BIT(WIL_EDMA_DESC_TX_CFG_EOP_POS) |
		      tso_desc_type << WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS |
		      BIT(WIL_EDMA_DESC_TX_CFG_SEG_EN_POS) |
		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS) |
		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS);
	/* Calculate pseudo-header */
	d->dma.w1 |= BIT(WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS) |
		     BIT(WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS);
	/* IP Header Length */
	d->dma.ip_length |= skb_net_hdr_len;
	/* MAC header length and IP address family*/
	d->dma.b11 |= ETH_HLEN |
		      is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
}

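/* Note: the MSS is programmed as mss >> 2, i.e. apparently in units of
 * 4 bytes as the HW expects, and tso_desc_type distinguishes the header-only
 * descriptor from the first/middle/last data descriptors of the segmented
 * stream.
 */
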
static int wil_tx_tso_gen_desc(struct wil6210_priv *wil, void *buff_addr,
			       int len, uint i, int tso_desc_type,
			       skb_frag_t *frag, struct wil_ring *ring,
			       struct sk_buff *skb, bool is_ipv4,
			       int tcp_hdr_len, int skb_net_hdr_len,
			       int mss, int *descs_used)
{
	struct device *dev = wil_to_dev(wil);
	struct wil_tx_enhanced_desc *_desc = (struct wil_tx_enhanced_desc *)
		&ring->va[i].tx.enhanced;
	struct wil_tx_enhanced_desc desc_mem, *d = &desc_mem;
	int ring_index = ring - wil->ring_tx;
	dma_addr_t pa;

	if (len == 0)
		return 0;

	if (!frag) {
		pa = dma_map_single(dev, buff_addr, len, DMA_TO_DEVICE);
		ring->ctx[i].mapped_as = wil_mapped_as_single;
	} else {
		pa = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		ring->ctx[i].mapped_as = wil_mapped_as_page;
	}
	if (unlikely(dma_mapping_error(dev, pa))) {
		wil_err(wil, "TSO: Skb DMA map error\n");
		return -EINVAL;
	}

	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa,
				  len, ring_index);
	wil_tx_desc_offload_setup_tso_edma(d, tso_desc_type, is_ipv4,
					   tcp_hdr_len,
					   skb_net_hdr_len, mss);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	if (tso_desc_type == wil_tso_type_lst)
		ring->ctx[i].skb = skb_get(skb);

	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	*_desc = *d;
	(*descs_used)++;

	return 0;
}

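/* Only the descriptor typed wil_tso_type_lst stores an skb reference
 * (skb_get()): completion accounting in wil_tx_sring_handler() keys off
 * ctx->skb, and keeping the reference solely on the last descriptor makes
 * the skb be consumed exactly once per packet, after all of its descriptors
 * have completed.
 */
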
static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
				  struct wil6210_vif *vif,
				  struct wil_ring *ring,
				  struct sk_buff *skb)
{
	int ring_index = ring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */
	int used, avail = wil_ring_avail_tx(ring);
	int f, hdrlen, headlen;
	int gso_type;
	bool is_ipv4;
	u32 swhead = ring->swhead;
	int descs_used = 0; /* total number of used descriptors */
	int rc = -EINVAL;
	int tcp_hdr_len;
	int skb_net_hdr_len;
	int mss = skb_shinfo(skb)->gso_size;

	wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len,
		     ring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < min_desc_required)) {
		wil_err_ratelimited(wil,
				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
				    ring_index, min_desc_required);
		return -ENOMEM;
	}

	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
	switch (gso_type) {
	case SKB_GSO_TCPV4:
		is_ipv4 = true;
		break;
	case SKB_GSO_TCPV6:
		is_ipv4 = false;
		break;
	default:
		return -EINVAL;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return -EINVAL;

	/* tcp header length and skb network header length are fixed for all
	 * packet's descriptors - read them once here
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	skb_net_hdr_len = skb_network_header_len(skb);

	/* First descriptor must contain the header only
	 * Header Length = MAC header len + IP header len + TCP header len
	 */
	hdrlen = ETH_HLEN + tcp_hdr_len + skb_net_hdr_len;
	wil_dbg_txrx(wil, "TSO: process header descriptor, hdrlen %u\n",
		     hdrlen);
	rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead,
				 wil_tso_type_hdr, NULL, ring, skb,
				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
				 mss, &descs_used);
	if (rc)
		return -EINVAL;

	/* Second descriptor contains the head */
	headlen = skb_headlen(skb) - hdrlen;
	wil_dbg_txrx(wil, "TSO: process skb head, headlen %u\n", headlen);
	rc = wil_tx_tso_gen_desc(wil, skb->data + hdrlen, headlen,
				 (swhead + descs_used) % ring->size,
				 (nr_frags != 0) ? wil_tso_type_first :
				 wil_tso_type_lst, NULL, ring, skb,
				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
				 mss, &descs_used);
	if (rc)
		goto mem_error;

1484 for (f
= 0; f
< nr_frags
; f
++) {
1485 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[f
];
1486 int len
= skb_frag_size(frag
);
1488 wil_dbg_txrx(wil
, "TSO: frag[%d]: len %u, descs_used %d\n", f
,
1491 rc
= wil_tx_tso_gen_desc(wil
, NULL
, len
,
1492 (swhead
+ descs_used
) % ring
->size
,
1493 (f
!= nr_frags
- 1) ?
1494 wil_tso_type_mid
: wil_tso_type_lst
,
1495 frag
, ring
, skb
, is_ipv4
,
1496 tcp_hdr_len
, skb_net_hdr_len
,
1502 /* performance monitoring */
1503 used
= wil_ring_used_tx(ring
);
1504 if (wil_val_in_range(wil
->ring_idle_trsh
,
1505 used
, used
+ descs_used
)) {
1506 txdata
->idle
+= get_cycles() - txdata
->last_idle
;
1507 wil_dbg_txrx(wil
, "Ring[%2d] not idle %d -> %d\n",
1508 ring_index
, used
, used
+ descs_used
);
1511 /* advance swhead */
1512 wil_ring_advance_head(ring
, descs_used
);
1513 wil_dbg_txrx(wil
, "TSO: Tx swhead %d -> %d\n", swhead
, ring
->swhead
);
1515 /* make sure all writes to descriptors (shared memory) are done before
1516 * committing them to HW
1520 if (wil
->tx_latency
)
1521 *(ktime_t
*)&skb
->cb
= ktime_get();
1523 memset(skb
->cb
, 0, sizeof(ktime_t
));
1525 wil_w(wil
, ring
->hwtail
, ring
->swhead
);
1530 while (descs_used
> 0) {
1531 struct device
*dev
= wil_to_dev(wil
);
1532 struct wil_ctx
*ctx
;
1533 int i
= (swhead
+ descs_used
- 1) % ring
->size
;
1534 struct wil_tx_enhanced_desc dd
, *d
= &dd
;
1535 struct wil_tx_enhanced_desc
*_desc
=
1536 (struct wil_tx_enhanced_desc
*)
1537 &ring
->va
[i
].tx
.enhanced
;
1540 ctx
= &ring
->ctx
[i
];
1541 wil_tx_desc_unmap_edma(dev
, (union wil_tx_desc
*)d
, ctx
);
1542 memset(ctx
, 0, sizeof(*ctx
));
static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id,
				    int size)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	struct wil_ring *ring = &wil->ring_tx[ring_id];
	int rc;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

	wil_dbg_misc(wil, "init bcast: ring_id=%d, sring_id=%d\n",
		     ring_id, wil->tx_sring_idx);

	lockdep_assert_held(&wil->mutex);

	wil_tx_data_init(txdata);
	ring->size = size;
	ring->is_rx = false;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		goto out;

	wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; /* CID */
	wil->ring2cid_tid[ring_id][1] = 0; /* TID */

	if (!vif->privacy)
		txdata->dot1x_open = true;

	rc = wil_wmi_bcast_desc_ring_add(vif, ring_id);
	if (rc)
		goto out_free;

	return 0;

 out_free:
	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0;
	txdata->dot1x_open = false;
	spin_unlock_bh(&txdata->lock);
	wil_ring_free_edma(wil, ring);

 out:
	return rc;
}

static void wil_tx_fini_edma(struct wil6210_priv *wil)
{
	struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];

	wil_dbg_misc(wil, "free TX sring\n");

	wil_sring_free(wil, sring);
}

static void wil_rx_data_free(struct wil_status_ring *sring)
{
	if (!sring)
		return;

	kfree_skb(sring->rx_data.skb);
	sring->rx_data.skb = NULL;
}

static void wil_rx_fini_edma(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;
	int i;

	wil_dbg_misc(wil, "rx_fini_edma\n");

	wil_ring_free_edma(wil, ring);

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		wil_rx_data_free(&wil->srings[i]);
		wil_sring_free(wil, &wil->srings[i]);
	}

	wil_free_rx_buff_arr(wil);
}

void wil_init_txrx_ops_edma(struct wil6210_priv *wil)
{
	wil->txrx_ops.configure_interrupt_moderation =
		wil_configure_interrupt_moderation_edma;
	/* TX ops */
	wil->txrx_ops.ring_init_tx = wil_ring_init_tx_edma;
	wil->txrx_ops.ring_fini_tx = wil_ring_free_edma;
	wil->txrx_ops.ring_init_bcast = wil_ring_init_bcast_edma;
	wil->txrx_ops.tx_init = wil_tx_init_edma;
	wil->txrx_ops.tx_fini = wil_tx_fini_edma;
	wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
	wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
	wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
	wil->txrx_ops.tx_ring_modify = wil_tx_ring_modify_edma;
	/* RX ops */
	wil->txrx_ops.rx_init = wil_rx_init_edma;
	wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;
	wil->txrx_ops.get_reorder_params = wil_get_reorder_params_edma;
	wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params_edma;
	wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check_edma;
	wil->txrx_ops.rx_error_check = wil_rx_error_check_edma;
	wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma;
	wil->txrx_ops.rx_fini = wil_rx_fini_edma;
}
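
/* These assignments form the enhanced-DMA vtable: the datapath calls through
 * wil->txrx_ops so it stays agnostic of the DMA generation; the legacy
 * counterparts are presumably installed by the corresponding legacy init
 * when enhanced DMA is not in use.
 */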