// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
 */

#include <linux/etherdevice.h>
#include <linux/moduleparam.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

#include "wil6210.h"
#include "txrx_edma.h"
#include "txrx.h"
#include "trace.h"

/* Max number of entries (packets to complete) to update the hwtail of tx
 * status ring. Should be power of 2
 */
#define WIL_EDMA_TX_SRING_UPDATE_HW_TAIL 128

#define WIL_EDMA_MAX_DATA_OFFSET (2)
/* RX buffer size must be aligned to 4 bytes */
#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)

#define MAX_INVALID_BUFF_ID_RETRY (3)

static void wil_tx_desc_unmap_edma(struct device *dev,
				   union wil_tx_desc *desc,
				   struct wil_ctx *ctx)
{
	struct wil_tx_enhanced_desc *d = (struct wil_tx_enhanced_desc *)desc;
	dma_addr_t pa = wil_tx_desc_get_addr_edma(&d->dma);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}

static int wil_find_free_sring(struct wil6210_priv *wil)
{
	int i;

	for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++) {
		if (!wil->srings[i].va)
			return i;
	}

	return -EINVAL;
}

static void wil_sring_free(struct wil6210_priv *wil,
			   struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz;

	if (!sring || !sring->va)
		return;

	sz = sring->elem_size * sring->size;

	wil_dbg_misc(wil, "status_ring_free, size(bytes)=%zu, 0x%p:%pad\n",
		     sz, sring->va, &sring->pa);

	dma_free_coherent(dev, sz, (void *)sring->va, sring->pa);
	sring->pa = 0;
	sring->va = NULL;
}

static int wil_sring_alloc(struct wil6210_priv *wil,
			   struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = sring->elem_size * sring->size;

	wil_dbg_misc(wil, "status_ring_alloc: size=%zu\n", sz);

	if (sz == 0) {
		wil_err(wil, "Cannot allocate a zero size status ring\n");
		return -EINVAL;
	}

	sring->swhead = 0;

	/* Status messages are allocated and initialized to 0. This is necessary
	 * since DR bit should be initialized to 0.
	 */
	sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
	if (!sring->va)
		return -ENOMEM;

	wil_dbg_misc(wil, "status_ring[%d] 0x%p:%pad\n", sring->size, sring->va,
		     &sring->pa);

	return 0;
}

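/* The TX path uses a single status ring shared by all TX descriptor
 * rings; completions are demuxed by the ring_id carried in each status
 * message (see wil_tx_sring_handler()).
 */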
static int wil_tx_init_edma(struct wil6210_priv *wil)
{
	int ring_id = wil_find_free_sring(wil);
	struct wil_status_ring *sring;
	int rc;
	u16 status_ring_size;

	if (wil->tx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
	    wil->tx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
		wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;

	status_ring_size = 1 << wil->tx_status_ring_order;

	wil_dbg_misc(wil, "init TX sring: size=%u, ring_id=%u\n",
		     status_ring_size, ring_id);

	if (ring_id < 0)
		return ring_id;

	/* Allocate Tx status ring. Tx descriptor rings will be
	 * allocated on WMI connect event
	 */
	sring = &wil->srings[ring_id];

	sring->is_rx = false;
	sring->size = status_ring_size;
	sring->elem_size = sizeof(struct wil_ring_tx_status);
	rc = wil_sring_alloc(wil, sring);
	if (rc)
		return rc;

	rc = wil_wmi_tx_sring_cfg(wil, ring_id);
	if (rc)
		goto out_free;

	sring->desc_rdy_pol = 1;
	wil->tx_sring_idx = ring_id;

	return 0;
out_free:
	wil_sring_free(wil, sring);
	return rc;
}

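/* Rx buffers are tracked by buff_id: each skb is parked in
 * rx_buff_mgmt.buff_arr and its id is written into the descriptor, so
 * the completion path can recover the skb from the status message alone.
 */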
/* Allocate one skb for Rx descriptor RING */
static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
				   struct wil_ring *ring, u32 i)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = wil->rx_buf_len;
	dma_addr_t pa;
	u16 buff_id;
	struct list_head *active = &wil->rx_buff_mgmt.active;
	struct list_head *free = &wil->rx_buff_mgmt.free;
	struct wil_rx_buff *rx_buff;
	struct wil_rx_buff *buff_arr = wil->rx_buff_mgmt.buff_arr;
	struct sk_buff *skb;
	struct wil_rx_enhanced_desc dd, *d = &dd;
	struct wil_rx_enhanced_desc *_d = (struct wil_rx_enhanced_desc *)
		&ring->va[i].rx.enhanced;

	if (unlikely(list_empty(free))) {
		wil->rx_buff_mgmt.free_list_empty_cnt++;
		return -EAGAIN;
	}

	skb = dev_alloc_skb(sz);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, sz);

	/**
	 * Make sure that the network stack calculates checksum for packets
	 * which failed the HW checksum calculation
	 */
	skb->ip_summed = CHECKSUM_NONE;

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	/* Get the buffer ID - the index of the rx buffer in the buff_arr */
	rx_buff = list_first_entry(free, struct wil_rx_buff, list);
	buff_id = rx_buff->id;

	/* Move a buffer from the free list to the active list */
	list_move(&rx_buff->list, active);

	buff_arr[buff_id].skb = skb;

	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
	d->dma.length = cpu_to_le16(sz);
	d->mac.buff_id = cpu_to_le16(buff_id);
	*_d = *d;

	/* Save the physical address in skb->cb for later use in dma_unmap */
	memcpy(skb->cb, &pa, sizeof(pa));

	return 0;
}

void wil_get_next_rx_status_msg(struct wil_status_ring *sring, u8 *dr_bit,
				void *msg)
{
	struct wil_rx_status_compressed *_msg;

	_msg = (struct wil_rx_status_compressed *)
		(sring->va + (sring->elem_size * sring->swhead));
	*dr_bit = WIL_GET_BITS(_msg->d0, 31, 31);
	/* make sure dr_bit is read before the rest of status msg */
	rmb();
	memcpy(msg, (void *)_msg, sring->elem_size);
}

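/* The status ring has no explicit "owned" flag handshake; instead the
 * expected DR bit polarity (desc_rdy_pol) flips every time swhead wraps,
 * so a message whose DR bit matches the current polarity is new.
 */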
static inline void wil_sring_advance_swhead(struct wil_status_ring *sring)
{
	sring->swhead = (sring->swhead + 1) % sring->size;
	if (sring->swhead == 0)
		sring->desc_rdy_pol = 1 - sring->desc_rdy_pol;
}

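/* Refill the RX descriptor ring up to the HW-reported tail; the tail is
 * read from coherent memory (edma_rx_swtail) that the HW updates as it
 * consumes descriptors.
 */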
static int wil_rx_refill_edma(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;
	u32 next_head;
	int rc = 0;
	ring->swtail = *ring->edma_rx_swtail.va;

	for (; next_head = wil_ring_next_head(ring),
	     (next_head != ring->swtail);
	     ring->swhead = next_head) {
		rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
		if (unlikely(rc)) {
			if (rc == -EAGAIN)
				wil_dbg_txrx(wil, "No free buffer ID found\n");
			else
				wil_err_ratelimited(wil,
						    "Error %d in refill desc[%d]\n",
						    rc, ring->swhead);
			break;
		}
	}

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, ring->hwtail, ring->swhead);

	return rc;
}

static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
					      struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	struct list_head *active = &wil->rx_buff_mgmt.active;
	dma_addr_t pa;

	if (!wil->rx_buff_mgmt.buff_arr)
		return;

	while (!list_empty(active)) {
		struct wil_rx_buff *rx_buff =
			list_first_entry(active, struct wil_rx_buff, list);
		struct sk_buff *skb = rx_buff->skb;

		if (unlikely(!skb)) {
			wil_err(wil, "No Rx skb at buff_id %d\n", rx_buff->id);
		} else {
			rx_buff->skb = NULL;
			memcpy(&pa, skb->cb, sizeof(pa));
			dma_unmap_single(dev, pa, wil->rx_buf_len,
					 DMA_FROM_DEVICE);
			kfree_skb(skb);
		}

		/* Move the buffer from the active to the free list */
		list_move(&rx_buff->list, &wil->rx_buff_mgmt.free);
	}
}

static void wil_free_rx_buff_arr(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;

	if (!wil->rx_buff_mgmt.buff_arr)
		return;

	/* Move all the buffers to the free list in case active list is
	 * not empty in order to release all SKBs before deleting the array
	 */
	wil_move_all_rx_buff_to_free_list(wil, ring);

	kfree(wil->rx_buff_mgmt.buff_arr);
	wil->rx_buff_mgmt.buff_arr = NULL;
}

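/* buff_arr is sized one past the requested count because buff_id 0 is
 * reserved as the invalid-id marker; only ids 1..size go on the free list.
 */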
static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
				size_t size)
{
	struct wil_rx_buff *buff_arr;
	struct list_head *active = &wil->rx_buff_mgmt.active;
	struct list_head *free = &wil->rx_buff_mgmt.free;
	int i;

	wil->rx_buff_mgmt.buff_arr = kcalloc(size + 1,
					     sizeof(struct wil_rx_buff),
					     GFP_KERNEL);
	if (!wil->rx_buff_mgmt.buff_arr)
		return -ENOMEM;

	/* Set list heads */
	INIT_LIST_HEAD(active);
	INIT_LIST_HEAD(free);

	/* Linkify the list.
	 * buffer id 0 should not be used (marks invalid id).
	 */
	buff_arr = wil->rx_buff_mgmt.buff_arr;
	for (i = 1; i <= size; i++) {
		list_add(&buff_arr[i].list, free);
		buff_arr[i].id = i;
	}

	wil->rx_buff_mgmt.size = size + 1;

	return 0;
}

static int wil_init_rx_sring(struct wil6210_priv *wil,
			     u16 status_ring_size,
			     size_t elem_size,
			     u16 ring_id)
{
	struct wil_status_ring *sring = &wil->srings[ring_id];
	int rc;

	wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n",
		     status_ring_size, ring_id);

	memset(&sring->rx_data, 0, sizeof(sring->rx_data));

	sring->is_rx = true;
	sring->size = status_ring_size;
	sring->elem_size = elem_size;
	rc = wil_sring_alloc(wil, sring);
	if (rc)
		return rc;

	rc = wil_wmi_rx_sring_add(wil, ring_id);
	if (rc)
		goto out_free;

	sring->desc_rdy_pol = 1;

	return 0;
out_free:
	wil_sring_free(wil, sring);
	return rc;
}

static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
				    struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = ring->size * sizeof(ring->va[0]);

	wil_dbg_misc(wil, "alloc_desc_ring:\n");

	BUILD_BUG_ON(sizeof(ring->va[0]) != 32);

	ring->swhead = 0;
	ring->swtail = 0;
	ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL);
	if (!ring->ctx)
		goto err;

	ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
	if (!ring->va)
		goto err_free_ctx;

	if (ring->is_rx) {
		sz = sizeof(*ring->edma_rx_swtail.va);
		ring->edma_rx_swtail.va =
			dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
					   GFP_KERNEL);
		if (!ring->edma_rx_swtail.va)
			goto err_free_va;
	}

	wil_dbg_misc(wil, "%s ring[%d] 0x%p:%pad 0x%p\n",
		     ring->is_rx ? "RX" : "TX",
		     ring->size, ring->va, &ring->pa, ring->ctx);

	return 0;
err_free_va:
	dma_free_coherent(dev, ring->size * sizeof(ring->va[0]),
			  (void *)ring->va, ring->pa);
	ring->va = NULL;
err_free_ctx:
	kfree(ring->ctx);
	ring->ctx = NULL;
err:
	return -ENOMEM;
}

static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz;
	int ring_index = 0;

	if (!ring->va)
		return;

	sz = ring->size * sizeof(ring->va[0]);

	lockdep_assert_held(&wil->mutex);
	if (ring->is_rx) {
		wil_dbg_misc(wil, "free Rx ring [%d] 0x%p:%pad 0x%p\n",
			     ring->size, ring->va,
			     &ring->pa, ring->ctx);

		wil_move_all_rx_buff_to_free_list(wil, ring);
		dma_free_coherent(dev, sizeof(*ring->edma_rx_swtail.va),
				  ring->edma_rx_swtail.va,
				  ring->edma_rx_swtail.pa);
		goto out;
	}

	/* TX ring */
	ring_index = ring - wil->ring_tx;

	wil_dbg_misc(wil, "free Tx ring %d [%d] 0x%p:%pad 0x%p\n",
		     ring_index, ring->size, ring->va,
		     &ring->pa, ring->ctx);

	while (!wil_ring_is_empty(ring)) {
		/* Skip descriptors which were not mapped (no ctx) */
		struct wil_ctx *ctx;
		struct wil_tx_enhanced_desc dd, *d = &dd;
		struct wil_tx_enhanced_desc *_d =
			(struct wil_tx_enhanced_desc *)
			&ring->va[ring->swtail].tx.enhanced;

		ctx = &ring->ctx[ring->swtail];
		if (!ctx) {
			wil_dbg_txrx(wil,
				     "ctx(%d) was already completed\n",
				     ring->swtail);
			ring->swtail = wil_ring_next_tail(ring);
			continue;
		}
		*d = *_d;
		wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
		if (ctx->skb)
			dev_kfree_skb_any(ctx->skb);
		ring->swtail = wil_ring_next_tail(ring);
	}

out:
	dma_free_coherent(dev, sz, (void *)ring->va, ring->pa);
	kfree(ring->ctx);
	ring->pa = 0;
	ring->va = NULL;
	ring->ctx = NULL;
}

static int wil_init_rx_desc_ring(struct wil6210_priv *wil, u16 desc_ring_size,
				 int status_ring_id)
{
	struct wil_ring *ring = &wil->ring_rx;
	int rc;

	wil_dbg_misc(wil, "init RX desc ring\n");

	ring->size = desc_ring_size;
	ring->is_rx = true;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		return rc;

	rc = wil_wmi_rx_desc_ring_add(wil, status_ring_id);
	if (rc)
		goto out_free;

	return 0;
out_free:
	wil_ring_free_edma(wil, ring);
	return rc;
}

static void wil_get_reorder_params_edma(struct wil6210_priv *wil,
					struct sk_buff *skb, int *tid,
					int *cid, int *mid, u16 *seq,
					int *mcast, int *retry)
{
	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);

	*tid = wil_rx_status_get_tid(s);
	*cid = wil_rx_status_get_cid(s);
	*mid = wil_rx_status_get_mid(s);
	*seq = le16_to_cpu(wil_rx_status_get_seq(wil, s));
	*mcast = wil_rx_status_get_mcast(s);
	*retry = wil_rx_status_get_retry(s);
}

static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid,
					 int *security)
{
	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);

	*cid = wil_rx_status_get_cid(s);
	*security = wil_rx_status_get_security(s);
}

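/* SW replay check: compare the packet number (PN) carried in the
 * extended status message against the last PN accepted for this
 * CID/TID and key, and only advance the stored PN on success.
 */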
static int wil_rx_crypto_check_edma(struct wil6210_priv *wil,
				    struct sk_buff *skb)
{
	struct wil_rx_status_extended *st;
	int cid, tid, key_id, mc;
	struct wil_sta_info *s;
	struct wil_tid_crypto_rx *c;
	struct wil_tid_crypto_rx_single *cc;
	u8 *pn;

	/* In HW reorder, HW is responsible for crypto check */
	if (wil->use_rx_hw_reordering)
		return 0;

	st = wil_skb_rxstatus(skb);

	cid = wil_rx_status_get_cid(st);
	tid = wil_rx_status_get_tid(st);
	key_id = wil_rx_status_get_key_id(st);
	mc = wil_rx_status_get_mcast(st);
	s = &wil->sta[cid];
	c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
	cc = &c->key_id[key_id];
	pn = (u8 *)&st->ext.pn_15_0;

	if (!cc->key_set) {
		wil_err_ratelimited(wil,
				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
				    cid, tid, mc, key_id);
		return -EINVAL;
	}

	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
		wil_err_ratelimited(wil,
				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
				    cid, tid, mc, key_id, pn, cc->pn);
		return -EINVAL;
	}
	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);

	return 0;
}

static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
{
	struct wil_status_ring *sring;
	struct wil_rx_status_extended msg1;
	void *msg = &msg1;
	u8 dr_bit;
	int i;

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		sring = &wil->srings[i];
		if (!sring->va)
			continue;

		wil_get_next_rx_status_msg(sring, &dr_bit, msg);

		/* Check if there are unhandled RX status messages */
		if (dr_bit == sring->desc_rdy_pol)
			return false;
	}

	return true;
}

static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
{
	/* RX buffer size must be aligned to 4 bytes */
	wil->rx_buf_len = rx_large_buf ?
		WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT;
}

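/* RX bring-up order: status ring(s) first, then the descriptor ring,
 * then the buff_id array, and finally an initial refill. The status
 * ring is forced strictly larger than the descriptor ring so that
 * completions can never overrun it.
 */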
static int wil_rx_init_edma(struct wil6210_priv *wil, uint desc_ring_order)
{
	u16 status_ring_size, desc_ring_size = 1 << desc_ring_order;
	struct wil_ring *ring = &wil->ring_rx;
	int rc;
	size_t elem_size = wil->use_compressed_rx_status ?
		sizeof(struct wil_rx_status_compressed) :
		sizeof(struct wil_rx_status_extended);
	int i;

	/* In SW reorder one must use extended status messages */
	if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
		wil_err(wil,
			"compressed RX status cannot be used with SW reorder\n");
		return -EOPNOTSUPP;
	}
	if (wil->rx_status_ring_order <= desc_ring_order)
		/* make sure sring is larger than desc ring */
		wil->rx_status_ring_order = desc_ring_order + 1;
	if (wil->rx_buff_id_count <= desc_ring_size)
		/* make sure we will not run out of buff_ids */
		wil->rx_buff_id_count = desc_ring_size + 512;
	if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
	    wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
		wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;

	status_ring_size = 1 << wil->rx_status_ring_order;

	wil_dbg_misc(wil,
		     "rx_init, desc_ring_size=%u, status_ring_size=%u, elem_size=%zu\n",
		     desc_ring_size, status_ring_size, elem_size);

	wil_rx_buf_len_init_edma(wil);

	/* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
	if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
		wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;

	wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
		     wil->num_rx_status_rings);

	rc = wil_wmi_cfg_def_rx_offload(wil, wil->rx_buf_len);
	if (rc)
		return rc;

	/* Allocate status ring */
	for (i = 0; i < wil->num_rx_status_rings; i++) {
		int sring_id = wil_find_free_sring(wil);

		if (sring_id < 0) {
			rc = sring_id;
			goto err_free_status;
		}
		rc = wil_init_rx_sring(wil, status_ring_size, elem_size,
				       sring_id);
		if (rc)
			goto err_free_status;
	}

	/* Allocate descriptor ring */
	rc = wil_init_rx_desc_ring(wil, desc_ring_size,
				   WIL_DEFAULT_RX_STATUS_RING_ID);
	if (rc)
		goto err_free_status;

	if (wil->rx_buff_id_count >= status_ring_size) {
		wil_info(wil,
			 "rx_buff_id_count %d exceeds sring_size %d. set it to %d\n",
			 wil->rx_buff_id_count, status_ring_size,
			 status_ring_size - 1);
		wil->rx_buff_id_count = status_ring_size - 1;
	}

	/* Allocate Rx buffer array */
	rc = wil_init_rx_buff_arr(wil, wil->rx_buff_id_count);
	if (rc)
		goto err_free_desc;

	/* Fill descriptor ring with credits */
	rc = wil_rx_refill_edma(wil);
	if (rc)
		goto err_free_rx_buff_arr;

	return 0;

err_free_rx_buff_arr:
	wil_free_rx_buff_arr(wil);
err_free_desc:
	wil_ring_free_edma(wil, ring);
err_free_status:
	for (i = 0; i < wil->num_rx_status_rings; i++)
		wil_sring_free(wil, &wil->srings[i]);

	return rc;
}

static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
				 int size, int cid, int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	int rc;
	struct wil_ring *ring = &wil->ring_tx[ring_id];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

	lockdep_assert_held(&wil->mutex);

	wil_dbg_misc(wil,
		     "init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n",
		     ring_id, cid, tid, wil->tx_sring_idx);

	wil_tx_data_init(txdata);
	ring->size = size;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		goto out;

	wil->ring2cid_tid[ring_id][0] = cid;
	wil->ring2cid_tid[ring_id][1] = tid;
	if (!vif->privacy)
		txdata->dot1x_open = true;

	rc = wil_wmi_tx_desc_ring_add(vif, ring_id, cid, tid);
	if (rc) {
		wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed\n");
		goto out_free;
	}

	if (txdata->dot1x_open && agg_wsize >= 0)
		wil_addba_tx_request(wil, ring_id, agg_wsize);

	return 0;
 out_free:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil_ring_free_edma(wil, ring);
	wil->ring2cid_tid[ring_id][0] = wil->max_assoc_sta;
	wil->ring2cid_tid[ring_id][1] = 0;

 out:
	return rc;
}

static int wil_tx_ring_modify_edma(struct wil6210_vif *vif, int ring_id,
				   int cid, int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);

	wil_err(wil, "ring modify is not supported for EDMA\n");

	return -EOPNOTSUPP;
}

/* This function is used only for RX SW reorder */
static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid,
			 struct sk_buff *skb, struct wil_net_stats *stats)
{
	u8 ftype;
	u8 fc1;
	int mid;
	int tid;
	u16 seq;
	struct wil6210_vif *vif;

	ftype = wil_rx_status_get_frame_type(wil, msg);
	if (ftype == IEEE80211_FTYPE_DATA)
		return 0;

	fc1 = wil_rx_status_get_fc1(wil, msg);
	mid = wil_rx_status_get_mid(msg);
	tid = wil_rx_status_get_tid(msg);
	seq = le16_to_cpu(wil_rx_status_get_seq(wil, msg));
	vif = wil->vifs[mid];

	if (unlikely(!vif)) {
		wil_dbg_txrx(wil, "RX descriptor with invalid mid %d", mid);
		return -EAGAIN;
	}

	wil_dbg_txrx(wil,
		     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
		     fc1, mid, cid, tid, seq);
	if (stats)
		stats->rx_non_data_frame++;
	if (wil_is_back_req(fc1)) {
		wil_dbg_txrx(wil,
			     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
			     mid, cid, tid, seq);
		wil_rx_bar(wil, vif, cid, tid, seq);
	} else {
		u32 sz = wil->use_compressed_rx_status ?
			sizeof(struct wil_rx_status_compressed) :
			sizeof(struct wil_rx_status_extended);

		/* print again all info. One can enable only this
		 * without overhead for printing every Rx frame
		 */
		wil_dbg_txrx(wil,
			     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
			     fc1, mid, cid, tid, seq);
		wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)msg, sz, false);
		wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
				  skb->data, skb_headlen(skb), false);
	}

	return -EAGAIN;
}

static int wil_rx_error_check_edma(struct wil6210_priv *wil,
				   struct sk_buff *skb,
				   struct wil_net_stats *stats)
{
	int l2_rx_status;
	void *msg = wil_skb_rxstatus(skb);

	l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
	if (l2_rx_status != 0) {
		wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
			     l2_rx_status);
		/* Due to HW issue, KEY error will trigger a MIC error */
		if (l2_rx_status == WIL_RX_EDMA_ERROR_MIC) {
			wil_err_ratelimited(wil,
					    "L2 MIC/KEY error, dropping packet\n");
			stats->rx_mic_error++;
		}
		if (l2_rx_status == WIL_RX_EDMA_ERROR_KEY) {
			wil_err_ratelimited(wil,
					    "L2 KEY error, dropping packet\n");
			stats->rx_key_error++;
		}
		if (l2_rx_status == WIL_RX_EDMA_ERROR_REPLAY) {
			wil_err_ratelimited(wil,
					    "L2 REPLAY error, dropping packet\n");
			stats->rx_replay++;
		}
		if (l2_rx_status == WIL_RX_EDMA_ERROR_AMSDU) {
			wil_err_ratelimited(wil,
					    "L2 AMSDU error, dropping packet\n");
			stats->rx_amsdu_error++;
		}
		return -EFAULT;
	}

	skb->ip_summed = wil_rx_status_get_checksum(msg, stats);

	return 0;
}

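/* Reap one packet from an RX status ring: consume status messages,
 * recover the skb via its buff_id, and coalesce multi-descriptor frames
 * until EOP. Returns NULL when no complete packet is ready.
 */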
static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
					      struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	struct wil_rx_status_extended msg1;
	void *msg = &msg1;
	u16 buff_id;
	struct sk_buff *skb;
	dma_addr_t pa;
	struct wil_ring_rx_data *rxdata = &sring->rx_data;
	unsigned int sz = wil->rx_buf_len;
	struct wil_net_stats *stats = NULL;
	u16 dmalen;
	int cid;
	bool eop, headstolen;
	int delta;
	u8 dr_bit;
	u8 data_offset;
	struct wil_rx_status_extended *s;
	u16 sring_idx = sring - wil->srings;
	int invalid_buff_id_retry;

	BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));

again:
	wil_get_next_rx_status_msg(sring, &dr_bit, msg);

	/* Completed handling all the ready status messages */
	if (dr_bit != sring->desc_rdy_pol)
		return NULL;

	/* Extract the buffer ID from the status message */
	buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));

	invalid_buff_id_retry = 0;
	while (!buff_id) {
		struct wil_rx_status_extended *s;

		wil_dbg_txrx(wil,
			     "buff_id is not updated yet by HW, (swhead 0x%x)\n",
			     sring->swhead);
		if (++invalid_buff_id_retry > MAX_INVALID_BUFF_ID_RETRY)
			break;

		/* Read the status message again */
		s = (struct wil_rx_status_extended *)
			(sring->va + (sring->elem_size * sring->swhead));
		*(struct wil_rx_status_extended *)msg = *s;
		buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
	}

	if (unlikely(!wil_val_in_range(buff_id, 1, wil->rx_buff_mgmt.size))) {
		wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
			buff_id, sring->swhead);
		print_hex_dump(KERN_ERR, "RxS ", DUMP_PREFIX_OFFSET, 16, 1,
			       msg, wil->use_compressed_rx_status ?
			       sizeof(struct wil_rx_status_compressed) :
			       sizeof(struct wil_rx_status_extended), false);

		wil_rx_status_reset_buff_id(sring);
		wil_sring_advance_swhead(sring);
		sring->invalid_buff_id_cnt++;
		goto again;
	}

	/* Extract the SKB from the rx_buff management array */
	skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
	wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
	if (!skb) {
		wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
		wil_rx_status_reset_buff_id(sring);
		/* Move the buffer from the active list to the free list */
		list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
			       &wil->rx_buff_mgmt.free);
		wil_sring_advance_swhead(sring);
		sring->invalid_buff_id_cnt++;
		goto again;
	}

	wil_rx_status_reset_buff_id(sring);
	wil_sring_advance_swhead(sring);

	memcpy(&pa, skb->cb, sizeof(pa));
	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(wil_rx_status_get_length(msg));

	trace_wil6210_rx_status(wil, wil->use_compressed_rx_status, buff_id,
				msg);
	wil_dbg_txrx(wil, "Rx, buff_id=%u, sring_idx=%u, dmalen=%u bytes\n",
		     buff_id, sring_idx, dmalen);
	wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)msg, wil->use_compressed_rx_status ?
			  sizeof(struct wil_rx_status_compressed) :
			  sizeof(struct wil_rx_status_extended), false);

	/* Move the buffer from the active list to the free list */
	list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
		       &wil->rx_buff_mgmt.free);

	eop = wil_rx_status_get_eop(msg);

	cid = wil_rx_status_get_cid(msg);
	if (unlikely(!wil_val_in_range(cid, 0, wil->max_assoc_sta))) {
		wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
			cid, sring->swhead);
		rxdata->skipping = true;
		goto skipping;
	}
	stats = &wil->sta[cid].stats;

	if (unlikely(dmalen < ETH_HLEN)) {
		wil_dbg_txrx(wil, "Short frame, len = %d\n", dmalen);
		stats->rx_short_frame++;
		rxdata->skipping = true;
		goto skipping;
	}

	if (unlikely(dmalen > sz)) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		print_hex_dump(KERN_ERR, "RxS ", DUMP_PREFIX_OFFSET, 16, 1,
			       msg, wil->use_compressed_rx_status ?
			       sizeof(struct wil_rx_status_compressed) :
			       sizeof(struct wil_rx_status_extended), false);

		stats->rx_large_frame++;
		rxdata->skipping = true;
	}

skipping:
	/* skipping indicates if a certain SKB should be dropped.
	 * It is set in case there is an error on the current SKB or in case
	 * of RX chaining: as long as we manage to merge the SKBs it will
	 * be false. once we have a bad SKB or we don't manage to merge SKBs
	 * it will be set to the !EOP value of the current SKB.
	 * This guarantees that all the following SKBs until EOP will also
	 * get dropped.
	 */
	if (unlikely(rxdata->skipping)) {
		kfree_skb(skb);
		if (rxdata->skb) {
			kfree_skb(rxdata->skb);
			rxdata->skb = NULL;
		}
		rxdata->skipping = !eop;
		goto again;
	}

	skb_trim(skb, dmalen);

	prefetch(skb->data);

	if (!rxdata->skb) {
		rxdata->skb = skb;
	} else {
		if (likely(skb_try_coalesce(rxdata->skb, skb, &headstolen,
					    &delta))) {
			kfree_skb_partial(skb, headstolen);
		} else {
			wil_err(wil, "failed to merge skbs!\n");
			kfree_skb(skb);
			kfree_skb(rxdata->skb);
			rxdata->skb = NULL;
			rxdata->skipping = !eop;
			goto again;
		}
	}

	if (!eop)
		goto again;

	/* reaching here rxdata->skb always contains a full packet */
	skb = rxdata->skb;
	rxdata->skb = NULL;
	rxdata->skipping = false;

	if (stats) {
		stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
		if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
			stats->rx_per_mcs[stats->last_mcs_rx]++;

		stats->last_cb_mode_rx = wil_rx_status_get_cb_mode(msg);
	}

	if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
	    wil_check_bar(wil, msg, cid, skb, stats) == -EAGAIN) {
		kfree_skb(skb);
		goto again;
	}

	/* Compensate for the HW data alignment according to the status
	 * message
	 */
	data_offset = wil_rx_status_get_data_offset(msg);
	if (data_offset == 0xFF ||
	    data_offset > WIL_EDMA_MAX_DATA_OFFSET) {
		wil_err(wil, "Unexpected data offset %d\n", data_offset);
		kfree_skb(skb);
		goto again;
	}

	skb_pull(skb, data_offset);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	/* Has to be done after dma_unmap_single as skb->cb is also
	 * used for holding the pa
	 */
	s = wil_skb_rxstatus(skb);
	memcpy(s, msg, sring->elem_size);

	return skb;
}

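/* NAPI RX handler: drains every RX status ring under the *quota budget,
 * dispatching each completed packet either straight to the net stack
 * (HW reorder) or through the SW reorder buffer.
 */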
void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev;
	struct wil_ring *ring = &wil->ring_rx;
	struct wil_status_ring *sring;
	struct sk_buff *skb;
	int i;

	if (unlikely(!ring->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "rx_handle\n");

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		sring = &wil->srings[i];
		if (unlikely(!sring->va)) {
			wil_err(wil,
				"Rx IRQ while Rx status ring %d not yet initialized\n",
				i);
			continue;
		}

		while ((*quota > 0) &&
		       (NULL != (skb =
			wil_sring_reap_rx_edma(wil, sring)))) {
			(*quota)--;
			if (wil->use_rx_hw_reordering) {
				void *msg = wil_skb_rxstatus(skb);
				int mid = wil_rx_status_get_mid(msg);
				struct wil6210_vif *vif = wil->vifs[mid];

				if (unlikely(!vif)) {
					wil_dbg_txrx(wil,
						     "RX desc invalid mid %d",
						     mid);
					kfree_skb(skb);
					continue;
				}
				ndev = vif_to_ndev(vif);
				wil_netif_rx_any(skb, ndev);
			} else {
				wil_rx_reorder(wil, skb);
			}
		}

		wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
	}

	wil_rx_refill_edma(wil);
}

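/* Fill one enhanced TX descriptor: DMA address and length, the QID of
 * the owning ring, and L2 translation set to ethernet mode with SNAP
 * header insertion.
 */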
static int wil_tx_desc_map_edma(union wil_tx_desc *desc,
				dma_addr_t pa,
				u32 len,
				int ring_index)
{
	struct wil_tx_enhanced_desc *d =
		(struct wil_tx_enhanced_desc *)&desc->enhanced;

	memset(d, 0, sizeof(struct wil_tx_enhanced_desc));

	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);

	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
	d->dma.length = cpu_to_le16((u16)len);
	d->mac.d[0] = (ring_index << WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS);
	/* translation type:  0 - bypass; 1 - 802.3; 2 - native wifi;
	 * 3 - eth mode
	 */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (0x3 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}

static inline void
wil_get_next_tx_status_msg(struct wil_status_ring *sring, u8 *dr_bit,
			   struct wil_ring_tx_status *msg)
{
	struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *)
		(sring->va + (sring->elem_size * sring->swhead));

	*dr_bit = _msg->desc_ready >> TX_STATUS_DESC_READY_POS;
	/* make sure dr_bit is read before the rest of status msg */
	rmb();
	*msg = *_msg;
}

/* Clean up transmitted skb's from the Tx descriptor RING.
 * Return number of descriptors cleared.
 */
int wil_tx_sring_handler(struct wil6210_priv *wil,
			 struct wil_status_ring *sring)
{
	struct net_device *ndev;
	struct device *dev = wil_to_dev(wil);
	struct wil_ring *ring = NULL;
	struct wil_ring_tx_data *txdata;
	/* Total number of completed descriptors in all descriptor rings */
	int desc_cnt = 0;
	int cid;
	struct wil_net_stats *stats;
	struct wil_tx_enhanced_desc *_d;
	unsigned int ring_id;
	unsigned int num_descs, num_statuses = 0;
	int i;
	u8 dr_bit; /* Descriptor Ready bit */
	struct wil_ring_tx_status msg;
	struct wil6210_vif *vif;
	int used_before_complete;
	int used_new;

	wil_get_next_tx_status_msg(sring, &dr_bit, &msg);

	/* Process completion messages while DR bit has the expected polarity */
	while (dr_bit == sring->desc_rdy_pol) {
		num_descs = msg.num_descriptors;
		if (!num_descs) {
			wil_err(wil, "invalid num_descs 0\n");
			goto again;
		}

		/* Find the corresponding descriptor ring */
		ring_id = msg.ring_id;

		if (unlikely(ring_id >= WIL6210_MAX_TX_RINGS)) {
			wil_err(wil, "invalid ring id %d\n", ring_id);
			goto again;
		}
		ring = &wil->ring_tx[ring_id];
		if (unlikely(!ring->va)) {
			wil_err(wil, "Tx irq[%d]: ring not initialized\n",
				ring_id);
			goto again;
		}
		txdata = &wil->ring_tx_data[ring_id];
		if (unlikely(!txdata->enabled)) {
			wil_info(wil, "Tx irq[%d]: ring disabled\n", ring_id);
			goto again;
		}
		vif = wil->vifs[txdata->mid];
		if (unlikely(!vif)) {
			wil_dbg_txrx(wil, "invalid MID %d for ring %d\n",
				     txdata->mid, ring_id);
			goto again;
		}

		ndev = vif_to_ndev(vif);

		cid = wil->ring2cid_tid[ring_id][0];
		stats = (cid < wil->max_assoc_sta) ? &wil->sta[cid].stats :
						     NULL;

		wil_dbg_txrx(wil,
			     "tx_status: completed desc_ring (%d), num_descs (%d)\n",
			     ring_id, num_descs);

		used_before_complete = wil_ring_used_tx(ring);

		for (i = 0 ; i < num_descs; ++i) {
			struct wil_ctx *ctx = &ring->ctx[ring->swtail];
			struct wil_tx_enhanced_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb = ctx->skb;

			_d = (struct wil_tx_enhanced_desc *)
				&ring->va[ring->swtail].tx.enhanced;
			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_status(&msg, ring->swtail, dmalen);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x\n",
				     ring_id, ring->swtail, dmalen,
				     msg.status);
			wil_hex_dump_txrx("TxS ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)&msg, sizeof(msg),
					  false);

			wil_tx_desc_unmap_edma(dev,
					       (union wil_tx_desc *)d,
					       ctx);

			if (skb) {
				if (likely(msg.status == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;

						wil_tx_latency_calc(wil, skb,
							&wil->sta[cid]);
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}

				if (skb->protocol == cpu_to_be16(ETH_P_PAE))
					wil_tx_complete_handle_eapol(vif, skb);

				wil_consume_skb(skb, msg.status == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the tail
			 * to prevent a case where wil_tx_ring will see
			 * this descriptor as used and handle it before ctx zero
			 * is completed.
			 */
			wmb();

			ring->swtail = wil_ring_next_tail(ring);

			desc_cnt++;
		}

		/* performance monitoring */
		used_new = wil_ring_used_tx(ring);
		if (wil_val_in_range(wil->ring_idle_trsh,
				     used_new, used_before_complete)) {
			wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
				     ring_id, used_before_complete, used_new);
			txdata->last_idle = get_cycles();
		}

again:
		num_statuses++;
		if (num_statuses % WIL_EDMA_TX_SRING_UPDATE_HW_TAIL == 0)
			/* update HW tail to allow HW to push new statuses */
			wil_w(wil, sring->hwtail, sring->swhead);

		wil_sring_advance_swhead(sring);

		wil_get_next_tx_status_msg(sring, &dr_bit, &msg);
	}

	/* shall we wake net queues? */
	if (desc_cnt)
		wil_update_net_queues(wil, vif, NULL, false);

	if (num_statuses % WIL_EDMA_TX_SRING_UPDATE_HW_TAIL != 0)
		/* Update the HW tail ptr (RD ptr) */
		wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);

	return desc_cnt;
}

/* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */
static void wil_tx_desc_offload_setup_tso_edma(struct wil_tx_enhanced_desc *d,
					       int tso_desc_type, bool is_ipv4,
					       int tcp_hdr_len,
					       int skb_net_hdr_len,
					       int mss)
{
	/* Number of descriptors */
	d->mac.d[2] |= 1;
	/* Maximum Segment Size */
	d->mac.tso_mss |= cpu_to_le16(mss >> 2);
	/* L4 header len: TCP header length */
	d->dma.l4_hdr_len |= tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK;
	/* EOP, TSO desc type, Segmentation enable,
	 * Insert IPv4 and TCP / UDP Checksum
	 */
	d->dma.cmd |= BIT(WIL_EDMA_DESC_TX_CFG_EOP_POS) |
		      tso_desc_type << WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS |
		      BIT(WIL_EDMA_DESC_TX_CFG_SEG_EN_POS) |
		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS) |
		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS);
	/* Calculate pseudo-header */
	d->dma.w1 |= BIT(WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS) |
		     BIT(WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS);
	/* IP Header Length */
	d->dma.ip_length |= skb_net_hdr_len;
	/* MAC header length and IP address family*/
	d->dma.b11 |= ETH_HLEN |
		      is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
}

static int wil_tx_tso_gen_desc(struct wil6210_priv *wil, void *buff_addr,
			       int len, uint i, int tso_desc_type,
			       skb_frag_t *frag, struct wil_ring *ring,
			       struct sk_buff *skb, bool is_ipv4,
			       int tcp_hdr_len, int skb_net_hdr_len,
			       int mss, int *descs_used)
{
	struct device *dev = wil_to_dev(wil);
	struct wil_tx_enhanced_desc *_desc = (struct wil_tx_enhanced_desc *)
		&ring->va[i].tx.enhanced;
	struct wil_tx_enhanced_desc desc_mem, *d = &desc_mem;
	int ring_index = ring - wil->ring_tx;
	dma_addr_t pa;

	if (len == 0)
		return 0;

	if (!frag) {
		pa = dma_map_single(dev, buff_addr, len, DMA_TO_DEVICE);
		ring->ctx[i].mapped_as = wil_mapped_as_single;
	} else {
		pa = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		ring->ctx[i].mapped_as = wil_mapped_as_page;
	}
	if (unlikely(dma_mapping_error(dev, pa))) {
		wil_err(wil, "TSO: Skb DMA map error\n");
		return -EINVAL;
	}

	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa,
				  len, ring_index);
	wil_tx_desc_offload_setup_tso_edma(d, tso_desc_type, is_ipv4,
					   tcp_hdr_len,
					   skb_net_hdr_len, mss);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	if (tso_desc_type == wil_tso_type_lst)
		ring->ctx[i].skb = skb_get(skb);

	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	*_desc = *d;
	(*descs_used)++;

	return 0;
}

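/* EDMA TSO: the HW segments a large TCP payload by itself, so the skb
 * is laid out as one header-only descriptor, one descriptor for the
 * linear head, and one per page fragment, each tagged with its TSO
 * descriptor type (header/first/mid/last).
 */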
static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
				  struct wil6210_vif *vif,
				  struct wil_ring *ring,
				  struct sk_buff *skb)
{
	int ring_index = ring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */
	int used, avail = wil_ring_avail_tx(ring);
	int f, hdrlen, headlen;
	int gso_type;
	bool is_ipv4;
	u32 swhead = ring->swhead;
	int descs_used = 0; /* total number of used descriptors */
	int rc = -EINVAL;
	int tcp_hdr_len;
	int skb_net_hdr_len;
	int mss = skb_shinfo(skb)->gso_size;

	wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len,
		     ring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < min_desc_required)) {
		wil_err_ratelimited(wil,
				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
				    ring_index, min_desc_required);
		return -ENOMEM;
	}

	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
	switch (gso_type) {
	case SKB_GSO_TCPV4:
		is_ipv4 = true;
		break;
	case SKB_GSO_TCPV6:
		is_ipv4 = false;
		break;
	default:
		return -EINVAL;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return -EINVAL;

	/* tcp header length and skb network header length are fixed for all
	 * packet's descriptors - read them once here
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	skb_net_hdr_len = skb_network_header_len(skb);

	/* First descriptor must contain the header only
	 * Header Length = MAC header len + IP header len + TCP header len
	 */
	hdrlen = ETH_HLEN + tcp_hdr_len + skb_net_hdr_len;
	wil_dbg_txrx(wil, "TSO: process header descriptor, hdrlen %u\n",
		     hdrlen);
	rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead,
				 wil_tso_type_hdr, NULL, ring, skb,
				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
				 mss, &descs_used);
	if (rc)
		return -EINVAL;

	/* Second descriptor contains the head */
	headlen = skb_headlen(skb) - hdrlen;
	wil_dbg_txrx(wil, "TSO: process skb head, headlen %u\n", headlen);
	rc = wil_tx_tso_gen_desc(wil, skb->data + hdrlen, headlen,
				 (swhead + descs_used) % ring->size,
				 (nr_frags != 0) ? wil_tso_type_first :
				 wil_tso_type_lst, NULL, ring, skb,
				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
				 mss, &descs_used);
	if (rc)
		goto mem_error;

	/* Rest of the descriptors are from the SKB fragments */
	for (f = 0; f < nr_frags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
			     len, descs_used);

		rc = wil_tx_tso_gen_desc(wil, NULL, len,
					 (swhead + descs_used) % ring->size,
					 (f != nr_frags - 1) ?
					 wil_tso_type_mid : wil_tso_type_lst,
					 frag, ring, skb, is_ipv4,
					 tcp_hdr_len, skb_net_hdr_len,
					 mss, &descs_used);
		if (rc)
			goto mem_error;
	}

	/* performance monitoring */
	used = wil_ring_used_tx(ring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     ring_index, used, used + descs_used);
	}

	/* advance swhead */
	wil_ring_advance_head(ring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, ring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	if (wil->tx_latency)
		*(ktime_t *)&skb->cb = ktime_get();
	else
		memset(skb->cb, 0, sizeof(ktime_t));

	wil_w(wil, ring->hwtail, ring->swhead);

	return 0;

mem_error:
	while (descs_used > 0) {
		struct device *dev = wil_to_dev(wil);
		struct wil_ctx *ctx;
		int i = (swhead + descs_used - 1) % ring->size;
		struct wil_tx_enhanced_desc dd, *d = &dd;
		struct wil_tx_enhanced_desc *_desc =
			(struct wil_tx_enhanced_desc *)
			&ring->va[i].tx.enhanced;

		*d = *_desc;
		ctx = &ring->ctx[i];
		wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
	return rc;
}

static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id,
				    int size)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	struct wil_ring *ring = &wil->ring_tx[ring_id];
	int rc;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

	wil_dbg_misc(wil, "init bcast: ring_id=%d, sring_id=%d\n",
		     ring_id, wil->tx_sring_idx);

	lockdep_assert_held(&wil->mutex);

	wil_tx_data_init(txdata);
	ring->size = size;
	ring->is_rx = false;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		goto out;

	wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; /* CID */
	wil->ring2cid_tid[ring_id][1] = 0; /* TID */

	if (!vif->privacy)
		txdata->dot1x_open = true;

	rc = wil_wmi_bcast_desc_ring_add(vif, ring_id);
	if (rc)
		goto out_free;

	return 0;

 out_free:
	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0;
	txdata->dot1x_open = false;
	spin_unlock_bh(&txdata->lock);
	wil_ring_free_edma(wil, ring);

 out:
	return rc;
}

static void wil_tx_fini_edma(struct wil6210_priv *wil)
{
	struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];

	wil_dbg_misc(wil, "free TX sring\n");

	wil_sring_free(wil, sring);
}

static void wil_rx_data_free(struct wil_status_ring *sring)
{
	if (!sring)
		return;

	kfree_skb(sring->rx_data.skb);
	sring->rx_data.skb = NULL;
}

static void wil_rx_fini_edma(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;
	int i;

	wil_dbg_misc(wil, "rx_fini_edma\n");

	wil_ring_free_edma(wil, ring);

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		wil_rx_data_free(&wil->srings[i]);
		wil_sring_free(wil, &wil->srings[i]);
	}

	wil_free_rx_buff_arr(wil);
}

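/* Hook the EDMA implementations into the common txrx_ops vtable; the
 * legacy (non-EDMA) DMA variants are installed by the corresponding
 * legacy init path elsewhere in the driver.
 */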
void wil_init_txrx_ops_edma(struct wil6210_priv *wil)
{
	wil->txrx_ops.configure_interrupt_moderation =
		wil_configure_interrupt_moderation_edma;
	/* TX ops */
	wil->txrx_ops.ring_init_tx = wil_ring_init_tx_edma;
	wil->txrx_ops.ring_fini_tx = wil_ring_free_edma;
	wil->txrx_ops.ring_init_bcast = wil_ring_init_bcast_edma;
	wil->txrx_ops.tx_init = wil_tx_init_edma;
	wil->txrx_ops.tx_fini = wil_tx_fini_edma;
	wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
	wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
	wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
	wil->txrx_ops.tx_ring_modify = wil_tx_ring_modify_edma;
	/* RX ops */
	wil->txrx_ops.rx_init = wil_rx_init_edma;
	wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;
	wil->txrx_ops.get_reorder_params = wil_get_reorder_params_edma;
	wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params_edma;
	wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check_edma;
	wil->txrx_ops.rx_error_check = wil_rx_error_check_edma;
	wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma;
	wil->txrx_ops.rx_fini = wil_rx_fini_edma;
}