/*
 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/prefetch.h>

/* driver-local headers providing the wil6210 types, WMI and trace helpers
 * used throughout this file
 */
#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"
static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, 0444);
MODULE_PARM_DESC(rtap_include_phy_info,
		 " Include PHY info in the radiotap header, default - no");

bool rx_align_2;
module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");

bool rx_large_buf;
module_param(rx_large_buf, bool, 0444);
MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
static inline uint wil_rx_snaplen(void)
{
	return rx_align_2 ? 6 : 0;
}

static inline int wil_vring_is_empty(struct vring *vring)
{
	return vring->swhead == vring->swtail;
}

static inline u32 wil_vring_next_tail(struct vring *vring)
{
	return (vring->swtail + 1) % vring->size;
}

static inline void wil_vring_advance_head(struct vring *vring, int n)
{
	vring->swhead = (vring->swhead + n) % vring->size;
}

static inline int wil_vring_is_full(struct vring *vring)
{
	return wil_vring_next_tail(vring) == vring->swhead;
}

/* Used space in Tx Vring */
static inline int wil_vring_used_tx(struct vring *vring)
{
	u32 swhead = vring->swhead;
	u32 swtail = vring->swtail;

	return (vring->size + swhead - swtail) % vring->size;
}

/* Available space in Tx Vring */
static inline int wil_vring_avail_tx(struct vring *vring)
{
	return vring->size - wil_vring_used_tx(vring) - 1;
}
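
/* Worked example of the ring arithmetic above, assuming size = 128:
 * swhead = 5, swtail = 120 -> used = (128 + 5 - 120) % 128 = 13 and
 * avail = 128 - 13 - 1 = 114. One slot is always kept unused so that
 * swhead == swtail unambiguously means "empty" rather than "full".
 */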
/* wil_vring_wmark_low - low watermark for available descriptor space */
static inline int wil_vring_wmark_low(struct vring *vring)
{
	return vring->size / 8;
}

/* wil_vring_wmark_high - high watermark for available descriptor space */
static inline int wil_vring_wmark_high(struct vring *vring)
{
	return vring->size / 4;
}

/* returns true if num avail descriptors is lower than wmark_low */
static inline int wil_vring_avail_low(struct vring *vring)
{
	return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring);
}

/* returns true if num avail descriptors is higher than wmark_high */
static inline int wil_vring_avail_high(struct vring *vring)
{
	return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
}
/* returns true when all tx vrings are empty */
bool wil_is_tx_idle(struct wil6210_priv *wil)
{
	int i;
	unsigned long data_comp_to;

	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		struct vring *vring = &wil->vring_tx[i];
		int vring_index = vring - wil->vring_tx;
		struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];

		spin_lock(&txdata->lock);

		if (!vring->va || !txdata->enabled) {
			spin_unlock(&txdata->lock);
			continue;
		}

		data_comp_to = jiffies + msecs_to_jiffies(
					WIL_DATA_COMPLETION_TO_MS);
		if (test_bit(wil_status_napi_en, wil->status)) {
			while (!wil_vring_is_empty(vring)) {
				if (time_after(jiffies, data_comp_to)) {
					wil_dbg_pm(wil,
						   "TO waiting for idle tx\n");
					spin_unlock(&txdata->lock);
					return false;
				}
				wil_dbg_ratelimited(wil,
						    "tx vring is not empty -> NAPI\n");
				spin_unlock(&txdata->lock);
				napi_synchronize(&wil->napi_tx);
				msleep(20);
				spin_lock(&txdata->lock);
				if (!vring->va || !txdata->enabled)
					break;
			}
		}

		spin_unlock(&txdata->lock);
	}

	return true;
}
/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
	return val >= min && val < max;
}
static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	wil_dbg_misc(wil, "vring_alloc:\n");

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}

	/* vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 *
	 * HW has limitation that all vrings addresses must share the same
	 * upper 16 msb bits part of 48 bits address. To workaround that,
	 * if we are using more than 32 bit addresses switch to 32 bit
	 * allocation before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}

	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev,
					  DMA_BIT_MASK(wil->dma_addr_size));

	/* initially, all descriptors are SW owned
	 * For Tx and Rx, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d = &vring->va[i].tx;

		_d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
		     vring->va, &vring->pa, vring->ctx);

	return 0;
}
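
/* wil_vring_alloc() is shared by the Rx path (wil_rx_init) and the Tx
 * paths (wil_vring_init_tx / wil_vring_init_bcast): callers set
 * vring->size first and later hand vring->pa to FW over WMI.
 */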
static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
			     struct wil_ctx *ctx)
{
	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		break;
	}
}
static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	lockdep_assert_held(&wil->mutex);
	if (tx) {
		int vring_index = vring - wil->vring_tx;

		wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
			     vring_index, vring->size, vring->va,
			     &vring->pa, vring->ctx);
	} else {
		wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
			     vring->size, vring->va,
			     &vring->pa, vring->ctx);
	}

	while (!wil_vring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (tx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
					&vring->va[vring->swtail].tx;

			ctx = &vring->ctx[vring->swtail];
			if (!ctx) {
				wil_dbg_txrx(wil,
					     "ctx(%d) was already completed\n",
					     vring->swtail);
				vring->swtail = wil_vring_next_tail(vring);
				continue;
			}
			*d = *_d;
			wil_txdesc_unmap(dev, d, ctx);
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_vring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
					&vring->va[vring->swhead].rx;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}
/* Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
	struct vring_rx_desc dd, *d = &dd;
	volatile struct vring_rx_desc *_d = &vring->va[i].rx;
	dma_addr_t pa;
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);

	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
	wil_desc_addr_set(&d->dma.addr, pa);
	/* ip_length don't care */
	/* b11 don't care */
	/* error don't care */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16(sz);
	*_d = *d;
	vring->ctx[i].skb = skb;

	return 0;
}
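
/* Buffer sizing note: each Rx skb has rx_buf_len + ETH_HLEN + snaplen
 * bytes of data area, plus optional headroom (WIL6210_RTAP_SIZE) that
 * wil_rx_refill() requests in monitor mode for the radiotap header.
 */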
/* Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - Phy info
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;

	struct wil6210_rtap_vendor {
		struct wil6210_rtap rtap;
		/* vendor */
		u8 vendor_oui[3] __aligned(2);
		u8 vendor_ns;
		__le16 vendor_skip;
		u8 vendor_data[0];
	} __packed;

	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	struct wil6210_rtap_vendor *rtap_vendor;
	int rtap_len = sizeof(struct wil6210_rtap);
	int phy_length = 0; /* phy info header size, bytes */
	static char phy_data[128];
	struct ieee80211_channel *ch = wil->monitor_chandef.chan;

	if (rtap_include_phy_info) {
		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
		/* calculate additional length */
		if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
			/**
			 * PHY info starts from 8-byte boundary
			 * there are 8-byte lines, last line may be partially
			 * written (HW bug), thus FW configures for last line
			 * to be excessive. Driver skips this last line.
			 */
			int len = min_t(int, 8 + sizeof(phy_data),
					wil_rxdesc_phy_length(d));

			if (len > 8) {
				void *p = skb_tail_pointer(skb);
				void *pa = PTR_ALIGN(p, 8);

				if (skb_tailroom(skb) >= len + (pa - p)) {
					phy_length = len - 8;
					memcpy(phy_data, pa, phy_length);
				}
			}
		}
		rtap_len += phy_length;
	}

	if (skb_headroom(skb) < rtap_len &&
	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
		wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
		return;
	}

	rtap_vendor = skb_push(skb, rtap_len);
	memset(rtap_vendor, 0, rtap_len);

	rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
	rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
	rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
			(1 << IEEE80211_RADIOTAP_FLAGS) |
			(1 << IEEE80211_RADIOTAP_CHANNEL) |
			(1 << IEEE80211_RADIOTAP_MCS));
	if (d->dma.status & RX_DMA_STATUS_ERROR)
		rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;

	rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
	rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);

	rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
	rtap_vendor->rtap.mcs_flags = 0;
	rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);

	if (rtap_include_phy_info) {
		rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
				IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
		/* OUI for Wilocity 04:ce:14 */
		rtap_vendor->vendor_oui[0] = 0x04;
		rtap_vendor->vendor_oui[1] = 0xce;
		rtap_vendor->vendor_oui[2] = 0x14;
		rtap_vendor->vendor_ns = 1;
		/* Rx descriptor + PHY data */
		rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
						       phy_length);
		memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
		memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
		       phy_length);
	}
}
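
/* Resulting radiotap layout (sketch): radiotap header, flags byte,
 * channel (chnl_freq/chnl_flags), MCS triplet and, when
 * rtap_include_phy_info is set, a vendor namespace (OUI 04:ce:14,
 * sub-namespace 1) carrying the raw 32-byte Rx descriptor followed by
 * the PHY info copied above.
 */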
/* similar to ieee80211_ version, but FC contain only 1-st byte */
static inline int wil_is_back_req(u8 fc)
{
	return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
	       (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
}
bool wil_is_rx_idle(struct wil6210_priv *wil)
{
	struct vring_rx_desc *_d;
	struct vring *vring = &wil->vring_rx;

	_d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx;
	if (_d->dma.status & RX_DMA_STATUS_DU)
		return false;

	return true;
}
/* reap 1 frame from @swhead
 *
 * Rx descriptor copied to skb->cb
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
					 struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	struct net_device *ndev = wil_to_ndev(wil);
	volatile struct vring_rx_desc *_d;
	struct vring_rx_desc *d;
	struct sk_buff *skb;
	dma_addr_t pa;
	unsigned int snaplen = wil_rx_snaplen();
	unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
	u16 dmalen;
	u8 ftype;
	int cid;
	int i;
	struct wil_net_stats *stats;

	BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));

again:
	if (unlikely(wil_vring_is_empty(vring)))
		return NULL;

	i = (int)vring->swhead;
	_d = &vring->va[i].rx;
	if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
		/* it is not error, we just reached end of Rx done area */
		return NULL;
	}

	skb = vring->ctx[i].skb;
	vring->ctx[i].skb = NULL;
	wil_vring_advance_head(vring, 1);
	if (!skb) {
		wil_err(wil, "No Rx skb at [%d]\n", i);
		goto again;
	}
	d = wil_skb_rxdesc(skb);
	*d = *_d;
	pa = wil_desc_addr(&d->dma.addr);

	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(d->dma.length);

	trace_wil6210_rx(i, d);
	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
	wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	cid = wil_rxdesc_cid(d);
	stats = &wil->sta[cid].stats;

	if (unlikely(dmalen > sz)) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		stats->rx_large_frame++;
		kfree_skb(skb);
		goto again;
	}
	skb_trim(skb, dmalen);

	prefetch(skb->data);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	stats->last_mcs_rx = wil_rxdesc_mcs(d);
	if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
		stats->rx_per_mcs[stats->last_mcs_rx]++;

	/* use radiotap header only if required */
	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
		wil_rx_add_radiotap_header(wil, skb);

	/* no extra checks if in sniffer mode */
	if (ndev->type != ARPHRD_ETHER)
		return skb;
	/* Non-data frames may be delivered through Rx DMA channel (ex: BAR)
	 * Driver should recognize it by frame type, that is found
	 * in Rx descriptor. If type is not data, it is 802.11 frame as is
	 */
	ftype = wil_rxdesc_ftype(d) << 2;
	if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
		u8 fc1 = wil_rxdesc_fc1(d);
		int mid = wil_rxdesc_mid(d);
		int tid = wil_rxdesc_tid(d);
		u16 seq = wil_rxdesc_seq(d);

		wil_dbg_txrx(wil,
			     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
			     fc1, mid, cid, tid, seq);
		stats->rx_non_data_frame++;
		if (wil_is_back_req(fc1)) {
			wil_dbg_txrx(wil,
				     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
				     mid, cid, tid, seq);
			wil_rx_bar(wil, cid, tid, seq);
		} else {
			/* print again all info. One can enable only this
			 * without overhead for printing every Rx frame
			 */
			wil_dbg_txrx(wil,
				     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
				     fc1, mid, cid, tid, seq);
			wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);
			wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
					  skb->data, skb_headlen(skb), false);
		}
		kfree_skb(skb);
		goto again;
	}

	if (unlikely(skb->len < ETH_HLEN + snaplen)) {
		wil_err(wil, "Short frame, len = %d\n", skb->len);
		stats->rx_short_frame++;
		kfree_skb(skb);
		goto again;
	}

	/* L4 IDENT is on when HW calculated checksum, check status
	 * and in case of error drop the packet
	 * higher stack layers will handle retransmission (if required)
	 */
	if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
		/* L4 protocol identified, csum calculated */
		if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* If HW reports bad checksum, let IP stack re-check it
		 * For example, HW don't understand Microsoft IP stack that
		 * mis-calculates TCP checksum - if it should be 0x0,
		 * it writes 0xffff in violation of RFC 1624
		 */
	}

	if (snaplen) {
		/* Packet layout
		 * +-------+-------+---------+------------+------+
		 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
		 * +-------+-------+---------+------------+------+
		 * Need to remove SNAP, shifting SA and DA forward
		 */
		memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, snaplen);
	}

	return skb;
}
/* allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	u32 next_tail;
	int rc = 0;
	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
			WIL6210_RTAP_SIZE : 0;

	for (; next_tail = wil_vring_next_tail(v),
			(next_tail != v->swhead) && (count-- > 0);
			v->swtail = next_tail) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
		if (unlikely(rc)) {
			wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
				rc, v->swtail);
			break;
		}
	}

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, v->hwtail, v->swtail);

	return rc;
}
/**
 * reverse_memcmp - Compare two areas of memory, in reverse order
 * @cs: One area of memory
 * @ct: Another area of memory
 * @count: The size of the area.
 *
 * Cut'n'paste from original memcmp (see lib/string.c)
 * with minimal modifications
 */
static int reverse_memcmp(const void *cs, const void *ct, size_t count)
{
	const unsigned char *su1, *su2;
	int res = 0;

	for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
	     --su1, --su2, count--) {
		res = *su1 - *su2;
		if (res)
			break;
	}
	return res;
}
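
/* Note: reverse_memcmp() walks from the last byte toward the first, so
 * the byte at the highest offset is the most significant for the
 * comparison. wil_rx_crypto_check() below relies on this because the
 * GCMP PN bytes in the Rx descriptor start with pn_15_0, i.e. the
 * least significant part comes first in memory.
 */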
static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_rxdesc_cid(d);
	int tid = wil_rxdesc_tid(d);
	int key_id = wil_rxdesc_key_id(d);
	int mc = wil_rxdesc_mcast(d);
	struct wil_sta_info *s = &wil->sta[cid];
	struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
				      &s->tid_crypto_rx[tid];
	struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
	const u8 *pn = (u8 *)&d->mac.pn_15_0;

	if (!cc->key_set) {
		wil_err_ratelimited(wil,
				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
				    cid, tid, mc, key_id);
		return -EINVAL;
	}

	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
		wil_err_ratelimited(wil,
				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
				    cid, tid, mc, key_id, pn, cc->pn);
		return -EINVAL;
	}
	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);

	return 0;
}
/* Pass Rx packet to the netif. Update statistics.
 * Called in softirq context (NAPI poll).
 */
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
	gro_result_t rc = GRO_NORMAL;
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct wireless_dev *wdev = wil_to_wdev(wil);
	unsigned int len = skb->len;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
	int security = wil_rxdesc_security(d);
	struct ethhdr *eth = (void *)skb->data;
	/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
	 * is not suitable, need to look at data
	 */
	int mcast = is_multicast_ether_addr(eth->h_dest);
	struct wil_net_stats *stats = &wil->sta[cid].stats;
	struct sk_buff *xmit_skb = NULL;
	static const char * const gro_res_str[] = {
		[GRO_MERGED]		= "GRO_MERGED",
		[GRO_MERGED_FREE]	= "GRO_MERGED_FREE",
		[GRO_HELD]		= "GRO_HELD",
		[GRO_NORMAL]		= "GRO_NORMAL",
		[GRO_DROP]		= "GRO_DROP",
	};

	if (ndev->features & NETIF_F_RXHASH)
		/* fake L4 to ensure it won't be re-calculated later
		 * set hash to any non-zero value to activate rps
		 * mechanism, core will be chosen according
		 * to user-level rps configuration.
		 */
		skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);

	skb_orphan(skb);

	if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
		rc = GRO_DROP;
		dev_kfree_skb(skb);
		stats->rx_replay++;
		goto stats;
	}

	if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
		if (mcast) {
			/* send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			int xmit_cid = wil_find_cid(wil, eth->h_dest);

			if (xmit_cid >= 0) {
				/* The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
		}
	}
	if (xmit_skb) {
		/* Send to wireless media and increase priority by 256 to
		 * keep the received priority instead of reclassifying
		 * the frame (see cfg80211_classify8021d).
		 */
		xmit_skb->dev = ndev;
		xmit_skb->priority += 256;
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
		dev_queue_xmit(xmit_skb);
	}

	if (skb) { /* deliver to local stack */
		skb->protocol = eth_type_trans(skb, ndev);
		rc = napi_gro_receive(&wil->napi_rx, skb);
		wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
			     len, gro_res_str[rc]);
	}
stats:
	/* statistics. rc set to GRO_NORMAL for AP bridging */
	if (unlikely(rc == GRO_DROP)) {
		ndev->stats.rx_dropped++;
		stats->rx_dropped++;
		wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
	} else {
		ndev->stats.rx_packets++;
		stats->rx_packets++;
		ndev->stats.rx_bytes += len;
		stats->rx_bytes += len;
		if (mcast)
			ndev->stats.multicast++;
	}
}
/* Proceed all completed skb's from Rx VRING
 *
 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
 */
void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring *v = &wil->vring_rx;
	struct sk_buff *skb;

	if (unlikely(!v->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "rx_handle\n");
	while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
		(*quota)--;

		if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
			skb->dev = ndev;
			skb_reset_mac_header(skb);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->pkt_type = PACKET_OTHERHOST;
			skb->protocol = htons(ETH_P_802_2);
			wil_netif_rx_any(skb, ndev);
		} else {
			wil_rx_reorder(wil, skb);
		}
	}
	wil_rx_refill(wil, v->size);
}
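
/* NAPI note: the loop above consumes at most *quota frames per poll
 * and the ring is topped back up via wil_rx_refill(wil, v->size); the
 * refill loop itself stops once swtail catches up with swhead.
 */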
static void wil_rx_buf_len_init(struct wil6210_priv *wil)
{
	wil->rx_buf_len = rx_large_buf ?
		WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
	if (mtu_max > wil->rx_buf_len) {
		/* do not allow RX buffers to be smaller than mtu_max, for
		 * backward compatibility (mtu_max parameter was also used
		 * to support receiving large packets)
		 */
		wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
		wil->rx_buf_len = mtu_max;
	}
}
int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
	struct vring *vring = &wil->vring_rx;
	int rc;

	wil_dbg_misc(wil, "rx_init\n");

	if (vring->va) {
		wil_err(wil, "Rx ring already allocated\n");
		return -EINVAL;
	}

	wil_rx_buf_len_init(wil);

	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		return rc;

	rc = wmi_rx_chain_add(wil, vring);
	if (rc)
		goto err_free;

	rc = wil_rx_refill(wil, vring->size);
	if (rc)
		goto err_free;

	return 0;
err_free:
	wil_vring_free(wil, vring, 0);

	return rc;
}
void wil_rx_fini(struct wil6210_priv *wil)
{
	struct vring *vring = &wil->vring_rx;

	wil_dbg_misc(wil, "rx_fini\n");

	if (vring->va)
		wil_vring_free(wil, vring, 0);
}
static inline void wil_tx_data_init(struct vring_tx_data *txdata)
{
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = 0;
	txdata->enabled = 0;
	txdata->idle = 0;
	txdata->last_idle = 0;
	txdata->begin = 0;
	txdata->agg_wsize = 0;
	txdata->agg_timeout = 0;
	txdata->agg_amsdu = 0;
	txdata->addba_in_progress = false;
	spin_unlock_bh(&txdata->lock);
}
int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
		      int cid, int tid)
{
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.cidxtid = mk_cidxtid(cid, tid),
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
	lockdep_assert_held(&wil->mutex);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	wil_tx_data_init(txdata);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = cid;
	wil->vring2cid_tid[id][1] = tid;

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!wil->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}

	spin_lock_bh(&txdata->lock);
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
	txdata->enabled = 1;
	spin_unlock_bh(&txdata->lock);

	if (txdata->dot1x_open && (agg_wsize >= 0))
		wil_addba_tx_request(wil, id, agg_wsize);

	return 0;
out_free:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil_vring_free(wil, vring, 1);
	wil->vring2cid_tid[id][0] = WIL6210_MAX_CID;
	wil->vring2cid_tid[id][1] = 0;

out:

	return rc;
}
int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
{
	int rc;
	struct wmi_bcast_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size =
					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
		},
	};
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
	lockdep_assert_held(&wil->mutex);

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	wil_tx_data_init(txdata);
	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
	wil->vring2cid_tid[id][1] = 0; /* TID */

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	if (!wil->privacy)
		txdata->dot1x_open = true;
	rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}

	spin_lock_bh(&txdata->lock);
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
	txdata->enabled = 1;
	spin_unlock_bh(&txdata->lock);

	return 0;
out_free:
	spin_lock_bh(&txdata->lock);
	txdata->enabled = 0;
	txdata->dot1x_open = false;
	spin_unlock_bh(&txdata->lock);
	wil_vring_free(wil, vring, 1);

out:

	return rc;
}
void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
	struct vring *vring = &wil->vring_tx[id];
	struct vring_tx_data *txdata = &wil->vring_tx_data[id];

	lockdep_assert_held(&wil->mutex);

	if (!vring->va)
		return;

	wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);

	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0; /* no Tx can be in progress or start anew */
	spin_unlock_bh(&txdata->lock);
	/* napi_synchronize waits for completion of the current NAPI but will
	 * not prevent the next NAPI run.
	 * Add a memory barrier to guarantee that txdata->enabled is zeroed
	 * before napi_synchronize so that the next scheduled NAPI will not
	 * handle this vring
	 */
	wmb();
	/* make sure NAPI won't touch this vring */
	if (test_bit(wil_status_napi_en, wil->status))
		napi_synchronize(&wil->napi_tx);

	wil_vring_free(wil, vring, 1);
}
static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	int i;
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil_find_cid(wil, eth->h_dest);

	if (cid < 0)
		return NULL;

	/* TODO: fix for multiple TID */
	for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;
		if (wil->vring2cid_tid[i][0] == cid) {
			struct vring *v = &wil->vring_tx[i];
			struct vring_tx_data *txdata = &wil->vring_tx_data[i];

			wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
				     eth->h_dest, i);
			if (v->va && txdata->enabled) {
				return v;
			} else {
				wil_dbg_txrx(wil,
					     "find_tx_ucast: vring[%d] not valid\n",
					     i);
				return NULL;
			}
		}
	}

	return NULL;
}
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb);
static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
					   struct sk_buff *skb)
{
	struct vring *v;
	int i;
	u8 cid;
	struct vring_tx_data *txdata;

	/* In the STA mode, it is expected to have only 1 VRING
	 * for the AP we connected to.
	 * find 1-st vring eligible for this skb and use it.
	 */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		txdata = &wil->vring_tx_data[i];
		if (!v->va || !txdata->enabled)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;

		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		wil_dbg_txrx(wil, "Tx -> ring %d\n", i);

		return v;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;
}
/* Use one of 2 strategies:
 *
 * 1. New (real broadcast):
 *    use dedicated broadcast vring
 * 2. Old (pseudo-DMS):
 *    Find 1-st vring and return it;
 *    duplicate skb and send it to other active vrings;
 *    in all cases override dest address to unicast peer's address
 * Use old strategy when new is not supported yet:
 *  - for PBSS
 */
static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
					 struct sk_buff *skb)
{
	struct vring *v;
	struct vring_tx_data *txdata;
	int i = wil->bcast_vring;

	if (i < 0)
		return NULL;
	v = &wil->vring_tx[i];
	txdata = &wil->vring_tx_data[i];
	if (!v->va || !txdata->enabled)
		return NULL;
	if (!wil->vring_tx_data[i].dot1x_open &&
	    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
		return NULL;

	return v;
}
static void wil_set_da_for_vring(struct wil6210_priv *wil,
				 struct sk_buff *skb, int vring_index)
{
	struct ethhdr *eth = (void *)skb->data;
	int cid = wil->vring2cid_tid[vring_index][0];

	ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
}
static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
					 struct sk_buff *skb)
{
	struct vring *v, *v2;
	struct sk_buff *skb2;
	int i;
	u8 cid;
	struct ethhdr *eth = (void *)skb->data;
	char *src = eth->h_source;
	struct vring_tx_data *txdata;

	/* find 1-st vring eligible for data */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		v = &wil->vring_tx[i];
		txdata = &wil->vring_tx_data[i];
		if (!v->va || !txdata->enabled)
			continue;

		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		/* don't Tx back to source when re-routing Rx->Tx at the AP */
		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		goto found;
	}

	wil_dbg_txrx(wil, "Tx while no vrings active?\n");

	return NULL;

found:
	wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
	wil_set_da_for_vring(wil, skb, i);

	/* find other active vrings and duplicate skb for each */
	for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
		v2 = &wil->vring_tx[i];
		if (!v2->va)
			continue;
		cid = wil->vring2cid_tid[i][0];
		if (cid >= WIL6210_MAX_CID) /* skip BCAST */
			continue;
		if (!wil->vring_tx_data[i].dot1x_open &&
		    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
			continue;

		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
			continue;

		skb2 = skb_copy(skb, GFP_ATOMIC);
		if (skb2) {
			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
			wil_set_da_for_vring(wil, skb2, i);
			wil_tx_vring(wil, v2, skb2);
		} else {
			wil_err(wil, "skb_copy failed\n");
		}
	}

	return v;
}
static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
			   int vring_index)
{
	wil_desc_addr_set(&d->dma.addr, pa);
	d->dma.ip_length = 0;
	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
	d->dma.b11 = 0/*14 | BIT(7)*/;
	d->dma.error = 0;
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16((u16)len);
	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
	d->mac.d[0] = 0;
	d->mac.d[1] = 0;
	d->mac.d[2] = 0;
	d->mac.ucode_cmd = 0;
	/* translation type:  0 - bypass; 1 - 802.3; 2 - native wifi */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
	d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}
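
/* The descriptor count written here (mirrored in ctx->nr_frags) is
 * what wil_tx_complete() uses to locate the last descriptor of an skb:
 * lf = (swtail + ctx->nr_frags) % size, since HW sets the DU bit only
 * on the last fragment.
 */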
/**
 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */
static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
					  struct sk_buff *skb,
					  int tso_desc_type, bool is_ipv4,
					  int tcp_hdr_len, int skb_net_hdr_len)
{
	d->dma.b11 = ETH_HLEN; /* MAC header length */
	d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;

	d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
	/* L4 header len: TCP header length */
	d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);

	/* Setup TSO: bit and desc type */
	d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
		     (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
	d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);

	d->dma.ip_length = skb_net_hdr_len;
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
}
/**
 * Sets the descriptor @d up for csum. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
 * Note, if d==NULL, the function only returns the protocol result.
 *
 * It is very similar to previous wil_tx_desc_offload_setup_tso. This
 * is "if unrolling" to optimize the critical path.
 */
static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
				     struct sk_buff *skb)
{
	int protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	d->dma.b11 = ETH_HLEN; /* MAC header length */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		protocol = ip_hdr(skb)->protocol;
		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return -EINVAL;
	}

	switch (protocol) {
	case IPPROTO_TCP:
		d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
		/* L4 header len: TCP header length */
		d->dma.d0 |=
		(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	case IPPROTO_UDP:
		/* L4 header len: UDP header length */
		d->dma.d0 |=
		(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	default:
		return -EINVAL;
	}

	d->dma.ip_length = skb_network_header_len(skb);
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

	return 0;
}
static inline void wil_tx_last_desc(struct vring_tx_desc *d)
{
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
		     BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
}
static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
{
	d->dma.d0 |= wil_tso_type_lst <<
		     DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
}
static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
			      struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);

	/* point to descriptors in shared memory */
	volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
				      *_first_desc = NULL;

	/* pointers to shadow descriptors */
	struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
			     *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
			     *first_desc = &first_desc_mem;

	/* pointer to shadow descriptors' context */
	struct wil_ctx *hdr_ctx, *first_ctx = NULL;

	int descs_used = 0; /* total number of used descriptors */
	int sg_desc_cnt = 0; /* number of descriptors for current mss*/

	u32 swhead = vring->swhead;
	int used, avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 1;
	int mss = skb_shinfo(skb)->gso_size;	/* payload size w/o headers */
	int f, len, hdrlen, headlen;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	const skb_frag_t *frag = NULL;
	int rem_data = mss;
	int lenmss;
	int hdr_compensation_need = true;
	int desc_tso_type = wil_tso_type_first;
	bool is_ipv4;
	int tcp_hdr_len;
	int skb_net_hdr_len;
	int gso_type;
	int rc = -EINVAL;

	wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
		     vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	/* A typical page 4K is 3-4 payloads, we assume each fragment
	 * is a full payload, that's how min_desc_required has been
	 * calculated. In real we might need more or less descriptors,
	 * this is the initial check only.
	 */
	if (unlikely(avail < min_desc_required)) {
		wil_err_ratelimited(wil,
				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, min_desc_required);
		return -ENOMEM;
	}

	/* Header Length = MAC header len + IP header len + TCP header len*/
	hdrlen = ETH_HLEN +
		(int)skb_network_header_len(skb) +
		tcp_hdrlen(skb);

	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
	switch (gso_type) {
	case SKB_GSO_TCPV4:
		/* TCP v4, zero out the IP length and IPv4 checksum fields
		 * as required by the offloading doc
		 */
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		is_ipv4 = true;
		break;
	case SKB_GSO_TCPV6:
		/* TCP v6, zero out the payload length */
		ipv6_hdr(skb)->payload_len = 0;
		is_ipv4 = false;
		break;
	default:
		/* other than TCPv4 or TCPv6 types are not supported for TSO.
		 * It is also illegal for both to be set simultaneously
		 */
		return -EINVAL;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return -EINVAL;

	/* tcp header length and skb network header length are fixed for all
	 * packet's descriptors - read then once here
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	skb_net_hdr_len = skb_network_header_len(skb);

	_hdr_desc = &vring->va[i].tx;

	pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		wil_err(wil, "TSO: Skb head DMA map error\n");
		goto err_exit;
	}

	wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
	wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
				      tcp_hdr_len, skb_net_hdr_len);
	wil_tx_last_desc(hdr_desc);

	vring->ctx[i].mapped_as = wil_mapped_as_single;
	hdr_ctx = &vring->ctx[i];

	descs_used++;
	headlen = skb_headlen(skb) - hdrlen;

	for (f = headlen ? -1 : 0; f < nr_frags; f++) {
		if (f == -1) {
			len = headlen;
			wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
				     len);
		} else {
			frag = &skb_shinfo(skb)->frags[f];
			len = frag->size;
			wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
		}

		while (len) {
			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d\n",
				     len, rem_data, descs_used);

			if (descs_used == avail)  {
				wil_err_ratelimited(wil, "TSO: ring overflow\n");
				rc = -ENOMEM;
				goto mem_error;
			}

			lenmss = min_t(int, rem_data, len);
			i = (swhead + descs_used) % vring->size;
			wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);

			if (!headlen) {
				pa = skb_frag_dma_map(dev, frag,
						      frag->size - len, lenmss,
						      DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_page;
			} else {
				pa = dma_map_single(dev,
						    skb->data +
						    skb_headlen(skb) - headlen,
						    lenmss,
						    DMA_TO_DEVICE);
				vring->ctx[i].mapped_as = wil_mapped_as_single;
				headlen -= lenmss;
			}

			if (unlikely(dma_mapping_error(dev, pa))) {
				wil_err(wil, "TSO: DMA map page error\n");
				goto mem_error;
			}

			_desc = &vring->va[i].tx;

			if (!_first_desc) {
				_first_desc = _desc;
				first_ctx = &vring->ctx[i];
				d = first_desc;
			} else {
				d = &desc_mem;
			}

			wil_tx_desc_map(d, pa, lenmss, vring_index);
			wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
						      is_ipv4, tcp_hdr_len,
						      skb_net_hdr_len);

			/* use tso_type_first only once */
			desc_tso_type = wil_tso_type_mid;

			descs_used++;  /* desc used so far */
			sg_desc_cnt++; /* desc used for this segment */
			len -= lenmss;
			rem_data -= lenmss;

			wil_dbg_txrx(wil,
				     "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
				     len, rem_data, descs_used, sg_desc_cnt);

			/* Close the segment if reached mss size or last frag*/
			if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
				if (hdr_compensation_need) {
					/* first segment include hdr desc for
					 * release
					 */
					hdr_ctx->nr_frags = sg_desc_cnt;
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt +
								 1);
					hdr_compensation_need = false;
				} else {
					wil_tx_desc_set_nr_frags(first_desc,
								 sg_desc_cnt);
				}
				first_ctx->nr_frags = sg_desc_cnt - 1;

				wil_tx_last_desc(d);

				/* first descriptor may also be the last
				 * for this mss - make sure not to copy
				 * it twice
				 */
				if (first_desc != d)
					*_first_desc = *first_desc;

				/*last descriptor will be copied at the end
				 * of this TS processing
				 */
				if (f < nr_frags - 1 || len > 0)
					*_desc = *d;

				rem_data = mss;
				_first_desc = NULL;
				sg_desc_cnt = 0;
			} else if (first_desc != d) /* update mid descriptor */
					*_desc = *d;
		}
	}

	/* first descriptor may also be the last.
	 * in this case d pointer is invalid
	 */
	if (_first_desc == _desc)
		d = first_desc;

	/* Last data descriptor */
	wil_set_tx_desc_last_tso(d);
	*_desc = *d;

	/* Fill the total number of descriptors in first desc (hdr)*/
	wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
	*_hdr_desc = *hdr_desc;

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(wil->vring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil,  "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + descs_used);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_vring_advance_head(vring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);
	return 0;

mem_error:
	while (descs_used > 0) {
		struct wil_ctx *ctx;

		i = (swhead + descs_used - 1) % vring->size;
		d = (struct vring_tx_desc *)&vring->va[i].tx;
		_desc = &vring->va[i].tx;
		*d = *_desc;
		_desc->dma.status = TX_DMA_STATUS_DU;
		ctx = &vring->ctx[i];
		wil_txdesc_unmap(dev, d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
err_exit:
	return rc;
}
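
/* TSO descriptor layout produced above (sketch):
 *   [hdr desc] [first data][mid]...[last]  repeated per mss-sized
 * segment until the skb head and all frags are consumed. The header
 * descriptor carries the total descriptor count, while each segment's
 * first data descriptor carries the per-segment count consumed by
 * wil_tx_complete().
 */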
static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			  struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = vring->swhead;
	int avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	uint i = swhead;
	dma_addr_t pa;
	int used;
	bool mcast = (vring_index == wil->bcast_vring);
	uint len = skb_headlen(skb);

	wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len,
		     vring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < 1 + nr_frags)) {
		wil_err_ratelimited(wil,
				    "Tx ring[%2d] full. No space for %d fragments\n",
				    vring_index, 1 + nr_frags);
		return -ENOMEM;
	}
	_d = &vring->va[i].tx;

	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index,
		     skb_headlen(skb), skb->data, &pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	vring->ctx[i].mapped_as = wil_mapped_as_single;
	/* 1-st segment */
	wil_tx_desc_map(d, pa, len, vring_index);
	if (unlikely(mcast)) {
		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
	}
	/* Process TCP/UDP checksum offloading */
	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
			vring_index);
		goto dma_error;
	}

	vring->ctx[i].nr_frags = nr_frags;
	wil_tx_desc_set_nr_frags(d, nr_frags + 1);

	/* middle segments */
	for (; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);

		*_d = *d;
		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
		wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)d, sizeof(*d), false);
		i = (swhead + f + 1) % vring->size;
		_d = &vring->va[i].tx;
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa))) {
			wil_err(wil, "Tx[%2d] failed to map fragment\n",
				vring_index);
			goto dma_error;
		}
		vring->ctx[i].mapped_as = wil_mapped_as_page;
		wil_tx_desc_map(d, pa, len, vring_index);
		/* no need to check return code -
		 * if it succeeded for 1-st descriptor,
		 * it will succeed here too
		 */
		wil_tx_desc_offload_setup(d, skb);
	}
	/* for the last seg only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;
	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i);
	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	/* performance monitoring */
	used = wil_vring_used_tx(vring);
	if (wil_val_in_range(wil->vring_idle_trsh,
			     used, used + nr_frags + 1)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil,  "Ring[%2d] not idle %d -> %d\n",
			     vring_index, used, used + nr_frags + 1);
	}

	/* Make sure to advance the head only after descriptor update is done.
	 * This will prevent a race condition where the completion thread
	 * will see the DU bit set from previous run and will handle the
	 * skb before it was completed.
	 */
	wmb();

	/* advance swhead */
	wil_vring_advance_head(vring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
		     vring->swhead);
	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, vring->hwtail, vring->swhead);

	return 0;
dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped + one for skb head */
	for (f = 0; f < nr_frags; f++) {
		struct wil_ctx *ctx;

		i = (swhead + f) % vring->size;
		ctx = &vring->ctx[i];
		_d = &vring->va[i].tx;
		*d = *_d;
		_d->dma.status = TX_DMA_STATUS_DU;
		wil_txdesc_unmap(dev, d, ctx);

		memset(ctx, 0, sizeof(*ctx));
	}

	return -EINVAL;
}
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb)
{
	int vring_index = vring - wil->vring_tx;
	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
	int rc;

	spin_lock(&txdata->lock);

	if (test_bit(wil_status_suspending, wil->status) ||
	    test_bit(wil_status_suspended, wil->status) ||
	    test_bit(wil_status_resuming, wil->status)) {
		wil_dbg_txrx(wil,
			     "suspend/resume in progress. drop packet\n");
		spin_unlock(&txdata->lock);
		return -EINVAL;
	}

	rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
	     (wil, vring, skb);

	spin_unlock(&txdata->lock);

	return rc;
}
/* Check status of tx vrings and stop/wake net queues if needed
 *
 * This function does one of two checks:
 * In case check_stop is true, will check if net queues need to be stopped. If
 * the conditions for stopping are met, netif_tx_stop_all_queues() is called.
 * In case check_stop is false, will check if net queues need to be waked. If
 * the conditions for waking are met, netif_tx_wake_all_queues() is called.
 * vring is the vring which is currently being modified by either adding
 * descriptors (tx) into it or removing descriptors (tx complete) from it. Can
 * be null when irrelevant (e.g. connect/disconnect events).
 *
 * The implementation is to stop net queues if modified vring has low
 * descriptor availability. Wake if all vrings are not in low descriptor
 * availability and modified vring has high descriptor availability.
 */
static inline void __wil_update_net_queues(struct wil6210_priv *wil,
					   struct vring *vring,
					   bool check_stop)
{
	int i;

	if (vring)
		wil_dbg_txrx(wil, "vring %d, check_stop=%d, stopped=%d",
			     (int)(vring - wil->vring_tx), check_stop,
			     wil->net_queue_stopped);
	else
		wil_dbg_txrx(wil, "check_stop=%d, stopped=%d",
			     check_stop, wil->net_queue_stopped);

	if (check_stop == wil->net_queue_stopped)
		/* net queues already in desired state */
		return;

	if (check_stop) {
		if (!vring || unlikely(wil_vring_avail_low(vring))) {
			/* not enough room in the vring */
			netif_tx_stop_all_queues(wil_to_ndev(wil));
			wil->net_queue_stopped = true;
			wil_dbg_txrx(wil, "netif_tx_stop called\n");
		}
		return;
	}

	/* Do not wake the queues in suspend flow */
	if (test_bit(wil_status_suspending, wil->status) ||
	    test_bit(wil_status_suspended, wil->status))
		return;

	/* check wake */
	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		struct vring *cur_vring = &wil->vring_tx[i];
		struct vring_tx_data *txdata = &wil->vring_tx_data[i];

		if (!cur_vring->va || !txdata->enabled || cur_vring == vring)
			continue;

		if (wil_vring_avail_low(cur_vring)) {
			wil_dbg_txrx(wil, "vring %d full, can't wake\n",
				     (int)(cur_vring - wil->vring_tx));
			return;
		}
	}

	if (!vring || wil_vring_avail_high(vring)) {
		/* enough room in the vring */
		wil_dbg_txrx(wil, "calling netif_tx_wake\n");
		netif_tx_wake_all_queues(wil_to_ndev(wil));
		wil->net_queue_stopped = false;
	}
}

void wil_update_net_queues(struct wil6210_priv *wil, struct vring *vring,
			   bool check_stop)
{
	spin_lock(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vring, check_stop);
	spin_unlock(&wil->net_queue_lock);
}

void wil_update_net_queues_bh(struct wil6210_priv *wil, struct vring *vring,
			      bool check_stop)
{
	spin_lock_bh(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vring, check_stop);
	spin_unlock_bh(&wil->net_queue_lock);
}
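
/* Flow control summary: with the watermarks defined earlier this means
 * the queues stop when the ring being filled drops below roughly 1/8
 * of its descriptors free, and wake only when every active ring is
 * above the low mark and the completed ring is above roughly 1/4 free.
 */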
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct ethhdr *eth = (void *)skb->data;
	bool bcast = is_multicast_ether_addr(eth->h_dest);
	struct vring *vring;
	static bool pr_once_fw;
	int rc;

	wil_dbg_txrx(wil, "start_xmit\n");
	if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
		wil_dbg_ratelimited(wil, "FW not connected, packet dropped\n");
		goto drop;
	}
	if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;

	/* find vring */
	if (wil->wdev->iftype == NL80211_IFTYPE_STATION && !wil->pbss) {
		/* in STA mode (ESS), all to same VRING (to AP) */
		vring = wil_find_tx_vring_sta(wil, skb);
	} else if (bcast) {
		if (wil->pbss)
			/* in pbss, no bcast VRING - duplicate skb in
			 * all stations VRINGs
			 */
			vring = wil_find_tx_bcast_2(wil, skb);
		else if (wil->wdev->iftype == NL80211_IFTYPE_AP)
			/* AP has a dedicated bcast VRING */
			vring = wil_find_tx_bcast_1(wil, skb);
		else
			/* unexpected combination, fallback to duplicating
			 * the skb in all stations VRINGs
			 */
			vring = wil_find_tx_bcast_2(wil, skb);
	} else {
		/* unicast, find specific VRING by dest. address */
		vring = wil_find_tx_ucast(wil, skb);
	}
	if (unlikely(!vring)) {
		wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_vring(wil, vring, skb);

	switch (rc) {
	case 0:
		/* shall we stop net queues? */
		wil_update_net_queues_bh(wil, vring, true);
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}
static inline bool wil_need_txstat(struct sk_buff *skb)
{
	struct ethhdr *eth = (void *)skb->data;

	return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
	       (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
}

static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
{
	if (unlikely(wil_need_txstat(skb)))
		skb_complete_wifi_ack(skb, acked);
	else
		acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
}
/* Clean up transmitted skb's from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct device *dev = wil_to_dev(wil);
	struct vring *vring = &wil->vring_tx[ringid];
	struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
	int done = 0;
	int cid = wil->vring2cid_tid[ringid][0];
	struct wil_net_stats *stats = NULL;
	volatile struct vring_tx_desc *_d;
	int used_before_complete;
	int used_new;

	if (unlikely(!vring->va)) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (unlikely(!txdata->enabled)) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);

	used_before_complete = wil_vring_used_tx(vring);

	if (cid < WIL6210_MAX_CID)
		stats = &wil->sta[cid].stats;

	while (!wil_vring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/**
		 * For the fragmented skb, HW will set DU bit only for the
		 * last fragment. look for it.
		 * In TSO the first DU will include hdr desc
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
		/* TODO: check we are not past head */

		_d = &vring->va[lf].tx;
		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
			break;

		new_swtail = (lf + 1) % vring->size;
		while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb;

			ctx = &vring->ctx[vring->swtail];
			skb = ctx->skb;
			_d = &vring->va[vring->swtail].tx;

			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
					      d->dma.error);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
				     ringid, vring->swtail, dmalen,
				     d->dma.status, d->dma.error);
			wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);

			wil_txdesc_unmap(dev, d, ctx);

			if (skb) {
				if (likely(d->dma.error == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, d->dma.error == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the tail
			 * to prevent a case where wil_tx_vring will see
			 * this descriptor as used and handle it before ctx zero
			 * is completed.
			 */
			wmb();
			/* There is no need to touch HW descriptor:
			 * - ststus bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this desc.,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_vring_next_tail(vring);
			done++;
		}
	}

	/* performance monitoring */
	used_new = wil_vring_used_tx(vring);
	if (wil_val_in_range(wil->vring_idle_trsh,
			     used_new, used_before_complete)) {
		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
			     ringid, used_before_complete, used_new);
		txdata->last_idle = get_cycles();
	}

	/* shall we wake net queues? */
	if (done)
		wil_update_net_queues(wil, vring, false);

	return done;
}