// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 */
#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "txrx_edma.h"
bool rx_align_2;
module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");

bool rx_large_buf;
module_param(rx_large_buf, bool, 0444);
MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");

/* Drop Tx packets in case Tx ring is full */
bool drop_if_ring_full;
static inline uint wil_rx_snaplen(void)
{
        return rx_align_2 ? 6 : 0;
}
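/* Note (editorial assumption): with rx_align_2 set, Rx buffers start on a
 * 4*n+2 address and a 6-byte SNAP header is expected in front of the
 * payload, which wil_rx_snaplen() accounts for; the SNAP bytes are stripped
 * again in wil_vring_reap_rx() before the frame goes up the stack,
 * presumably so that the IP header ends up 4-byte aligned.
 */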
/* wil_ring_wmark_low - low watermark for available descriptor space */
static inline int wil_ring_wmark_low(struct wil_ring *ring)
{
        return ring->size / 8;
}
/* wil_ring_wmark_high - high watermark for available descriptor space */
static inline int wil_ring_wmark_high(struct wil_ring *ring)
{
        return ring->size / 4;
}
/* returns true if num avail descriptors is lower than wmark_low */
static inline int wil_ring_avail_low(struct wil_ring *ring)
{
        return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
}
/* returns true if num avail descriptors is higher than wmark_high */
static inline int wil_ring_avail_high(struct wil_ring *ring)
{
        return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
}
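/* Example: for a 1024-descriptor Tx ring the low watermark is
 * 1024 / 8 = 128 free descriptors and the high watermark is 1024 / 4 = 256.
 * The gap between the two gives the stop/wake logic in
 * __wil_update_net_queues() hysteresis, so net queues are not toggled on
 * every completed descriptor.
 */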
/* returns true when all tx vrings are empty */
bool wil_is_tx_idle(struct wil6210_priv *wil)
{
        int i;
        unsigned long data_comp_to;
        int min_ring_id = wil_get_min_tx_ring_id(wil);

        for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
                struct wil_ring *vring = &wil->ring_tx[i];
                int vring_index = vring - wil->ring_tx;
                struct wil_ring_tx_data *txdata =
                        &wil->ring_tx_data[vring_index];

                spin_lock(&txdata->lock);

                if (!vring->va || !txdata->enabled) {
                        spin_unlock(&txdata->lock);
                        continue;
                }

                data_comp_to = jiffies + msecs_to_jiffies(
                        WIL_DATA_COMPLETION_TO_MS);
                if (test_bit(wil_status_napi_en, wil->status)) {
                        while (!wil_ring_is_empty(vring)) {
                                if (time_after(jiffies, data_comp_to)) {
                                        wil_dbg_pm(wil,
                                                   "TO waiting for idle tx\n");
                                        spin_unlock(&txdata->lock);
                                        return false;
                                }
                                wil_dbg_ratelimited(wil,
                                                    "tx vring is not empty -> NAPI\n");
                                spin_unlock(&txdata->lock);
                                napi_synchronize(&wil->napi_tx);

                                spin_lock(&txdata->lock);
                                if (!vring->va || !txdata->enabled)
                                        break;
                        }
                }

                spin_unlock(&txdata->lock);
        }

        return true;
}
static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
{
        struct device *dev = wil_to_dev(wil);
        size_t sz = vring->size * sizeof(vring->va[0]);
        uint i;

        wil_dbg_misc(wil, "vring_alloc:\n");

        BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

        vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
        if (!vring->ctx)
                return -ENOMEM;

        /* vring->va should be aligned on its size rounded up to power of 2
         * This is granted by the dma_alloc_coherent.
         *
         * HW has limitation that all vrings addresses must share the same
         * upper 16 msb bits part of 48 bits address. To workaround that,
         * if we are using more than 32 bit addresses switch to 32 bit
         * allocation before allocating vring memory.
         *
         * There's no check for the return value of dma_set_mask_and_coherent,
         * since we assume if we were able to set the mask during
         * initialization in this system it will not fail if we set it again
         */
        if (wil->dma_addr_size > 32)
                dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

        vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
        if (!vring->va) {
                kfree(vring->ctx);
                vring->ctx = NULL;
                return -ENOMEM;
        }

        if (wil->dma_addr_size > 32)
                dma_set_mask_and_coherent(dev,
                                          DMA_BIT_MASK(wil->dma_addr_size));

        /* initially, all descriptors are SW owned
         * For Tx and Rx, ownership bit is at the same location, thus
         * we can use any
         */
        for (i = 0; i < vring->size; i++) {
                volatile struct vring_tx_desc *_d =
                        &vring->va[i].tx.legacy;

                _d->dma.status = TX_DMA_STATUS_DU;
        }

        wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
                     vring->va, &vring->pa, vring->ctx);

        return 0;
}
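/* Each legacy descriptor is 32 bytes (enforced by the BUILD_BUG_ON above),
 * so e.g. a 1024-entry ring needs a single 32 KiB dma_alloc_coherent()
 * area, while the per-descriptor software context lives in the separate
 * kcalloc()'ed vring->ctx array.
 */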
static void wil_txdesc_unmap(struct device *dev, union wil_tx_desc *desc,
                             struct wil_ctx *ctx)
{
        struct vring_tx_desc *d = &desc->legacy;
        dma_addr_t pa = wil_desc_addr(&d->dma.addr);
        u16 dmalen = le16_to_cpu(d->dma.length);

        switch (ctx->mapped_as) {
        case wil_mapped_as_single:
                dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
                break;
        case wil_mapped_as_page:
                dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
                break;
        default:
                break;
        }
}
static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
{
        struct device *dev = wil_to_dev(wil);
        size_t sz = vring->size * sizeof(vring->va[0]);

        lockdep_assert_held(&wil->mutex);
        if (!vring->is_rx) {
                int vring_index = vring - wil->ring_tx;

                wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
                             vring_index, vring->size, vring->va,
                             &vring->pa, vring->ctx);
        } else {
                wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
                             vring->size, vring->va,
                             &vring->pa, vring->ctx);
        }

        while (!wil_ring_is_empty(vring)) {
                dma_addr_t pa;
                u16 dmalen;
                struct wil_ctx *ctx;

                if (!vring->is_rx) {
                        struct vring_tx_desc dd, *d = &dd;
                        volatile struct vring_tx_desc *_d =
                                &vring->va[vring->swtail].tx.legacy;

                        ctx = &vring->ctx[vring->swtail];
                        if (!ctx) {
                                wil_dbg_txrx(wil,
                                             "ctx(%d) was already completed\n",
                                             vring->swtail);
                                vring->swtail = wil_ring_next_tail(vring);
                                continue;
                        }
                        *d = *_d;
                        wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
                        if (ctx->skb)
                                dev_kfree_skb_any(ctx->skb);
                        vring->swtail = wil_ring_next_tail(vring);
                } else { /* rx */
                        struct vring_rx_desc dd, *d = &dd;
                        volatile struct vring_rx_desc *_d =
                                &vring->va[vring->swhead].rx.legacy;

                        ctx = &vring->ctx[vring->swhead];
                        *d = *_d;
                        pa = wil_desc_addr(&d->dma.addr);
                        dmalen = le16_to_cpu(d->dma.length);
                        dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
                        kfree_skb(ctx->skb);
                        wil_ring_advance_head(vring, 1);
                }
        }
        dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
        kfree(vring->ctx);
        vring->pa = 0;
        vring->va = NULL;
        vring->ctx = NULL;
}
/* Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
                               u32 i, int headroom)
{
        struct device *dev = wil_to_dev(wil);
        unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
        struct vring_rx_desc dd, *d = &dd;
        volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
        dma_addr_t pa;
        struct sk_buff *skb = dev_alloc_skb(sz + headroom);

        if (unlikely(!skb))
                return -ENOMEM;

        skb_reserve(skb, headroom);
        skb_put(skb, sz);

        /* Make sure that the network stack calculates checksum for packets
         * which failed the HW checksum calculation
         */
        skb->ip_summed = CHECKSUM_NONE;

        pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(dev, pa))) {
                kfree_skb(skb);
                return -ENOMEM;
        }

        d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
        wil_desc_addr_set(&d->dma.addr, pa);
        /* ip_length don't care */
        /* error don't care */
        d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
        d->dma.length = cpu_to_le16(sz);
        *_d = *d;
        vring->ctx[i].skb = skb;

        return 0;
}
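/* Buffer sizing sketch: every Rx skb is dev_alloc_skb(sz + headroom) where
 * sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen(); headroom is non-zero
 * only when the main interface is a monitor (radiotap) interface, see
 * wil_rx_refill() below.
 */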
/* Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
                                       struct sk_buff *skb)
{
        struct wil6210_rtap {
                struct ieee80211_radiotap_header rthdr;
                /* fields should be in the order of bits in rthdr.it_present */
                /* flags */
                u8 flags;
                /* channel */
                __le16 chnl_freq __aligned(2);
                __le16 chnl_flags;
                /* MCS */
                u8 mcs_present;
                u8 mcs_flags;
                u8 mcs_index;
        } __packed;
        struct vring_rx_desc *d = wil_skb_rxdesc(skb);
        struct wil6210_rtap *rtap;
        int rtap_len = sizeof(struct wil6210_rtap);
        struct ieee80211_channel *ch = wil->monitor_chandef.chan;

        if (skb_headroom(skb) < rtap_len &&
            pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
                wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
                return;
        }

        rtap = skb_push(skb, rtap_len);
        memset(rtap, 0, rtap_len);

        rtap->rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
        rtap->rthdr.it_len = cpu_to_le16(rtap_len);
        rtap->rthdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
                        (1 << IEEE80211_RADIOTAP_CHANNEL) |
                        (1 << IEEE80211_RADIOTAP_MCS));
        if (d->dma.status & RX_DMA_STATUS_ERROR)
                rtap->flags |= IEEE80211_RADIOTAP_F_BADFCS;

        rtap->chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
        rtap->chnl_flags = cpu_to_le16(0);

        rtap->mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
        rtap->mcs_index = wil_rxdesc_mcs(d);
}
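/* The 58320 MHz fallback used above when no monitor channel is configured
 * corresponds to the center frequency of 60 GHz band channel 1.
 */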
static bool wil_is_rx_idle(struct wil6210_priv *wil)
{
        struct vring_rx_desc *_d;
        struct wil_ring *ring = &wil->ring_rx;

        _d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy;
        if (_d->dma.status & RX_DMA_STATUS_DU)
                return false;

        return true;
}
static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb)
{
        struct vring_rx_desc *d = wil_skb_rxdesc(skb);
        int mid = wil_rxdesc_mid(d);
        struct wil6210_vif *vif = wil->vifs[mid];
        /* cid from DMA descriptor is limited to 3 bits.
         * In case of cid>=8, the value would be cid modulo 8 and we need to
         * find real cid by locating the transmitter (ta) inside sta array
         */
        int cid = wil_rxdesc_cid(d);
        unsigned int snaplen = wil_rx_snaplen();
        struct ieee80211_hdr_3addr *hdr;
        int i;
        unsigned char *ta;
        u8 ftype;

        /* in monitor mode there are no connections */
        if (vif->wdev.iftype == NL80211_IFTYPE_MONITOR)
                return cid;

        ftype = wil_rxdesc_ftype(d) << 2;
        if (likely(ftype == IEEE80211_FTYPE_DATA)) {
                if (unlikely(skb->len < ETH_HLEN + snaplen)) {
                        wil_err_ratelimited(wil,
                                            "Short data frame, len = %d\n",
                                            skb->len);
                        return -ENOENT;
                }
                ta = wil_skb_get_sa(skb);
        } else {
                if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
                        wil_err_ratelimited(wil, "Short frame, len = %d\n",
                                            skb->len);
                        return -ENOENT;
                }
                hdr = (void *)skb->data;
                ta = hdr->addr2;
        }

        if (wil->max_assoc_sta <= WIL6210_RX_DESC_MAX_CID)
                return cid;

        /* assuming no concurrency between AP interfaces and STA interfaces.
         * multista is used only in P2P_GO or AP mode. In other modes return
         * cid from the rx descriptor
         */
        if (vif->wdev.iftype != NL80211_IFTYPE_P2P_GO &&
            vif->wdev.iftype != NL80211_IFTYPE_AP)
                return cid;

        /* For Rx packets cid from rx descriptor is limited to 3 bits (0..7),
         * to find the real cid, compare transmitter address with the stored
         * stations mac address in the driver sta array
         */
        for (i = cid; i < wil->max_assoc_sta; i += WIL6210_RX_DESC_MAX_CID) {
                if (wil->sta[i].status != wil_sta_unused &&
                    ether_addr_equal(wil->sta[i].addr, ta)) {
                        cid = i;
                        break;
                }
        }
        if (i >= wil->max_assoc_sta) {
                wil_err_ratelimited(wil, "Could not find cid for frame with transmit addr = %pM, iftype = %d, frametype = %d, len = %d\n",
                                    ta, vif->wdev.iftype, ftype, skb->len);
                cid = -ENOENT;
        }

        return cid;
}
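/* Worked example (the descriptor cid field is 3 bits, so
 * WIL6210_RX_DESC_MAX_CID is 8): with max_assoc_sta = 16 and a descriptor
 * cid of 3, the real cid is either 3 or 11; the loop above picks the sta
 * entry whose stored MAC address matches the transmitter address.
 */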
/* reap 1 frame from @swhead
 *
 * Rx descriptor copied to skb->cb
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
                                         struct wil_ring *vring)
{
        struct device *dev = wil_to_dev(wil);
        struct wil6210_vif *vif;
        struct net_device *ndev;
        volatile struct vring_rx_desc *_d;
        struct vring_rx_desc *d;
        struct sk_buff *skb;
        dma_addr_t pa;
        unsigned int snaplen = wil_rx_snaplen();
        unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
        u16 dmalen;
        u8 ftype;
        int cid, mid;
        int i;
        struct wil_net_stats *stats;

        BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb));

again:
        if (unlikely(wil_ring_is_empty(vring)))
                return NULL;

        i = (int)vring->swhead;
        _d = &vring->va[i].rx.legacy;
        if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
                /* it is not error, we just reached end of Rx done area */
                return NULL;
        }

        skb = vring->ctx[i].skb;
        vring->ctx[i].skb = NULL;
        wil_ring_advance_head(vring, 1);
        if (!skb) {
                wil_err(wil, "No Rx skb at [%d]\n", i);
                goto again;
        }
        d = wil_skb_rxdesc(skb);
        *d = *_d;
        pa = wil_desc_addr(&d->dma.addr);

        dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
        dmalen = le16_to_cpu(d->dma.length);

        trace_wil6210_rx(i, d);
        wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
        wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
                          (const void *)d, sizeof(*d), false);

        mid = wil_rxdesc_mid(d);
        vif = wil->vifs[mid];

        if (unlikely(!vif)) {
                wil_dbg_txrx(wil, "skipped RX descriptor with invalid mid %d",
                             mid);
                kfree_skb(skb);
                goto again;
        }
        ndev = vif_to_ndev(vif);
        if (unlikely(dmalen > sz)) {
                wil_err_ratelimited(wil, "Rx size too large: %d bytes!\n",
                                    dmalen);
                kfree_skb(skb);
                goto again;
        }
        skb_trim(skb, dmalen);

        wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
                          skb->data, skb_headlen(skb), false);

        cid = wil_rx_get_cid_by_skb(wil, skb);
        if (cid == -ENOENT) {
                kfree_skb(skb);
                goto again;
        }
        wil_skb_set_cid(skb, (u8)cid);
        stats = &wil->sta[cid].stats;

        stats->last_mcs_rx = wil_rxdesc_mcs(d);
        if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
                stats->rx_per_mcs[stats->last_mcs_rx]++;

        /* use radiotap header only if required */
        if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
                wil_rx_add_radiotap_header(wil, skb);

        /* no extra checks if in sniffer mode */
        if (ndev->type != ARPHRD_ETHER)
                return skb;
        /* Non-data frames may be delivered through Rx DMA channel (ex: BAR)
         * Driver should recognize it by frame type, that is found
         * in Rx descriptor. If type is not data, it is 802.11 frame as is
         */
        ftype = wil_rxdesc_ftype(d) << 2;
        if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
                u8 fc1 = wil_rxdesc_fc1(d);
                int tid = wil_rxdesc_tid(d);
                u16 seq = wil_rxdesc_seq(d);

                wil_dbg_txrx(wil,
                             "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
                             fc1, mid, cid, tid, seq);
                stats->rx_non_data_frame++;
                if (wil_is_back_req(fc1)) {
                        wil_dbg_txrx(wil,
                                     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
                                     mid, cid, tid, seq);
                        wil_rx_bar(wil, vif, cid, tid, seq);
                } else {
                        /* print again all info. One can enable only this
                         * without overhead for printing every Rx frame
                         */
                        wil_dbg_txrx(wil,
                                     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
                                     fc1, mid, cid, tid, seq);
                        wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
                                          (const void *)d, sizeof(*d), false);
                        wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
                                          skb->data, skb_headlen(skb), false);
                }
                kfree_skb(skb);
                goto again;
        }

        /* L4 IDENT is on when HW calculated checksum, check status
         * and in case of error drop the packet
         * higher stack layers will handle retransmission (if required)
         */
        if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
                /* L4 protocol identified, csum calculated */
                if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                /* If HW reports bad checksum, let IP stack re-check it
                 * For example, HW doesn't understand Microsoft IP stack that
                 * mis-calculates TCP checksum - if it should be 0x0,
                 * it writes 0xffff in violation of RFC 1624
                 */
                else
                        stats->rx_csum_err++;
        }

        if (snaplen) {
                /* Packet layout
                 * +-------+-------+---------+------------+------+
                 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
                 * +-------+-------+---------+------------+------+
                 * Need to remove SNAP, shifting SA and DA forward
                 */
                memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
                skb_pull(skb, snaplen);
        }

        return skb;
}
/* allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 * Note: we have a single RX queue for servicing all VIFs, but we
 * allocate skbs with headroom according to main interface only. This
 * means it will not work with monitor interface together with other VIFs.
 * Currently we only support monitor interface on its own without other VIFs,
 * and we will need to fix this code once we add support.
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
        struct net_device *ndev = wil->main_ndev;
        struct wil_ring *v = &wil->ring_rx;
        u32 next_tail;
        int rc = 0;
        int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
                        WIL6210_RTAP_SIZE : 0;

        for (; next_tail = wil_ring_next_tail(v),
             (next_tail != v->swhead) && (count-- > 0);
             v->swtail = next_tail) {
                rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
                if (unlikely(rc)) {
                        wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
                                            rc, v->swtail);
                        break;
                }
        }

        /* make sure all writes to descriptors (shared memory) are done before
         * committing them to HW
         */
        wmb();

        wil_w(wil, v->hwtail, v->swtail);

        return rc;
}
/**
 * reverse_memcmp - Compare two areas of memory, in reverse order
 * @cs: One area of memory
 * @ct: Another area of memory
 * @count: The size of the area.
 *
 * Cut'n'paste from original memcmp (see lib/string.c)
 * with minimal modifications
 */
int reverse_memcmp(const void *cs, const void *ct, size_t count)
{
        const unsigned char *su1, *su2;
        int res = 0;

        for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
             --su1, --su2, count--) {
                res = *su1 - *su2;
                if (res)
                        break;
        }
        return res;
}
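/* reverse_memcmp() walks from the last byte toward the first, i.e. the byte
 * at the highest offset is treated as the most significant one.
 * wil_rx_crypto_check() below relies on this when comparing GCMP packet
 * numbers, which the descriptor stores least-significant part first
 * (pn_15_0 comes first).
 */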
static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
{
        struct vring_rx_desc *d = wil_skb_rxdesc(skb);
        int cid = wil_skb_get_cid(skb);
        int tid = wil_rxdesc_tid(d);
        int key_id = wil_rxdesc_key_id(d);
        int mc = wil_rxdesc_mcast(d);
        struct wil_sta_info *s = &wil->sta[cid];
        struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
                                      &s->tid_crypto_rx[tid];
        struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
        const u8 *pn = (u8 *)&d->mac.pn_15_0;

        if (!cc->key_set) {
                wil_err_ratelimited(wil,
                                    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
                                    cid, tid, mc, key_id);
                return -EINVAL;
        }

        if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
                wil_err_ratelimited(wil,
                                    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
                                    cid, tid, mc, key_id, pn, cc->pn);
                return -EINVAL;
        }
        memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);

        return 0;
}
static int wil_rx_error_check(struct wil6210_priv *wil, struct sk_buff *skb,
                              struct wil_net_stats *stats)
{
        struct vring_rx_desc *d = wil_skb_rxdesc(skb);

        if ((d->dma.status & RX_DMA_STATUS_ERROR) &&
            (d->dma.error & RX_DMA_ERROR_MIC)) {
                stats->rx_mic_error++;
                wil_dbg_txrx(wil, "MIC error, dropping packet\n");
                return -EFAULT;
        }

        return 0;
}
static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
                                    int *security)
{
        struct vring_rx_desc *d = wil_skb_rxdesc(skb);

        *cid = wil_skb_get_cid(skb);
        *security = wil_rxdesc_security(d);
}
/* Check if skb is ptk eapol key message
 *
 * returns a pointer to the start of the eapol key structure, NULL
 * if frame is not PTK eapol key
 */
static struct wil_eapol_key *wil_is_ptk_eapol_key(struct wil6210_priv *wil,
                                                  struct sk_buff *skb)
{
        u8 *buf;
        const struct wil_1x_hdr *hdr;
        struct wil_eapol_key *key;
        u16 key_info;
        int len = skb->len;

        if (!skb_mac_header_was_set(skb)) {
                wil_err(wil, "mac header was not set\n");
                return NULL;
        }

        len -= skb_mac_offset(skb);

        if (len < sizeof(struct ethhdr) + sizeof(struct wil_1x_hdr) +
            sizeof(struct wil_eapol_key))
                return NULL;

        buf = skb_mac_header(skb) + sizeof(struct ethhdr);

        hdr = (const struct wil_1x_hdr *)buf;
        if (hdr->type != WIL_1X_TYPE_EAPOL_KEY)
                return NULL;

        key = (struct wil_eapol_key *)(buf + sizeof(struct wil_1x_hdr));
        if (key->type != WIL_EAPOL_KEY_TYPE_WPA &&
            key->type != WIL_EAPOL_KEY_TYPE_RSN)
                return NULL;

        key_info = be16_to_cpu(key->key_info);
        if (!(key_info & WIL_KEY_INFO_KEY_TYPE)) /* check if pairwise */
                return NULL;

        return key;
}
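/* Frame layout assumed by wil_is_ptk_eapol_key(), starting at the mac
 * header: [ethhdr][wil_1x_hdr][wil_eapol_key ...]. The Key Type bit in
 * key_info distinguishes a pairwise (PTK) handshake message from a group
 * key message.
 */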
static bool wil_skb_is_eap_3(struct wil6210_priv *wil, struct sk_buff *skb)
{
        struct wil_eapol_key *key;
        u16 key_info;

        key = wil_is_ptk_eapol_key(wil, skb);
        if (!key)
                return false;

        key_info = be16_to_cpu(key->key_info);
        if (key_info & (WIL_KEY_INFO_MIC |
                        WIL_KEY_INFO_ENCR_KEY_DATA)) {
                /* 3/4 of 4-Way Handshake */
                wil_dbg_misc(wil, "EAPOL key message 3\n");
                return true;
        }
        /* 1/4 of 4-Way Handshake */
        wil_dbg_misc(wil, "EAPOL key message 1\n");

        return false;
}
static bool wil_skb_is_eap_4(struct wil6210_priv *wil, struct sk_buff *skb)
{
        struct wil_eapol_key *key;
        u32 *nonce, i;

        key = wil_is_ptk_eapol_key(wil, skb);
        if (!key)
                return false;

        nonce = (u32 *)key->key_nonce;
        for (i = 0; i < WIL_EAP_NONCE_LEN / sizeof(u32); i++, nonce++) {
                if (*nonce != 0) {
                        /* message 2/4 */
                        wil_dbg_misc(wil, "EAPOL key message 2\n");
                        return false;
                }
        }
        wil_dbg_misc(wil, "EAPOL key message 4\n");

        return true;
}
void wil_enable_tx_key_worker(struct work_struct *work)
{
        struct wil6210_vif *vif = container_of(work,
                        struct wil6210_vif, enable_tx_key_worker);
        struct wil6210_priv *wil = vif_to_wil(vif);
        int rc, cid;

        rtnl_lock();
        if (vif->ptk_rekey_state != WIL_REKEY_WAIT_M4_SENT) {
                wil_dbg_misc(wil, "Invalid rekey state = %d\n",
                             vif->ptk_rekey_state);
                rtnl_unlock();
                return;
        }

        cid = wil_find_cid_by_idx(wil, vif->mid, 0);
        if (!wil_cid_valid(wil, cid)) {
                wil_err(wil, "Invalid cid = %d\n", cid);
                rtnl_unlock();
                return;
        }

        wil_dbg_misc(wil, "Apply PTK key after eapol was sent out\n");
        rc = wmi_add_cipher_key(vif, 0, wil->sta[cid].addr, 0, NULL,
                                WMI_KEY_USE_APPLY_PTK);

        vif->ptk_rekey_state = WIL_REKEY_IDLE;
        rtnl_unlock();

        if (rc)
                wil_err(wil, "Apply PTK key failed %d\n", rc);
}
void wil_tx_complete_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
{
        struct wil6210_priv *wil = vif_to_wil(vif);
        struct wireless_dev *wdev = vif_to_wdev(vif);
        bool q = false;

        if (wdev->iftype != NL80211_IFTYPE_STATION ||
            !test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities))
                return;

        /* check if skb is an EAP message 4/4 */
        if (!wil_skb_is_eap_4(wil, skb))
                return;

        spin_lock_bh(&wil->eap_lock);
        switch (vif->ptk_rekey_state) {
        case WIL_REKEY_IDLE:
                /* ignore idle state, can happen due to M4 retransmission */
                break;
        case WIL_REKEY_M3_RECEIVED:
                vif->ptk_rekey_state = WIL_REKEY_IDLE;
                break;
        case WIL_REKEY_WAIT_M4_SENT:
                q = true;
                break;
        default:
                wil_err(wil, "Unknown rekey state = %d",
                        vif->ptk_rekey_state);
        }
        spin_unlock_bh(&wil->eap_lock);

        if (q) {
                q = queue_work(wil->wmi_wq, &vif->enable_tx_key_worker);
                wil_dbg_misc(wil, "queue_work of enable_tx_key_worker -> %d\n",
                             q);
        }
}
static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
{
        struct wil6210_priv *wil = vif_to_wil(vif);
        struct wireless_dev *wdev = vif_to_wdev(vif);

        if (wdev->iftype != NL80211_IFTYPE_STATION ||
            !test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities))
                return;

        /* check if skb is a EAP message 3/4 */
        if (!wil_skb_is_eap_3(wil, skb))
                return;

        if (vif->ptk_rekey_state == WIL_REKEY_IDLE)
                vif->ptk_rekey_state = WIL_REKEY_M3_RECEIVED;
}
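/* PTK rekey flow as seen from this file: receiving EAPOL message 3/4 moves
 * ptk_rekey_state from IDLE to M3_RECEIVED; when the matching message 4/4
 * completes on Tx, the state is either reset or, if the state machine
 * (driven elsewhere) reached WAIT_M4_SENT, enable_tx_key_worker is queued
 * to apply the new PTK via WMI_KEY_USE_APPLY_PTK.
 */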
/* Pass Rx packet to the netif. Update statistics.
 * Called in softirq context (NAPI poll).
 */
void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
                  struct wil_net_stats *stats, bool gro)
{
        gro_result_t rc = GRO_NORMAL;
        struct wil6210_vif *vif = ndev_to_vif(ndev);
        struct wil6210_priv *wil = ndev_to_wil(ndev);
        struct wireless_dev *wdev = vif_to_wdev(vif);
        unsigned int len = skb->len;
        u8 *sa, *da = wil_skb_get_da(skb);
        /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
         * is not suitable, need to look at data
         */
        int mcast = is_multicast_ether_addr(da);
        struct sk_buff *xmit_skb = NULL;
        static const char * const gro_res_str[] = {
                [GRO_MERGED]            = "GRO_MERGED",
                [GRO_MERGED_FREE]       = "GRO_MERGED_FREE",
                [GRO_HELD]              = "GRO_HELD",
                [GRO_NORMAL]            = "GRO_NORMAL",
                [GRO_DROP]              = "GRO_DROP",
                [GRO_CONSUMED]          = "GRO_CONSUMED",
        };

        if (wdev->iftype == NL80211_IFTYPE_STATION) {
                sa = wil_skb_get_sa(skb);
                if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
                        /* mcast packet looped back to us */
                        rc = GRO_DROP;
                        dev_kfree_skb(skb);
                        goto stats;
                }
        } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
                if (mcast) {
                        /* send multicast frames both to higher layers in
                         * local net stack and back to the wireless medium
                         */
                        xmit_skb = skb_copy(skb, GFP_ATOMIC);
                } else {
                        int xmit_cid = wil_find_cid(wil, vif->mid, da);

                        if (xmit_cid >= 0) {
                                /* The destination station is associated to
                                 * this AP (in this VLAN), so send the frame
                                 * directly to it and do not pass it to local
                                 * net stack.
                                 */
                                xmit_skb = skb;
                                skb = NULL;
                        }
                }
        }
        if (xmit_skb) {
                /* Send to wireless media and increase priority by 256 to
                 * keep the received priority instead of reclassifying
                 * the frame (see cfg80211_classify8021d).
                 */
                xmit_skb->dev = ndev;
                xmit_skb->priority += 256;
                xmit_skb->protocol = htons(ETH_P_802_3);
                skb_reset_network_header(xmit_skb);
                skb_reset_mac_header(xmit_skb);
                wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
                dev_queue_xmit(xmit_skb);
        }

        if (skb) { /* deliver to local stack */
                skb->protocol = eth_type_trans(skb, ndev);
                skb->dev = ndev;

                if (skb->protocol == cpu_to_be16(ETH_P_PAE))
                        wil_rx_handle_eapol(vif, skb);

                if (gro)
                        rc = napi_gro_receive(&wil->napi_rx, skb);
                else
                        netif_rx_ni(skb);

                wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
                             len, gro_res_str[rc]);
        }
stats:
        /* statistics. rc set to GRO_NORMAL for AP bridging */
        if (unlikely(rc == GRO_DROP)) {
                ndev->stats.rx_dropped++;
                stats->rx_dropped++;
                wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
        } else {
                ndev->stats.rx_packets++;
                stats->rx_packets++;
                ndev->stats.rx_bytes += len;
                stats->rx_bytes += len;
                if (mcast)
                        ndev->stats.multicast++;
        }
}
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
        int cid, security;
        struct wil6210_priv *wil = ndev_to_wil(ndev);
        struct wil_net_stats *stats;

        wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);

        stats = &wil->sta[cid].stats;

        if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
                wil_dbg_txrx(wil, "Rx drop %d bytes\n", skb->len);
                dev_kfree_skb(skb);
                ndev->stats.rx_dropped++;
                stats->rx_replay++;
                stats->rx_dropped++;
                return;
        }

        /* check errors reported by HW and update statistics */
        if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) {
                dev_kfree_skb(skb);
                return;
        }

        wil_netif_rx(skb, ndev, cid, stats, true);
}
/* Process all completed skb's from Rx VRING
 *
 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
 */
void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
        struct net_device *ndev = wil->main_ndev;
        struct wireless_dev *wdev = ndev->ieee80211_ptr;
        struct wil_ring *v = &wil->ring_rx;
        struct sk_buff *skb;

        if (unlikely(!v->va)) {
                wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
                return;
        }
        wil_dbg_txrx(wil, "rx_handle\n");
        while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
                (*quota)--;

                /* monitor is currently supported on main interface only */
                if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
                        skb->dev = ndev;
                        skb_reset_mac_header(skb);
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        skb->pkt_type = PACKET_OTHERHOST;
                        skb->protocol = htons(ETH_P_802_2);
                        wil_netif_rx_any(skb, ndev);
                } else {
                        wil_rx_reorder(wil, skb);
                }
        }
        wil_rx_refill(wil, v->size);
}
static void wil_rx_buf_len_init(struct wil6210_priv *wil)
{
        wil->rx_buf_len = rx_large_buf ?
                WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
        if (mtu_max > wil->rx_buf_len) {
                /* do not allow RX buffers to be smaller than mtu_max, for
                 * backward compatibility (mtu_max parameter was also used
                 * to support receiving large packets)
                 */
                wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
                wil->rx_buf_len = mtu_max;
        }
}
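/* Resulting Rx buffer length: large (8KB-class, per the rx_large_buf module
 * parameter description) buffers when rx_large_buf is set, otherwise the
 * default Tx/Rx buffer length minus the maximal MPDU overhead, but never
 * smaller than the mtu_max module parameter.
 */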
static int wil_rx_init(struct wil6210_priv *wil, uint order)
{
        struct wil_ring *vring = &wil->ring_rx;
        int rc;

        wil_dbg_misc(wil, "rx_init\n");

        if (vring->va) {
                wil_err(wil, "Rx ring already allocated\n");
                return -EINVAL;
        }

        wil_rx_buf_len_init(wil);

        vring->size = 1 << order;
        vring->is_rx = true;
        rc = wil_vring_alloc(wil, vring);
        if (rc)
                return rc;

        rc = wmi_rx_chain_add(wil, vring);
        if (rc)
                goto err_free;

        rc = wil_rx_refill(wil, vring->size);
        if (rc)
                goto err_free;

        return 0;
 err_free:
        wil_vring_free(wil, vring);

        return rc;
}
static void wil_rx_fini(struct wil6210_priv *wil)
{
        struct wil_ring *vring = &wil->ring_rx;

        wil_dbg_misc(wil, "rx_fini\n");

        if (vring->va)
                wil_vring_free(wil, vring);
}
static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
                           u32 len, int vring_index)
{
        struct vring_tx_desc *d = &desc->legacy;

        wil_desc_addr_set(&d->dma.addr, pa);
        d->dma.ip_length = 0;
        /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
        d->dma.b11 = 0/*14 | BIT(7)*/;
        d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
        d->dma.length = cpu_to_le16((u16)len);
        d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
        d->mac.ucode_cmd = 0;
        /* translation type:  0 - bypass; 1 - 802.3; 2 - native wifi */
        d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
                      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

        return 0;
}
void wil_tx_data_init(struct wil_ring_tx_data *txdata)
{
        spin_lock_bh(&txdata->lock);
        txdata->dot1x_open = false;
        txdata->enabled = 0;
        txdata->idle = 0;
        txdata->last_idle = 0;
        txdata->agg_wsize = 0;
        txdata->agg_timeout = 0;
        txdata->agg_amsdu = 0;
        txdata->addba_in_progress = false;
        txdata->mid = U8_MAX;
        spin_unlock_bh(&txdata->lock);
}
static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
                             int cid, int tid)
{
        struct wil6210_priv *wil = vif_to_wil(vif);
        int rc;
        struct wmi_vring_cfg_cmd cmd = {
                .action = cpu_to_le32(WMI_VRING_CMD_ADD),
                .vring_cfg = {
                        .tx_sw_ring = {
                                .max_mpdu_size =
                                        cpu_to_le16(wil_mtu2macbuf(mtu_max)),
                                .ring_size = cpu_to_le16(size),
                        },
                        .ringid = id,
                        .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
                        .schd_params = {
                                .priority = cpu_to_le16(0),
                                .timeslot_us = cpu_to_le16(0xfff),
                        },
                },
        };
        struct {
                struct wmi_cmd_hdr wmi;
                struct wmi_vring_cfg_done_event cmd;
        } __packed reply = {
                .cmd = {.status = WMI_FW_STATUS_FAILURE},
        };
        struct wil_ring *vring = &wil->ring_tx[id];
        struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];

        if (cid >= WIL6210_RX_DESC_MAX_CID) {
                cmd.vring_cfg.cidxtid = CIDXTID_EXTENDED_CID_TID;
                cmd.vring_cfg.cid = cid;
                cmd.vring_cfg.tid = tid;
        } else {
                cmd.vring_cfg.cidxtid = mk_cidxtid(cid, tid);
        }

        wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
                     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
        lockdep_assert_held(&wil->mutex);

        if (vring->va) {
                wil_err(wil, "Tx ring [%d] already allocated\n", id);
                rc = -EINVAL;
                goto out;
        }

        wil_tx_data_init(txdata);
        vring->is_rx = false;
        vring->size = size;
        rc = wil_vring_alloc(wil, vring);
        if (rc)
                goto out;

        wil->ring2cid_tid[id][0] = cid;
        wil->ring2cid_tid[id][1] = tid;

        cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

        if (!vif->privacy)
                txdata->dot1x_open = true;
        rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
                      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
                      WIL_WMI_CALL_GENERAL_TO_MS);
        if (rc)
                goto out_free;

        if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
                wil_err(wil, "Tx config failed, status 0x%02x\n",
                        reply.cmd.status);
                rc = -EINVAL;
                goto out_free;
        }

        spin_lock_bh(&txdata->lock);
        vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
        txdata->mid = vif->mid;
        txdata->enabled = 1;
        spin_unlock_bh(&txdata->lock);

        if (txdata->dot1x_open && (agg_wsize >= 0))
                wil_addba_tx_request(wil, id, agg_wsize);

        return 0;
 out_free:
        spin_lock_bh(&txdata->lock);
        txdata->dot1x_open = false;
        txdata->enabled = 0;
        spin_unlock_bh(&txdata->lock);
        wil_vring_free(wil, vring);
        wil->ring2cid_tid[id][0] = wil->max_assoc_sta;
        wil->ring2cid_tid[id][1] = 0;

 out:
        return rc;
}
static int wil_tx_vring_modify(struct wil6210_vif *vif, int ring_id, int cid,
                               int tid)
{
        struct wil6210_priv *wil = vif_to_wil(vif);
        int rc;
        struct wmi_vring_cfg_cmd cmd = {
                .action = cpu_to_le32(WMI_VRING_CMD_MODIFY),
                .vring_cfg = {
                        .tx_sw_ring = {
                                .max_mpdu_size =
                                        cpu_to_le16(wil_mtu2macbuf(mtu_max)),
                        },
                        .ringid = ring_id,
                        .cidxtid = mk_cidxtid(cid, tid),
                        .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
                        .schd_params = {
                                .priority = cpu_to_le16(0),
                                .timeslot_us = cpu_to_le16(0xfff),
                        },
                },
        };
        struct {
                struct wmi_cmd_hdr wmi;
                struct wmi_vring_cfg_done_event cmd;
        } __packed reply = {
                .cmd = {.status = WMI_FW_STATUS_FAILURE},
        };
        struct wil_ring *vring = &wil->ring_tx[ring_id];
        struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

        wil_dbg_misc(wil, "vring_modify: ring %d cid %d tid %d\n", ring_id,
                     cid, tid);
        lockdep_assert_held(&wil->mutex);

        if (!vring->va) {
                wil_err(wil, "Tx ring [%d] not allocated\n", ring_id);
                return -EINVAL;
        }

        if (wil->ring2cid_tid[ring_id][0] != cid ||
            wil->ring2cid_tid[ring_id][1] != tid) {
                wil_err(wil, "ring info does not match cid=%u tid=%u\n",
                        wil->ring2cid_tid[ring_id][0],
                        wil->ring2cid_tid[ring_id][1]);
                return -EINVAL;
        }

        cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

        rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
                      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
                      WIL_WMI_CALL_GENERAL_TO_MS);
        if (rc)
                goto fail;

        if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
                wil_err(wil, "Tx modify failed, status 0x%02x\n",
                        reply.cmd.status);
                rc = -EINVAL;
                goto fail;
        }

        /* set BA aggregation window size to 0 to force a new BA with the
         * new AP
         */
        txdata->agg_wsize = 0;
        if (txdata->dot1x_open && agg_wsize >= 0)
                wil_addba_tx_request(wil, ring_id, agg_wsize);

        return 0;
fail:
        spin_lock_bh(&txdata->lock);
        txdata->dot1x_open = false;
        txdata->enabled = 0;
        spin_unlock_bh(&txdata->lock);
        wil->ring2cid_tid[ring_id][0] = wil->max_assoc_sta;
        wil->ring2cid_tid[ring_id][1] = 0;
        return rc;
}
int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
{
        struct wil6210_priv *wil = vif_to_wil(vif);
        int rc;
        struct wmi_bcast_vring_cfg_cmd cmd = {
                .action = cpu_to_le32(WMI_VRING_CMD_ADD),
                .vring_cfg = {
                        .tx_sw_ring = {
                                .max_mpdu_size =
                                        cpu_to_le16(wil_mtu2macbuf(mtu_max)),
                                .ring_size = cpu_to_le16(size),
                        },
                        .ringid = id,
                        .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
                },
        };
        struct {
                struct wmi_cmd_hdr wmi;
                struct wmi_vring_cfg_done_event cmd;
        } __packed reply = {
                .cmd = {.status = WMI_FW_STATUS_FAILURE},
        };
        struct wil_ring *vring = &wil->ring_tx[id];
        struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];

        wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
                     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
        lockdep_assert_held(&wil->mutex);

        if (vring->va) {
                wil_err(wil, "Tx ring [%d] already allocated\n", id);
                rc = -EINVAL;
                goto out;
        }

        wil_tx_data_init(txdata);
        vring->is_rx = false;
        vring->size = size;
        rc = wil_vring_alloc(wil, vring);
        if (rc)
                goto out;

        wil->ring2cid_tid[id][0] = wil->max_assoc_sta; /* CID */
        wil->ring2cid_tid[id][1] = 0; /* TID */

        cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

        if (!vif->privacy)
                txdata->dot1x_open = true;
        rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, vif->mid,
                      &cmd, sizeof(cmd),
                      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
                      WIL_WMI_CALL_GENERAL_TO_MS);
        if (rc)
                goto out_free;

        if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
                wil_err(wil, "Tx config failed, status 0x%02x\n",
                        reply.cmd.status);
                rc = -EINVAL;
                goto out_free;
        }

        spin_lock_bh(&txdata->lock);
        vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
        txdata->mid = vif->mid;
        txdata->enabled = 1;
        spin_unlock_bh(&txdata->lock);

        return 0;
 out_free:
        spin_lock_bh(&txdata->lock);
        txdata->enabled = 0;
        txdata->dot1x_open = false;
        spin_unlock_bh(&txdata->lock);
        wil_vring_free(wil, vring);
 out:
        return rc;
}
static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
                                          struct wil6210_vif *vif,
                                          struct sk_buff *skb)
{
        int i, cid;
        const u8 *da = wil_skb_get_da(skb);
        int min_ring_id = wil_get_min_tx_ring_id(wil);

        cid = wil_find_cid(wil, vif->mid, da);

        if (cid < 0 || cid >= wil->max_assoc_sta)
                return NULL;

        /* TODO: fix for multiple TID */
        for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
                if (!wil->ring_tx_data[i].dot1x_open &&
                    skb->protocol != cpu_to_be16(ETH_P_PAE))
                        continue;
                if (wil->ring2cid_tid[i][0] == cid) {
                        struct wil_ring *v = &wil->ring_tx[i];
                        struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];

                        wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
                                     da, i);
                        if (v->va && txdata->enabled) {
                                return v;
                        } else {
                                wil_dbg_txrx(wil,
                                             "find_tx_ucast: vring[%d] not valid\n",
                                             i);
                                return NULL;
                        }
                }
        }

        return NULL;
}
static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
                       struct wil_ring *ring, struct sk_buff *skb);

static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
                                             struct wil6210_vif *vif,
                                             struct sk_buff *skb)
{
        struct wil_ring *ring;
        int i, cid;
        struct wil_ring_tx_data *txdata;
        int min_ring_id = wil_get_min_tx_ring_id(wil);

        /* In the STA mode, it is expected to have only 1 VRING
         * for the AP we connected to.
         * find 1-st vring eligible for this skb and use it.
         */
        for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
                ring = &wil->ring_tx[i];
                txdata = &wil->ring_tx_data[i];
                if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
                        continue;

                cid = wil->ring2cid_tid[i][0];
                if (cid >= wil->max_assoc_sta) /* skip BCAST */
                        continue;

                if (!wil->ring_tx_data[i].dot1x_open &&
                    skb->protocol != cpu_to_be16(ETH_P_PAE))
                        continue;

                wil_dbg_txrx(wil, "Tx -> ring %d\n", i);

                return ring;
        }

        wil_dbg_txrx(wil, "Tx while no rings active?\n");

        return NULL;
}
/* Use one of 2 strategies:
 *
 * 1. New (real broadcast):
 *    use dedicated broadcast vring
 * 2. Old (pseudo-DMS):
 *    Find 1-st vring and return it;
 *    duplicate skb and send it to other active vrings;
 *    in all cases override dest address to unicast peer's address
 * Use old strategy when new is not supported yet:
 *  - for PBSS
 */
static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
                                            struct wil6210_vif *vif,
                                            struct sk_buff *skb)
{
        struct wil_ring *v;
        struct wil_ring_tx_data *txdata;
        int i = vif->bcast_ring;

        if (i < 0)
                return NULL;
        v = &wil->ring_tx[i];
        txdata = &wil->ring_tx_data[i];
        if (!v->va || !txdata->enabled)
                return NULL;
        if (!wil->ring_tx_data[i].dot1x_open &&
            skb->protocol != cpu_to_be16(ETH_P_PAE))
                return NULL;

        return v;
}
/* apply multicast to unicast only for ARP and IP packets
 * (see NL80211_CMD_SET_MULTICAST_TO_UNICAST for more info)
 */
static bool wil_check_multicast_to_unicast(struct wil6210_priv *wil,
                                           struct sk_buff *skb)
{
        const struct ethhdr *eth = (void *)skb->data;
        const struct vlan_ethhdr *ethvlan = (void *)skb->data;
        __be16 ethertype;

        if (!wil->multicast_to_unicast)
                return false;

        /* multicast to unicast conversion only for some payload */
        ethertype = eth->h_proto;
        if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
                ethertype = ethvlan->h_vlan_encapsulated_proto;
        switch (ethertype) {
        case htons(ETH_P_ARP):
        case htons(ETH_P_IP):
        case htons(ETH_P_IPV6):
                break;
        default:
                return false;
        }

        return true;
}
static void wil_set_da_for_vring(struct wil6210_priv *wil,
                                 struct sk_buff *skb, int vring_index)
{
        u8 *da = wil_skb_get_da(skb);
        int cid = wil->ring2cid_tid[vring_index][0];

        ether_addr_copy(da, wil->sta[cid].addr);
}
static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
                                            struct wil6210_vif *vif,
                                            struct sk_buff *skb)
{
        struct wil_ring *v, *v2;
        struct sk_buff *skb2;
        int i;
        u8 cid;
        const u8 *src = wil_skb_get_sa(skb);
        struct wil_ring_tx_data *txdata, *txdata2;
        int min_ring_id = wil_get_min_tx_ring_id(wil);

        /* find 1-st vring eligible for data */
        for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
                v = &wil->ring_tx[i];
                txdata = &wil->ring_tx_data[i];
                if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
                        continue;

                cid = wil->ring2cid_tid[i][0];
                if (cid >= wil->max_assoc_sta) /* skip BCAST */
                        continue;
                if (!wil->ring_tx_data[i].dot1x_open &&
                    skb->protocol != cpu_to_be16(ETH_P_PAE))
                        continue;

                /* don't Tx back to source when re-routing Rx->Tx at the AP */
                if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
                        continue;

                goto found;
        }

        wil_dbg_txrx(wil, "Tx while no vrings active?\n");

        return NULL;

found:
        wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
        wil_set_da_for_vring(wil, skb, i);

        /* find other active vrings and duplicate skb for each */
        for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
                v2 = &wil->ring_tx[i];
                txdata2 = &wil->ring_tx_data[i];
                if (!v2->va || txdata2->mid != vif->mid)
                        continue;
                cid = wil->ring2cid_tid[i][0];
                if (cid >= wil->max_assoc_sta) /* skip BCAST */
                        continue;
                if (!wil->ring_tx_data[i].dot1x_open &&
                    skb->protocol != cpu_to_be16(ETH_P_PAE))
                        continue;

                if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
                        continue;

                skb2 = skb_copy(skb, GFP_ATOMIC);
                if (skb2) {
                        wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
                        wil_set_da_for_vring(wil, skb2, i);
                        wil_tx_ring(wil, vif, v2, skb2);
                        /* successful call to wil_tx_ring takes skb2 ref */
                        dev_kfree_skb_any(skb2);
                } else {
                        wil_err(wil, "skb_copy failed\n");
                }
        }

        return v;
}
void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
        d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}
/* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 */
static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
                                          struct sk_buff *skb,
                                          int tso_desc_type, bool is_ipv4,
                                          int tcp_hdr_len, int skb_net_hdr_len)
{
        d->dma.b11 = ETH_HLEN; /* MAC header length */
        d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;

        d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
        /* L4 header len: TCP header length */
        d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);

        /* Setup TSO: bit and desc type */
        d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
                (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
        d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);

        d->dma.ip_length = skb_net_hdr_len;
        /* Enable TCP/UDP checksum */
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
        /* Calculate pseudo-header */
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
}
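/* TSO descriptor chain produced in __wil_tx_vring_tso() below: one header
 * descriptor (wil_tso_type_hdr) followed, per MSS worth of payload, by a
 * first/mid.../last group; wil_tx_desc_set_nr_frags() on the header
 * descriptor records the total so Tx completion can release the whole chain.
 */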
/* Sets the descriptor @d up for csum. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
 * Note, if d==NULL, the function only returns the protocol result.
 *
 * It is very similar to previous wil_tx_desc_offload_setup_tso. This
 * is "if unrolling" to optimize the critical path.
 */
static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
                                     struct sk_buff *skb)
{
        int protocol;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        d->dma.b11 = ETH_HLEN; /* MAC header length */

        switch (skb->protocol) {
        case cpu_to_be16(ETH_P_IP):
                protocol = ip_hdr(skb)->protocol;
                d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
                break;
        case cpu_to_be16(ETH_P_IPV6):
                protocol = ipv6_hdr(skb)->nexthdr;
                break;
        default:
                return -EINVAL;
        }

        switch (protocol) {
        case IPPROTO_TCP:
                d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
                /* L4 header len: TCP header length */
                d->dma.d0 |=
                (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
                break;
        case IPPROTO_UDP:
                /* L4 header len: UDP header length */
                d->dma.d0 |=
                (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
                break;
        default:
                return -EINVAL;
        }

        d->dma.ip_length = skb_network_header_len(skb);
        /* Enable TCP/UDP checksum */
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
        /* Calculate pseudo-header */
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

        return 0;
}
static inline void wil_tx_last_desc(struct vring_tx_desc *d)
{
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
              BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
              BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
}

static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
{
        d->dma.d0 |= wil_tso_type_lst <<
                  DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
}
static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
                              struct wil_ring *vring, struct sk_buff *skb)
{
        struct device *dev = wil_to_dev(wil);

        /* point to descriptors in shared memory */
        volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
                *_first_desc = NULL;

        /* pointers to shadow descriptors */
        struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
                *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
                *first_desc = &first_desc_mem;

        /* pointer to shadow descriptors' context */
        struct wil_ctx *hdr_ctx, *first_ctx = NULL;

        int descs_used = 0; /* total number of used descriptors */
        int sg_desc_cnt = 0; /* number of descriptors for current mss*/

        u32 swhead = vring->swhead;
        int used, avail = wil_ring_avail_tx(vring);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int min_desc_required = nr_frags + 1;
        int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
        int f, len, hdrlen, headlen;
        int vring_index = vring - wil->ring_tx;
        struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index];
        uint i = swhead;
        dma_addr_t pa;
        const skb_frag_t *frag = NULL;
        int rem_data = mss;
        int lenmss;
        int hdr_compensation_need = true;
        int desc_tso_type = wil_tso_type_first;
        bool is_ipv4;
        int tcp_hdr_len;
        int skb_net_hdr_len;
        int gso_type;
        int rc = -EINVAL;

        wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
                     vring_index);

        if (unlikely(!txdata->enabled))
                return -EINVAL;

        /* A typical page 4K is 3-4 payloads, we assume each fragment
         * is a full payload, that's how min_desc_required has been
         * calculated. In real we might need more or less descriptors,
         * this is the initial check only.
         */
        if (unlikely(avail < min_desc_required)) {
                wil_err_ratelimited(wil,
                                    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
                                    vring_index, min_desc_required);
                return -ENOMEM;
        }

        /* Header Length = MAC header len + IP header len + TCP header len*/
        hdrlen = ETH_HLEN +
                (int)skb_network_header_len(skb) +
                tcp_hdrlen(skb);

        gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
        switch (gso_type) {
        case SKB_GSO_TCPV4:
                /* TCP v4, zero out the IP length and IPv4 checksum fields
                 * as required by the offloading doc
                 */
                ip_hdr(skb)->tot_len = 0;
                ip_hdr(skb)->check = 0;
                is_ipv4 = true;
                break;
        case SKB_GSO_TCPV6:
                /* TCP v6, zero out the payload length */
                ipv6_hdr(skb)->payload_len = 0;
                is_ipv4 = false;
                break;
        default:
                /* other than TCPv4 or TCPv6 types are not supported for TSO.
                 * It is also illegal for both to be set simultaneously
                 */
                return -EINVAL;
        }

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return -EINVAL;

        /* tcp header length and skb network header length are fixed for all
         * packet's descriptors - read them once here
         */
        tcp_hdr_len = tcp_hdrlen(skb);
        skb_net_hdr_len = skb_network_header_len(skb);

        _hdr_desc = &vring->va[i].tx.legacy;

        pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, pa))) {
                wil_err(wil, "TSO: Skb head DMA map error\n");
                goto err_exit;
        }

        wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa,
                                  hdrlen, vring_index);
        wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
                                      tcp_hdr_len, skb_net_hdr_len);
        wil_tx_last_desc(hdr_desc);

        vring->ctx[i].mapped_as = wil_mapped_as_single;
        hdr_ctx = &vring->ctx[i];

        descs_used++;
        headlen = skb_headlen(skb) - hdrlen;

        for (f = headlen ? -1 : 0; f < nr_frags; f++) {
                if (f == -1) {
                        wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
                                     headlen);
                        len = headlen;
                } else {
                        frag = &skb_shinfo(skb)->frags[f];
                        len = skb_frag_size(frag);
                        wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
                }

                while (len) {
                        wil_dbg_txrx(wil,
                                     "TSO: len %d, rem_data %d, descs_used %d\n",
                                     len, rem_data, descs_used);

                        if (descs_used == avail) {
                                wil_err_ratelimited(wil, "TSO: ring overflow\n");
                                rc = -ENOMEM;
                                goto mem_error;
                        }

                        lenmss = min_t(int, rem_data, len);
                        i = (swhead + descs_used) % vring->size;
                        wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);

                        if (f != -1) {
                                pa = skb_frag_dma_map(dev, frag,
                                                      skb_frag_size(frag) - len,
                                                      lenmss, DMA_TO_DEVICE);
                                vring->ctx[i].mapped_as = wil_mapped_as_page;
                        } else {
                                pa = dma_map_single(dev,
                                                    skb->data +
                                                    skb_headlen(skb) - headlen,
                                                    lenmss,
                                                    DMA_TO_DEVICE);
                                vring->ctx[i].mapped_as = wil_mapped_as_single;
                                headlen -= lenmss;
                        }

                        if (unlikely(dma_mapping_error(dev, pa))) {
                                wil_err(wil, "TSO: DMA map page error\n");
                                goto mem_error;
                        }

                        _desc = &vring->va[i].tx.legacy;

                        if (!_first_desc) {
                                _first_desc = _desc;
                                first_ctx = &vring->ctx[i];
                                d = first_desc;
                        } else {
                                d = &desc_mem;
                        }

                        wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
                                                  pa, lenmss, vring_index);
                        wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
                                                      is_ipv4, tcp_hdr_len,
                                                      skb_net_hdr_len);

                        /* use tso_type_first only once */
                        desc_tso_type = wil_tso_type_mid;

                        descs_used++;  /* desc used so far */
                        sg_desc_cnt++; /* desc used for this segment */
                        len -= lenmss;
                        rem_data -= lenmss;

                        wil_dbg_txrx(wil,
                                     "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
                                     len, rem_data, descs_used, sg_desc_cnt);

                        /* Close the segment if reached mss size or last frag*/
                        if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
                                if (hdr_compensation_need) {
                                        /* first segment include hdr desc for
                                         * release
                                         */
                                        hdr_ctx->nr_frags = sg_desc_cnt;
                                        wil_tx_desc_set_nr_frags(first_desc,
                                                                 sg_desc_cnt +
                                                                 1);
                                        hdr_compensation_need = false;
                                } else {
                                        wil_tx_desc_set_nr_frags(first_desc,
                                                                 sg_desc_cnt);
                                }
                                first_ctx->nr_frags = sg_desc_cnt - 1;

                                wil_tx_last_desc(d);

                                /* first descriptor may also be the last
                                 * for this mss - make sure not to copy
                                 * it twice
                                 */
                                if (first_desc != d)
                                        *_first_desc = *first_desc;

                                /*last descriptor will be copied at the end
                                 * of this TS processing
                                 */
                                if (f < nr_frags - 1 || len > 0)
                                        *_desc = *d;

                                rem_data = mss;
                                _first_desc = NULL;
                                sg_desc_cnt = 0;
                        } else if (first_desc != d) /* update mid descriptor */
                                        *_desc = *d;
                }
        }

        /* first descriptor may also be the last.
         * in this case d pointer is invalid
         */
        if (_first_desc == _desc)
                d = first_desc;

        /* Last data descriptor */
        wil_set_tx_desc_last_tso(d);
        *_desc = *d;

        /* Fill the total number of descriptors in first desc (hdr)*/
        wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
        *_hdr_desc = *hdr_desc;

        /* hold reference to skb
         * to prevent skb release before accounting
         * in case of immediate "tx done"
         */
        vring->ctx[i].skb = skb_get(skb);

        /* performance monitoring */
        used = wil_ring_used_tx(vring);
        if (wil_val_in_range(wil->ring_idle_trsh,
                             used, used + descs_used)) {
                txdata->idle += get_cycles() - txdata->last_idle;
                wil_dbg_txrx(wil,  "Ring[%2d] not idle %d -> %d\n",
                             vring_index, used, used + descs_used);
        }

        /* Make sure to advance the head only after descriptor update is done.
         * This will prevent a race condition where the completion thread
         * will see the DU bit set from previous run and will handle the
         * skb before it was completed.
         */
        wmb();

        /* advance swhead */
        wil_ring_advance_head(vring, descs_used);
        wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);

        /* make sure all writes to descriptors (shared memory) are done before
         * committing them to HW
         */
        wmb();

        if (wil->tx_latency)
                *(ktime_t *)&skb->cb = ktime_get();
        else
                memset(skb->cb, 0, sizeof(ktime_t));

        wil_w(wil, vring->hwtail, vring->swhead);
        return 0;

mem_error:
        while (descs_used > 0) {
                struct wil_ctx *ctx;

                i = (swhead + descs_used - 1) % vring->size;
                d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
                _desc = &vring->va[i].tx.legacy;
                *d = *_desc;
                _desc->dma.status = TX_DMA_STATUS_DU;
                ctx = &vring->ctx[i];
                wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
                memset(ctx, 0, sizeof(*ctx));
                descs_used--;
        }
err_exit:
        return rc;
}
static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
                         struct wil_ring *ring, struct sk_buff *skb)
{
        struct device *dev = wil_to_dev(wil);
        struct vring_tx_desc dd, *d = &dd;
        volatile struct vring_tx_desc *_d;
        u32 swhead = ring->swhead;
        int avail = wil_ring_avail_tx(ring);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        uint f = 0;
        int ring_index = ring - wil->ring_tx;
        struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
        uint i = swhead;
        dma_addr_t pa;
        int used;
        bool mcast = (ring_index == vif->bcast_ring);
        uint len = skb_headlen(skb);

        wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
                     skb->len, ring_index, nr_frags);

        if (unlikely(!txdata->enabled))
                return -EINVAL;

        if (unlikely(avail < 1 + nr_frags)) {
                wil_err_ratelimited(wil,
                                    "Tx ring[%2d] full. No space for %d fragments\n",
                                    ring_index, 1 + nr_frags);
                return -ENOMEM;
        }
        _d = &ring->va[i].tx.legacy;

        pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

        wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index,
                     skb_headlen(skb), skb->data, &pa);
        wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
                          skb->data, skb_headlen(skb), false);

        if (unlikely(dma_mapping_error(dev, pa)))
                return -EINVAL;
        ring->ctx[i].mapped_as = wil_mapped_as_single;
        /* 1-st segment */
        wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
                                  ring_index);
        if (unlikely(mcast)) {
                d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
                if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
                        d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
        }
        /* Process TCP/UDP checksum offloading */
        if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
                wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
                        ring_index);
                goto dma_error;
        }

        ring->ctx[i].nr_frags = nr_frags;
        wil_tx_desc_set_nr_frags(d, nr_frags + 1);

        /* middle segments */
        for (; f < nr_frags; f++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
                int len = skb_frag_size(frag);

                *_d = *d;
                wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
                wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
                                  (const void *)d, sizeof(*d), false);
                i = (swhead + f + 1) % ring->size;
                _d = &ring->va[i].tx.legacy;
                pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
                                      DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, pa))) {
                        wil_err(wil, "Tx[%2d] failed to map fragment\n",
                                ring_index);
                        goto dma_error;
                }
                ring->ctx[i].mapped_as = wil_mapped_as_page;
                wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
                                          pa, len, ring_index);
                /* no need to check return code -
                 * if it succeeded for 1-st descriptor,
                 * it will succeed here too
                 */
                wil_tx_desc_offload_setup(d, skb);
        }
        /* for the last seg only */
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
        *_d = *d;
        wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
        wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
                          (const void *)d, sizeof(*d), false);

        /* hold reference to skb
         * to prevent skb release before accounting
         * in case of immediate "tx done"
         */
        ring->ctx[i].skb = skb_get(skb);

        /* performance monitoring */
        used = wil_ring_used_tx(ring);
        if (wil_val_in_range(wil->ring_idle_trsh,
                             used, used + nr_frags + 1)) {
                txdata->idle += get_cycles() - txdata->last_idle;
                wil_dbg_txrx(wil,  "Ring[%2d] not idle %d -> %d\n",
                             ring_index, used, used + nr_frags + 1);
        }

        /* Make sure to advance the head only after descriptor update is done.
         * This will prevent a race condition where the completion thread
         * will see the DU bit set from previous run and will handle the
         * skb before it was completed.
         */
        wmb();

        /* advance swhead */
        wil_ring_advance_head(ring, nr_frags + 1);
        wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead,
                     ring->swhead);
        trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags);

        /* make sure all writes to descriptors (shared memory) are done before
         * committing them to HW
         */
        wmb();

        if (wil->tx_latency)
                *(ktime_t *)&skb->cb = ktime_get();
        else
                memset(skb->cb, 0, sizeof(ktime_t));

        wil_w(wil, ring->hwtail, ring->swhead);

        return 0;
 dma_error:
        /* unmap what we have mapped */
        nr_frags = f + 1; /* frags mapped + one for skb head */
        for (f = 0; f < nr_frags; f++) {
                struct wil_ctx *ctx;

                i = (swhead + f) % ring->size;
                ctx = &ring->ctx[i];
                _d = &ring->va[i].tx.legacy;
                *d = *_d;
                _d->dma.status = TX_DMA_STATUS_DU;
                wil->txrx_ops.tx_desc_unmap(dev,
                                            (union wil_tx_desc *)d,
                                            ctx);

                memset(ctx, 0, sizeof(*ctx));
        }

        return -EINVAL;
}
static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
                       struct wil_ring *ring, struct sk_buff *skb)
{
        int ring_index = ring - wil->ring_tx;
        struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
        int rc;

        spin_lock(&txdata->lock);

        if (test_bit(wil_status_suspending, wil->status) ||
            test_bit(wil_status_suspended, wil->status) ||
            test_bit(wil_status_resuming, wil->status)) {
                wil_dbg_txrx(wil,
                             "suspend/resume in progress. drop packet\n");
                spin_unlock(&txdata->lock);
                return -EINVAL;
        }

        rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring)
             (wil, vif, ring, skb);

        spin_unlock(&txdata->lock);

        return rc;
}
2237 * Check status of tx vrings and stop/wake net queues if needed
2238 * It will start/stop net queues of a specific VIF net_device.
2240 * This function does one of two checks:
2241 * In case check_stop is true, will check if net queues need to be stopped. If
2242 * the conditions for stopping are met, netif_tx_stop_all_queues() is called.
2243 * In case check_stop is false, will check if net queues need to be waked. If
2244 * the conditions for waking are met, netif_tx_wake_all_queues() is called.
2245 * vring is the vring which is currently being modified by either adding
2246 * descriptors (tx) into it or removing descriptors (tx complete) from it. Can
2247 * be null when irrelevant (e.g. connect/disconnect events).
2249 * The implementation is to stop net queues if modified vring has low
2250 * descriptor availability. Wake if all vrings are not in low descriptor
2251 * availability and modified vring has high descriptor availability.
static inline void __wil_update_net_queues(struct wil6210_priv *wil,
					   struct wil6210_vif *vif,
					   struct wil_ring *ring,
					   bool check_stop)
{
	int i;
	int min_ring_id = wil_get_min_tx_ring_id(wil);

	if (unlikely(!vif))
		return;

	if (ring)
		wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
			     (int)(ring - wil->ring_tx), vif->mid, check_stop,
			     vif->net_queue_stopped);
	else
		wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
			     check_stop, vif->mid, vif->net_queue_stopped);

	if (ring && drop_if_ring_full)
		/* no need to stop/wake net queues */
		return;

	if (check_stop == vif->net_queue_stopped)
		/* net queues already in desired state */
		return;

	if (check_stop) {
		if (!ring || unlikely(wil_ring_avail_low(ring))) {
			/* not enough room in the vring */
			netif_tx_stop_all_queues(vif_to_ndev(vif));
			vif->net_queue_stopped = true;
			wil_dbg_txrx(wil, "netif_tx_stop called\n");
		}
		return;
	}

	/* Do not wake the queues in suspend flow */
	if (test_bit(wil_status_suspending, wil->status) ||
	    test_bit(wil_status_suspended, wil->status))
		return;

	/* check wake */
	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
		struct wil_ring *cur_ring = &wil->ring_tx[i];
		struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];

		if (txdata->mid != vif->mid || !cur_ring->va ||
		    !txdata->enabled || cur_ring == ring)
			continue;

		if (wil_ring_avail_low(cur_ring)) {
			wil_dbg_txrx(wil, "ring %d full, can't wake\n",
				     (int)(cur_ring - wil->ring_tx));
			return;
		}
	}

	if (!ring || wil_ring_avail_high(ring)) {
		/* enough room in the ring */
		wil_dbg_txrx(wil, "calling netif_tx_wake\n");
		netif_tx_wake_all_queues(vif_to_ndev(vif));
		vif->net_queue_stopped = false;
	}
}

void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
			   struct wil_ring *ring, bool check_stop)
{
	spin_lock(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vif, ring, check_stop);
	spin_unlock(&wil->net_queue_lock);
}

void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
			      struct wil_ring *ring, bool check_stop)
{
	spin_lock_bh(&wil->net_queue_lock);
	__wil_update_net_queues(wil, vif, ring, check_stop);
	spin_unlock_bh(&wil->net_queue_lock);
}

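/* Queue stop/wake relies on two watermarks on descriptor availability so the
 * queues do not flap: stop once the modified ring drops below the low
 * watermark, wake only once it is back above the high watermark and no other
 * ring of the same VIF is low.
 *
 * A minimal usage sketch (illustrative only; assumes valid wil/vif/ring
 * pointers):
 *
 *	// Tx submission runs with BHs enabled, so use the _bh wrapper:
 *	wil_update_net_queues_bh(wil, vif, ring, true);	// maybe stop
 *
 *	// Tx completion already runs in BH (NAPI) context, so the plain
 *	// wrapper is sufficient there:
 *	wil_update_net_queues(wil, vif, ring, false);	// maybe wake
 */
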
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_vif *vif = ndev_to_vif(ndev);
	struct wil6210_priv *wil = vif_to_wil(vif);
	const u8 *da = wil_skb_get_da(skb);
	bool bcast = is_multicast_ether_addr(da);
	struct wil_ring *ring;
	static bool pr_once_fw;
	int rc;

	wil_dbg_txrx(wil, "start_xmit\n");
	if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
		if (!pr_once_fw) {
			wil_err(wil, "FW not ready\n");
			pr_once_fw = true;
		}
		goto drop;
	}
	if (unlikely(!test_bit(wil_vif_fwconnected, vif->status))) {
		wil_dbg_ratelimited(wil,
				    "VIF not connected, packet dropped\n");
		goto drop;
	}
	if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_MONITOR)) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}
	pr_once_fw = false;

	/* find vring */
	if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
		/* in STA mode (ESS), all to same VRING (to AP) */
		ring = wil_find_tx_ring_sta(wil, vif, skb);
	} else if (bcast) {
		if (vif->pbss || wil_check_multicast_to_unicast(wil, skb))
			/* in pbss, no bcast VRING - duplicate skb in
			 * all stations VRINGs
			 */
			ring = wil_find_tx_bcast_2(wil, vif, skb);
		else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
			/* AP has a dedicated bcast VRING */
			ring = wil_find_tx_bcast_1(wil, vif, skb);
		else
			/* unexpected combination, fallback to duplicating
			 * the skb in all stations VRINGs
			 */
			ring = wil_find_tx_bcast_2(wil, vif, skb);
	} else {
		/* unicast, find specific VRING by dest. address */
		ring = wil_find_tx_ucast(wil, vif, skb);
	}
	if (unlikely(!ring)) {
		wil_dbg_txrx(wil, "No Tx RING found for %pM\n", da);
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_ring(wil, vif, ring, skb);

	switch (rc) {
	case 0:
		/* shall we stop net queues? */
		wil_update_net_queues_bh(wil, vif, ring, true);
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		if (drop_if_ring_full)
			goto drop;
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
 drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}

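/* wil_start_xmit() is the driver's ndo_start_xmit hook. A minimal sketch of
 * how such a hook is wired into net_device_ops (illustrative; the actual
 * wil6210 ops table lives in its netdev setup code and carries more
 * callbacks):
 *
 *	static const struct net_device_ops wil_netdev_ops_sketch = {
 *		.ndo_start_xmit = wil_start_xmit,
 *	};
 *
 *	// ndev->netdev_ops = &wil_netdev_ops_sketch;
 *
 * Returning NETDEV_TX_BUSY asks the stack to requeue the skb, while
 * NETDEV_TX_OK / NET_XMIT_DROP mean the driver consumed (or freed) it.
 */
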
void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
			 struct wil_sta_info *sta)
{
	int skb_time_us;
	int bin;

	if (!wil->tx_latency)
		return;

	if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0)
		return;

	skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb);
	bin = skb_time_us / wil->tx_latency_res;
	bin = min_t(int, bin, WIL_NUM_LATENCY_BINS - 1);

	wil_dbg_txrx(wil, "skb time %dus => bin %d\n", skb_time_us, bin);
	sta->tx_latency_bins[bin]++;
	sta->stats.tx_latency_total_us += skb_time_us;
	if (skb_time_us < sta->stats.tx_latency_min_us)
		sta->stats.tx_latency_min_us = skb_time_us;
	if (skb_time_us > sta->stats.tx_latency_max_us)
		sta->stats.tx_latency_max_us = skb_time_us;
}

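/* The histogram bin is simply the measured latency divided by the configured
 * resolution, clamped to the last bin. A worked example with assumed values
 * (tx_latency_res and the bin count are runtime configuration, not fixed
 * here):
 *
 *	// assume tx_latency_res = 500 us and 4 bins
 *	//   skb_time_us =  120  ->  120 / 500 = 0  -> bin 0
 *	//   skb_time_us =  990  ->  990 / 500 = 1  -> bin 1
 *	//   skb_time_us = 7300  -> 7300 / 500 = 14 -> clamped to bin 3
 *
 * Bins 0..N-2 each cover one resolution step; the last bin absorbs every
 * sample beyond (N - 1) * tx_latency_res.
 */
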
/* Clean up transmitted skb's from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_vif *vif, int ringid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	struct net_device *ndev = vif_to_ndev(vif);
	struct device *dev = wil_to_dev(wil);
	struct wil_ring *vring = &wil->ring_tx[ringid];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
	int done = 0;
	int cid = wil->ring2cid_tid[ringid][0];
	struct wil_net_stats *stats = NULL;
	volatile struct vring_tx_desc *_d;
	int used_before_complete;
	int used_new;

	if (unlikely(!vring->va)) {
		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
		return 0;
	}

	if (unlikely(!txdata->enabled)) {
		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
		return 0;
	}

	wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);

	used_before_complete = wil_ring_used_tx(vring);

	if (cid < wil->max_assoc_sta)
		stats = &wil->sta[cid].stats;

	while (!wil_ring_is_empty(vring)) {
		int new_swtail;
		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
		/* For the fragmented skb, HW will set the DU bit only for the
		 * last fragment. Look for it.
		 * In TSO the first DU will include the hdr desc.
		 */
		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
		/* TODO: check we are not past head */

		_d = &vring->va[lf].tx.legacy;
		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
			break;

		new_swtail = (lf + 1) % vring->size;
		while (vring->swtail != new_swtail) {
			struct vring_tx_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb;

			ctx = &vring->ctx[vring->swtail];
			skb = ctx->skb;
			_d = &vring->va[vring->swtail].tx.legacy;

			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
					      d->dma.error);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
				     ringid, vring->swtail, dmalen,
				     d->dma.status, d->dma.error);
			wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)d, sizeof(*d), false);

			wil->txrx_ops.tx_desc_unmap(dev,
						    (union wil_tx_desc *)d,
						    ctx);

			if (skb) {
				if (likely(d->dma.error == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;

						wil_tx_latency_calc(wil, skb,
							&wil->sta[cid]);
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}

				if (skb->protocol == cpu_to_be16(ETH_P_PAE))
					wil_tx_complete_handle_eapol(vif, skb);

				wil_consume_skb(skb, d->dma.error == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the tail
			 * to prevent a case where wil_tx_ring will see
			 * this descriptor as used and handle it before ctx
			 * zeroing is completed.
			 */
			wmb();
			/* There is no need to touch the HW descriptor:
			 * - status bit TX_DMA_STATUS_DU is set by design,
			 *   so hardware will not try to process this desc.,
			 * - rest of descriptor will be initialized on Tx.
			 */
			vring->swtail = wil_ring_next_tail(vring);
			done++;
		}
	}

	/* performance monitoring */
	used_new = wil_ring_used_tx(vring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used_new, used_before_complete)) {
		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
			     ringid, used_before_complete, used_new);
		txdata->last_idle = get_cycles();
	}

	/* shall we wake net queues? */
	if (done)
		wil_update_net_queues(wil, vif, vring, false);

	return done;
}

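/* wil_tx_complete() is driven from the driver's Tx NAPI poll in BH context.
 * A condensed, hypothetical sketch of that call pattern (the real poll
 * handler lives in the driver's netdev code and walks all enabled rings):
 *
 *	int quota = budget, done;
 *
 *	done = wil_tx_complete(vif, ringid);	// reap finished descriptors
 *	quota -= min(done, quota);		// charge the NAPI budget
 *	if (quota > 0)
 *		napi_complete(napi);		// nothing left, re-arm Tx IRQ
 *
 * The return value (number of descriptors cleared) is what lets the poll
 * loop decide whether to stay scheduled.
 */
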
static inline int wil_tx_init(struct wil6210_priv *wil)
{
	return 0;
}

static inline void wil_tx_fini(struct wil6210_priv *wil) {}

static void wil_get_reorder_params(struct wil6210_priv *wil,
				   struct sk_buff *skb, int *tid, int *cid,
				   int *mid, u16 *seq, int *mcast, int *retry)
{
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);

	*tid = wil_rxdesc_tid(d);
	*cid = wil_skb_get_cid(skb);
	*mid = wil_rxdesc_mid(d);
	*seq = wil_rxdesc_seq(d);
	*mcast = wil_rxdesc_mcast(d);
	*retry = wil_rxdesc_retry(d);
}

void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
{
	wil->txrx_ops.configure_interrupt_moderation =
		wil_configure_interrupt_moderation;
	/* TX ops */
	wil->txrx_ops.tx_desc_map = wil_tx_desc_map;
	wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap;
	wil->txrx_ops.tx_ring_tso = __wil_tx_vring_tso;
	wil->txrx_ops.ring_init_tx = wil_vring_init_tx;
	wil->txrx_ops.ring_fini_tx = wil_vring_free;
	wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
	wil->txrx_ops.tx_init = wil_tx_init;
	wil->txrx_ops.tx_fini = wil_tx_fini;
	wil->txrx_ops.tx_ring_modify = wil_tx_vring_modify;
	/* RX ops */
	wil->txrx_ops.rx_init = wil_rx_init;
	wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
	wil->txrx_ops.get_reorder_params = wil_get_reorder_params;
	wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params;
	wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check;
	wil->txrx_ops.rx_error_check = wil_rx_error_check;
	wil->txrx_ops.is_rx_idle = wil_is_rx_idle;
	wil->txrx_ops.rx_fini = wil_rx_fini;
}
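
/* The txrx_ops table is what keeps the rest of the driver agnostic to the
 * DMA flavour: the same call sites work whether this legacy-DMA table or the
 * enhanced-DMA (edma) table was installed at init time. A minimal sketch of
 * the dispatch pattern (illustrative; it mirrors how wil_tx_ring() picks the
 * TSO handler above):
 *
 *	int rc;
 *
 *	if (skb_is_gso(skb))
 *		rc = wil->txrx_ops.tx_ring_tso(wil, vif, ring, skb);
 *	else
 *		rc = __wil_tx_ring(wil, vif, ring, skb);
 *
 * Installing the edma ops instead of wil_init_txrx_ops_legacy_dma() swaps
 * the callbacks without touching any of these call sites.
 */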