/*
 * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>

#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"
static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, S_IRUGO);
MODULE_PARM_DESC(rtap_include_phy_info,
                 " Include PHY info in the radiotap header, default - no");
static inline int wil_vring_is_empty(struct vring *vring)
{
        return vring->swhead == vring->swtail;
}
static inline u32 wil_vring_next_tail(struct vring *vring)
{
        return (vring->swtail + 1) % vring->size;
}
static inline void wil_vring_advance_head(struct vring *vring, int n)
{
        vring->swhead = (vring->swhead + n) % vring->size;
}
static inline int wil_vring_is_full(struct vring *vring)
{
        return wil_vring_next_tail(vring) == vring->swhead;
}
/*
 * Available space in Tx Vring
 */
static inline int wil_vring_avail_tx(struct vring *vring)
{
        u32 swhead = vring->swhead;
        u32 swtail = vring->swtail;
        int used = (vring->size + swhead - swtail) % vring->size;

        return vring->size - used - 1;
}
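/*
 * Worked example for the arithmetic above (values hypothetical): with
 * size = 128, swhead = 5, swtail = 120,
 *   used  = (128 + 5 - 120) % 128 = 13 descriptors in flight,
 *   avail = 128 - 13 - 1 = 114 descriptors free.
 * One slot is deliberately kept unused so that swhead == swtail always
 * means "empty"; a completely full ring would otherwise be
 * indistinguishable from an empty one.
 */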
/**
 * wil_vring_wmark_low - low watermark for available descriptor space
 */
static inline int wil_vring_wmark_low(struct vring *vring)
{
        return vring->size / 8;
}

/**
 * wil_vring_wmark_high - high watermark for available descriptor space
 */
static inline int wil_vring_wmark_high(struct vring *vring)
{
        return vring->size / 4;
}
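/*
 * How the watermarks are used (see wil_start_xmit() and wil_tx_complete()
 * below): when available space drops below the low watermark the netif
 * queues are stopped; once completions raise it above the high watermark
 * the queues are woken again. The gap between the two levels provides
 * hysteresis so the queues do not thrash on the boundary.
 */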
static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
        struct device *dev = wil_to_dev(wil);
        size_t sz = vring->size * sizeof(vring->va[0]);
        uint i;

        wil_dbg_misc(wil, "%s()\n", __func__);

        BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

        vring->swhead = 0;
        vring->swtail = 0;
        vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
        if (!vring->ctx) {
                vring->va = NULL;
                return -ENOMEM;
        }
        /*
         * vring->va should be aligned on its size rounded up to power of 2
         * This is granted by the dma_alloc_coherent
         */
        vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
        if (!vring->va) {
                kfree(vring->ctx);
                vring->ctx = NULL;
                return -ENOMEM;
        }
        /* initially, all descriptors are SW owned
         * For Tx and Rx, ownership bit is at the same location, thus
         * we can use any
         */
        for (i = 0; i < vring->size; i++) {
                volatile struct vring_tx_desc *_d = &vring->va[i].tx;

                _d->dma.status = TX_DMA_STATUS_DU;
        }

        wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
                     vring->va, &vring->pa, vring->ctx);

        return 0;
}
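/*
 * Sizing example for the alignment comment above (numbers hypothetical):
 * each descriptor is 32 bytes (enforced by the BUILD_BUG_ON), so a
 * 128-entry ring occupies 128 * 32 = 4096 bytes. Rounded up to a power
 * of 2 that is still 4096, and the comment above relies on
 * dma_alloc_coherent() returning a buffer aligned to at least that size,
 * so the whole ring sits in one naturally aligned block.
 */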
static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
                             struct wil_ctx *ctx)
{
        dma_addr_t pa = wil_desc_addr(&d->dma.addr);
        u16 dmalen = le16_to_cpu(d->dma.length);

        switch (ctx->mapped_as) {
        case wil_mapped_as_single:
                dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
                break;
        case wil_mapped_as_page:
                dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
                break;
        default:
                break;
        }
}
static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
                           int tx)
{
        struct device *dev = wil_to_dev(wil);
        size_t sz = vring->size * sizeof(vring->va[0]);

        if (tx) {
                int vring_index = vring - wil->vring_tx;

                wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
                             vring_index, vring->size, vring->va,
                             &vring->pa, vring->ctx);
        } else {
                wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
                             vring->size, vring->va,
                             &vring->pa, vring->ctx);
        }

        while (!wil_vring_is_empty(vring)) {
                dma_addr_t pa;
                u16 dmalen;
                struct wil_ctx *ctx;

                if (tx) {
                        struct vring_tx_desc dd, *d = &dd;
                        volatile struct vring_tx_desc *_d =
                                        &vring->va[vring->swtail].tx;

                        ctx = &vring->ctx[vring->swtail];
                        *d = *_d;
                        wil_txdesc_unmap(dev, d, ctx);
                        if (ctx->skb)
                                dev_kfree_skb_any(ctx->skb);
                        vring->swtail = wil_vring_next_tail(vring);
                } else { /* rx */
                        struct vring_rx_desc dd, *d = &dd;
                        volatile struct vring_rx_desc *_d =
                                        &vring->va[vring->swhead].rx;

                        ctx = &vring->ctx[vring->swhead];
                        *d = *_d;
                        pa = wil_desc_addr(&d->dma.addr);
                        dmalen = le16_to_cpu(d->dma.length);
                        dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
                        wil_vring_advance_head(vring, 1);
                }
        }
        dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
        kfree(vring->ctx);
        vring->pa = 0;
        vring->va = NULL;
        vring->ctx = NULL;
}
/**
 * Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
                               u32 i, int headroom)
{
        struct device *dev = wil_to_dev(wil);
        unsigned int sz = RX_BUF_LEN;
        struct vring_rx_desc dd, *d = &dd;
        volatile struct vring_rx_desc *_d = &vring->va[i].rx;
        dma_addr_t pa;
        struct sk_buff *skb = dev_alloc_skb(sz + headroom);

        if (unlikely(!skb))
                return -ENOMEM;

        skb_reserve(skb, headroom);
        skb_put(skb, sz);

        pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(dev, pa))) {
                kfree_skb(skb);
                return -ENOMEM;
        }

        d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
        wil_desc_addr_set(&d->dma.addr, pa);
        /* ip_length don't care */
        /* b11 don't care */
        /* error don't care */
        d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
        d->dma.length = cpu_to_le16(sz);
        *_d = *d;
        vring->ctx[i].skb = skb;

        return 0;
}
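/*
 * A note on the dd/_d pattern used throughout this file: descriptors are
 * composed in an on-stack copy (dd) and written to the volatile ring slot
 * with a single structure assignment (*_d = *d). This keeps accesses to
 * the uncached, coherent ring memory to a minimum; the hardware only
 * looks at the slot after the tail pointer is published via iowrite32()
 * in wil_rx_refill() below.
 */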
/*
 * Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - Phy info
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
                                       struct sk_buff *skb)
{
        struct wireless_dev *wdev = wil->wdev;
        struct wil6210_rtap {
                struct ieee80211_radiotap_header rthdr;
                /* fields should be in the order of bits in rthdr.it_present */
                /* flags */
                u8 flags;
                /* channel */
                __le16 chnl_freq __aligned(2);
                __le16 chnl_flags;
                /* MCS */
                u8 mcs_present;
                u8 mcs_flags;
                u8 mcs_index;
        } __packed;
        struct wil6210_rtap_vendor {
                struct wil6210_rtap rtap;
                /* vendor */
                u8 vendor_oui[3] __aligned(2);
                u8 vendor_ns;
                __le16 vendor_skip;
                u8 vendor_data[0];
        } __packed;
        struct vring_rx_desc *d = wil_skb_rxdesc(skb);
        struct wil6210_rtap_vendor *rtap_vendor;
        int rtap_len = sizeof(struct wil6210_rtap);
        int phy_length = 0; /* phy info header size, bytes */
        static char phy_data[128];
        struct ieee80211_channel *ch = wdev->preset_chandef.chan;

        if (rtap_include_phy_info) {
                rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
                /* calculate additional length */
                if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
                        /**
                         * PHY info starts from 8-byte boundary
                         * there are 8-byte lines, last line may be partially
                         * written (HW bug), thus FW configures for last line
                         * to be excessive. Driver skips this last line.
                         */
                        int len = min_t(int, 8 + sizeof(phy_data),
                                        wil_rxdesc_phy_length(d));

                        if (len > 8) {
                                void *p = skb_tail_pointer(skb);
                                void *pa = PTR_ALIGN(p, 8);

                                if (skb_tailroom(skb) >= len + (pa - p)) {
                                        phy_length = len - 8;
                                        memcpy(phy_data, pa, phy_length);
                                }
                        }
                }
                rtap_len += phy_length;
        }

        if (skb_headroom(skb) < rtap_len &&
            pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
                wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
                return;
        }

        rtap_vendor = (void *)skb_push(skb, rtap_len);
        memset(rtap_vendor, 0, rtap_len);

        rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
        rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
        rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
                        (1 << IEEE80211_RADIOTAP_FLAGS) |
                        (1 << IEEE80211_RADIOTAP_CHANNEL) |
                        (1 << IEEE80211_RADIOTAP_MCS));
        if (d->dma.status & RX_DMA_STATUS_ERROR)
                rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;

        rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
        rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);

        rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
        rtap_vendor->rtap.mcs_flags = 0;
        rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);

        if (rtap_include_phy_info) {
                rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
                                IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
                /* OUI for Wilocity 04:ce:14 */
                rtap_vendor->vendor_oui[0] = 0x04;
                rtap_vendor->vendor_oui[1] = 0xce;
                rtap_vendor->vendor_oui[2] = 0x14;
                rtap_vendor->vendor_ns = 1;
                /* Rx descriptor + PHY data */
                rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
                                                       phy_length);
                memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
                memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
                       phy_length);
        }
}
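/*
 * Worked example for the PHY info sizing above (hypothetical value): if
 * wil_rxdesc_phy_length(d) reports 64 bytes, then
 *   len = min(8 + sizeof(phy_data), 64) = 64,
 *   phy_length = 64 - 8 = 56,
 * i.e. the last (possibly partially written) 8-byte line is dropped and
 * 56 bytes of PHY data are copied out of the skb tail area.
 */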
/*
 * Fast swap in place between 2 registers
 */
static void wil_swap_u16(u16 *a, u16 *b)
{
        *a ^= *b;
        *b ^= *a;
        *a ^= *b;
}

static void wil_swap_ethaddr(void *data)
{
        struct ethhdr *eth = data;
        u16 *s = (u16 *)eth->h_source;
        u16 *d = (u16 *)eth->h_dest;

        wil_swap_u16(s++, d++);
        wil_swap_u16(s++, d++);
        wil_swap_u16(s, d);
}
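/*
 * Design note: the XOR trick above swaps without a temporary, but would
 * zero both operands if a and b ever aliased the same location. Here the
 * operands are always h_source and h_dest, which are distinct fields of
 * struct ethhdr, so aliasing cannot happen. A 6-byte MAC address is
 * swapped as three u16 words, hence the three calls.
 */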
/**
 * reap 1 frame from @swhead
 *
 * Rx descriptor copied to skb->cb
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
                                         struct vring *vring)
{
        struct device *dev = wil_to_dev(wil);
        struct net_device *ndev = wil_to_ndev(wil);
        volatile struct vring_rx_desc *_d;
        struct vring_rx_desc *d;
        struct sk_buff *skb;
        dma_addr_t pa;
        unsigned int sz = RX_BUF_LEN;
        u16 dmalen;
        u8 ftype;
        u8 ds_bits;
        int cid;
        struct wil_net_stats *stats;

        BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));

        if (wil_vring_is_empty(vring))
                return NULL;

        _d = &vring->va[vring->swhead].rx;
        if (!(_d->dma.status & RX_DMA_STATUS_DU)) {
                /* it is not error, we just reached end of Rx done area */
                return NULL;
        }

        skb = vring->ctx[vring->swhead].skb;
        d = wil_skb_rxdesc(skb);
        *d = *_d;
        pa = wil_desc_addr(&d->dma.addr);
        vring->ctx[vring->swhead].skb = NULL;
        wil_vring_advance_head(vring, 1);

        dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
        dmalen = le16_to_cpu(d->dma.length);

        trace_wil6210_rx(vring->swhead, d);
        wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen);
        wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
                          (const void *)d, sizeof(*d), false);

        if (dmalen > sz) {
                wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
                kfree_skb(skb);
                return NULL;
        }
        skb_trim(skb, dmalen);

        prefetch(skb->data);

        wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
                          skb->data, skb_headlen(skb), false);

        cid = wil_rxdesc_cid(d);
        stats = &wil->sta[cid].stats;
        stats->last_mcs_rx = wil_rxdesc_mcs(d);

        /* use radiotap header only if required */
        if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
                wil_rx_add_radiotap_header(wil, skb);

        /* no extra checks if in sniffer mode */
        if (ndev->type != ARPHRD_ETHER)
                return skb;
        /*
         * Non-data frames may be delivered through Rx DMA channel (ex: BAR)
         * Driver should recognize it by frame type, that is found
         * in Rx descriptor. If type is not data, it is 802.11 frame as is
         */
        ftype = wil_rxdesc_ftype(d) << 2;
        if (ftype != IEEE80211_FTYPE_DATA) {
                wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
                /* TODO: process it */
                kfree_skb(skb);
                return NULL;
        }

        if (skb->len < ETH_HLEN) {
                wil_err(wil, "Short frame, len = %d\n", skb->len);
                /* TODO: process it (i.e. BAR) */
                kfree_skb(skb);
                return NULL;
        }

        /* L4 IDENT is on when HW calculated checksum, check status
         * and in case of error drop the packet
         * higher stack layers will handle retransmission (if required)
         */
        if (d->dma.status & RX_DMA_STATUS_L4_IDENT) {
                /* L4 protocol identified, csum calculated */
                if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                /* If HW reports bad checksum, let IP stack re-check it
                 * For example, HW doesn't understand the Microsoft IP stack,
                 * which mis-calculates the TCP checksum - if it should be
                 * 0x0, it writes 0xffff in violation of RFC 1624
                 */
        }

        ds_bits = wil_rxdesc_ds_bits(d);
        if (ds_bits == 1) {
                /*
                 * HW bug - in ToDS mode, i.e. Rx on AP side,
                 * addresses get swapped
                 */
                wil_swap_ethaddr(skb->data);
        }

        return skb;
}
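/*
 * On the "ftype << 2" above: wil_rxdesc_ftype() extracts the raw 2-bit
 * 802.11 frame type, while the IEEE80211_FTYPE_* constants encode the
 * type in bits 2-3 of the frame control field. Shifting left by 2 makes
 * them comparable, e.g. raw type 2 becomes 0x8 == IEEE80211_FTYPE_DATA.
 */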
/*
 * allocate and fill up to @count buffers in rx ring
 * buffers posted at @swtail
 */
static int wil_rx_refill(struct wil6210_priv *wil, int count)
{
        struct net_device *ndev = wil_to_ndev(wil);
        struct vring *v = &wil->vring_rx;
        u32 next_tail;
        int rc = 0;
        int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
                        WIL6210_RTAP_SIZE : 0;

        for (; next_tail = wil_vring_next_tail(v),
                        (next_tail != v->swhead) && (count-- > 0);
                        v->swtail = next_tail) {
                rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
                if (rc) {
                        wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
                                rc, v->swtail);
                        break;
                }
        }
        iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));

        return rc;
}
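/*
 * Refill loop invariant, with hypothetical numbers: with size = 128,
 * swhead = 10, swtail = 9, next_tail would be 10 == swhead, so the loop
 * stops - the one-slot gap noted at wil_vring_avail_tx() is preserved on
 * the Rx side as well. The final iowrite32() publishes the new tail to
 * the device in a single register write, regardless of how many buffers
 * were posted.
 */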
/**
 * Pass Rx packet to the netif. Update statistics.
 * Called in softirq context (NAPI poll).
 */
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
        gro_result_t rc;
        struct wil6210_priv *wil = ndev_to_wil(ndev);
        unsigned int len = skb->len;
        struct vring_rx_desc *d = wil_skb_rxdesc(skb);
        int cid = wil_rxdesc_cid(d);
        struct wil_net_stats *stats = &wil->sta[cid].stats;

        skb_orphan(skb);

        rc = napi_gro_receive(&wil->napi_rx, skb);

        if (unlikely(rc == GRO_DROP)) {
                ndev->stats.rx_dropped++;
                stats->rx_dropped++;
                wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
        } else {
                ndev->stats.rx_packets++;
                stats->rx_packets++;
                ndev->stats.rx_bytes += len;
                stats->rx_bytes += len;
        }
        {
                static const char * const gro_res_str[] = {
                        [GRO_MERGED]            = "GRO_MERGED",
                        [GRO_MERGED_FREE]       = "GRO_MERGED_FREE",
                        [GRO_HELD]              = "GRO_HELD",
                        [GRO_NORMAL]            = "GRO_NORMAL",
                        [GRO_DROP]              = "GRO_DROP",
                };
                wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
                             len, gro_res_str[rc]);
        }
}
/**
 * Proceed all completed skb's from Rx VRING
 *
 * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
 */
void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
        struct net_device *ndev = wil_to_ndev(wil);
        struct vring *v = &wil->vring_rx;
        struct sk_buff *skb;

        if (unlikely(!v->va)) {
                wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
                return;
        }
        wil_dbg_txrx(wil, "%s()\n", __func__);
        while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
                (*quota)--;

                if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
                        skb->dev = ndev;
                        skb_reset_mac_header(skb);
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        skb->pkt_type = PACKET_OTHERHOST;
                        skb->protocol = htons(ETH_P_802_2);
                        wil_netif_rx_any(skb, ndev);
                } else {
                        struct ethhdr *eth = (void *)skb->data;

                        skb->protocol = eth_type_trans(skb, ndev);

                        if (is_unicast_ether_addr(eth->h_dest))
                                wil_rx_reorder(wil, skb);
                        else
                                wil_netif_rx_any(skb, ndev);
                }
        }
        wil_rx_refill(wil, v->size);
}
int wil_rx_init(struct wil6210_priv *wil)
{
        struct vring *vring = &wil->vring_rx;
        int rc;

        wil_dbg_misc(wil, "%s()\n", __func__);

        if (vring->va) {
                wil_err(wil, "Rx ring already allocated\n");
                return -EINVAL;
        }

        vring->size = WIL6210_RX_RING_SIZE;
        rc = wil_vring_alloc(wil, vring);
        if (rc)
                return rc;

        rc = wmi_rx_chain_add(wil, vring);
        if (rc)
                goto err_free;

        rc = wil_rx_refill(wil, vring->size);
        if (rc)
                goto err_free;

        return 0;
 err_free:
        wil_vring_free(wil, vring, 0);

        return rc;
}
void wil_rx_fini(struct wil6210_priv *wil)
{
        struct vring *vring = &wil->vring_rx;

        wil_dbg_misc(wil, "%s()\n", __func__);

        if (vring->va)
                wil_vring_free(wil, vring, 0);
}
int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
                      int cid, int tid)
{
        int rc;
        struct wmi_vring_cfg_cmd cmd = {
                .action = cpu_to_le32(WMI_VRING_CMD_ADD),
                .vring_cfg = {
                        .tx_sw_ring = {
                                .max_mpdu_size = cpu_to_le16(TX_BUF_LEN),
                                .ring_size = cpu_to_le16(size),
                        },
                        .ringid = id,
                        .cidxtid = mk_cidxtid(cid, tid),
                        .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
                        .mac_ctrl = 0,
                        .to_resolution = 0,
                        .agg_max_wsize = 16,
                        .schd_params = {
                                .priority = cpu_to_le16(0),
                                .timeslot_us = cpu_to_le16(0xfff),
                        },
                },
        };
        struct {
                struct wil6210_mbox_hdr_wmi wmi;
                struct wmi_vring_cfg_done_event cmd;
        } __packed reply;
        struct vring *vring = &wil->vring_tx[id];
        struct vring_tx_data *txdata = &wil->vring_tx_data[id];

        wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
                     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);

        if (vring->va) {
                wil_err(wil, "Tx ring [%d] already allocated\n", id);
                rc = -EINVAL;
                goto out;
        }

        memset(txdata, 0, sizeof(*txdata));
        vring->size = size;
        rc = wil_vring_alloc(wil, vring);
        if (rc)
                goto out;

        wil->vring2cid_tid[id][0] = cid;
        wil->vring2cid_tid[id][1] = tid;

        cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

        rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
                      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
        if (rc)
                goto out_free;

        if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
                wil_err(wil, "Tx config failed, status 0x%02x\n",
                        reply.cmd.status);
                rc = -EINVAL;
                goto out_free;
        }
        vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

        txdata->enabled = 1;

        return 0;
 out_free:
        wil_vring_free(wil, vring, 1);
 out:

        return rc;
}
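/*
 * On mk_cidxtid() used in the command above: it packs the connection id
 * and the TID into the single cidxtid octet of the WMI command (one
 * nibble each), so e.g. cid 1 / tid 0 travel as one byte on the wire.
 * Which nibble holds which field is defined by the helper in wmi.h.
 */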
void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
        struct vring *vring = &wil->vring_tx[id];

        WARN_ON(!mutex_is_locked(&wil->mutex));

        if (!vring->va)
                return;

        wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);

        /* make sure NAPI won't touch this vring */
        wil->vring_tx_data[id].enabled = 0;
        if (test_bit(wil_status_napi_en, &wil->status))
                napi_synchronize(&wil->napi_tx);

        wil_vring_free(wil, vring, 1);
}
static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
                                       struct sk_buff *skb)
{
        int i;
        struct ethhdr *eth = (void *)skb->data;
        int cid = wil_find_cid(wil, eth->h_dest);

        if (cid < 0)
                return NULL;

        if (!wil->sta[cid].data_port_open &&
            (skb->protocol != cpu_to_be16(ETH_P_PAE)))
                return NULL;

        /* TODO: fix for multiple TID */
        for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
                if (wil->vring2cid_tid[i][0] == cid) {
                        struct vring *v = &wil->vring_tx[i];

                        wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
                                     __func__, eth->h_dest, i);
                        if (v->va) {
                                return v;
                        } else {
                                wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
                                return NULL;
                        }
                }
        }

        return NULL;
}
static void wil_set_da_for_vring(struct wil6210_priv *wil,
                                 struct sk_buff *skb, int vring_index)
{
        struct ethhdr *eth = (void *)skb->data;
        int cid = wil->vring2cid_tid[vring_index][0];

        memcpy(eth->h_dest, wil->sta[cid].addr, ETH_ALEN);
}
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                        struct sk_buff *skb);

/*
 * Find 1-st vring and return it; set dest address for this vring in skb
 * duplicate skb and send it to other active vrings
 */
static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
                                  struct sk_buff *skb)
{
        struct vring *v, *v2;
        struct sk_buff *skb2;
        int i;
        u8 cid;

        /* find 1-st vring eligible for data */
        for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
                v = &wil->vring_tx[i];
                if (!v->va)
                        continue;

                cid = wil->vring2cid_tid[i][0];
                if (!wil->sta[cid].data_port_open)
                        continue;

                goto found;
        }

        wil_dbg_txrx(wil, "Tx while no vrings active?\n");

        return NULL;

found:
        wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
        wil_set_da_for_vring(wil, skb, i);

        /* find other active vrings and duplicate skb for each */
        for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
                v2 = &wil->vring_tx[i];
                if (!v2->va)
                        continue;
                cid = wil->vring2cid_tid[i][0];
                if (!wil->sta[cid].data_port_open)
                        continue;

                skb2 = skb_copy(skb, GFP_ATOMIC);
                if (skb2) {
                        wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
                        wil_set_da_for_vring(wil, skb2, i);
                        wil_tx_vring(wil, v2, skb2);
                } else {
                        wil_err(wil, "skb_copy failed\n");
                }
        }

        return v;
}
static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
                           int vring_index)
{
        wil_desc_addr_set(&d->dma.addr, pa);
        d->dma.ip_length = 0;
        /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
        d->dma.b11 = 0/*14 | BIT(7)*/;
        d->dma.error = 0;
        d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
        d->dma.length = cpu_to_le16((u16)len);
        d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
        d->mac.d[0] = 0;
        d->mac.d[1] = 0;
        d->mac.d[2] = 0;
        d->mac.ucode_cmd = 0;
        /* use dst index 0 */
        d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS) |
                       (0 << MAC_CFG_DESC_TX_1_DST_INDEX_POS);
        /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
        d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
                      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

        return 0;
}
static void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
{
        d->mac.d[2] |= ((nr_frags + 1) <<
                       MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}
static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
                                         struct vring_tx_desc *d,
                                         struct sk_buff *skb)
{
        int protocol;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        d->dma.b11 = ETH_HLEN; /* MAC header length */

        switch (skb->protocol) {
        case cpu_to_be16(ETH_P_IP):
                protocol = ip_hdr(skb)->protocol;
                d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
                break;
        case cpu_to_be16(ETH_P_IPV6):
                protocol = ipv6_hdr(skb)->nexthdr;
                break;
        default:
                return -EINVAL;
        }

        switch (protocol) {
        case IPPROTO_TCP:
                d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
                /* L4 header len: TCP header length */
                d->dma.d0 |=
                (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
                break;
        case IPPROTO_UDP:
                /* L4 header len: UDP header length */
                d->dma.d0 |=
                (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
                break;
        default:
                return -EINVAL;
        }

        d->dma.ip_length = skb_network_header_len(skb);
        /* Enable TCP/UDP checksum */
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
        /* Calculate pseudo-header */
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

        return 0;
}
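/*
 * Worked example of the offload setup above, for a TCP/IPv4 skb with a
 * 20-byte IP header and a 20-byte TCP header (hypothetical packet):
 *   d->dma.b11       = ETH_HLEN (14) with the IPV4 L3-type bit set,
 *   d->dma.d0       |= (2 << L4_TYPE_POS) | 20   (L4 type TCP, hdr len),
 *   d->dma.ip_length = 20,
 * plus the TCP_UDP_CHECKSUM_EN and PSEUDO_HEADER_CALC_EN bits, telling
 * the hardware to compute and insert the L4 checksum itself.
 */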
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                        struct sk_buff *skb)
{
        struct device *dev = wil_to_dev(wil);
        struct vring_tx_desc dd, *d = &dd;
        volatile struct vring_tx_desc *_d;
        u32 swhead = vring->swhead;
        int avail = wil_vring_avail_tx(vring);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        uint f = 0;
        int vring_index = vring - wil->vring_tx;
        struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
        uint i = swhead;
        dma_addr_t pa;

        wil_dbg_txrx(wil, "%s()\n", __func__);

        if (avail < 1 + nr_frags) {
                wil_err(wil, "Tx ring full. No space for %d fragments\n",
                        1 + nr_frags);
                return -ENOMEM;
        }
        _d = &vring->va[i].tx;

        pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

        wil_dbg_txrx(wil, "Tx skb %d bytes 0x%p -> %pad\n", skb_headlen(skb),
                     skb->data, &pa);
        wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
                          skb->data, skb_headlen(skb), false);

        if (unlikely(dma_mapping_error(dev, pa)))
                return -EINVAL;
        vring->ctx[i].mapped_as = wil_mapped_as_single;
        /* 1-st segment */
        wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
        /* Process TCP/UDP checksum offloading */
        if (wil_tx_desc_offload_cksum_set(wil, d, skb)) {
                wil_err(wil, "VRING #%d Failed to set cksum, drop packet\n",
                        vring_index);
                goto dma_error;
        }

        vring->ctx[i].nr_frags = nr_frags;
        wil_tx_desc_set_nr_frags(d, nr_frags);
        if (nr_frags)
                *_d = *d;

        /* middle segments */
        for (; f < nr_frags; f++) {
                const struct skb_frag_struct *frag =
                                &skb_shinfo(skb)->frags[f];
                int len = skb_frag_size(frag);

                i = (swhead + f + 1) % vring->size;
                _d = &vring->va[i].tx;
                pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
                                      DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, pa)))
                        goto dma_error;
                vring->ctx[i].mapped_as = wil_mapped_as_page;
                wil_tx_desc_map(d, pa, len, vring_index);
                /* no need to check return code -
                 * if it succeeded for 1-st descriptor,
                 * it will succeed here too
                 */
                wil_tx_desc_offload_cksum_set(wil, d, skb);
                *_d = *d;
        }
        /* for the last seg only */
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
        *_d = *d;

        /* hold reference to skb
         * to prevent skb release before accounting
         * in case of immediate "tx done"
         */
        vring->ctx[i].skb = skb_get(skb);

        wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
                          (const void *)d, sizeof(*d), false);

        if (wil_vring_is_empty(vring)) /* performance monitoring */
                txdata->idle += get_cycles() - txdata->last_idle;

        /* advance swhead */
        wil_vring_advance_head(vring, nr_frags + 1);
        wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
        trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
        iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));

        return 0;
 dma_error:
        /* unmap what we have mapped */
        nr_frags = f + 1; /* frags mapped + one for skb head */
        for (f = 0; f < nr_frags; f++) {
                struct wil_ctx *ctx;

                i = (swhead + f) % vring->size;
                ctx = &vring->ctx[i];
                _d = &vring->va[i].tx;
                *d = *_d;
                _d->dma.status = TX_DMA_STATUS_DU;
                wil_txdesc_unmap(dev, d, ctx);

                if (ctx->skb)
                        dev_kfree_skb_any(ctx->skb);

                memset(ctx, 0, sizeof(*ctx));
        }

        return -EINVAL;
}
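/*
 * Descriptor accounting example for the function above (hypothetical
 * skb): a fragmented skb with nr_frags = 2 consumes 3 descriptors -
 * the head at swhead, the fragments at swhead+1 and swhead+2 - and the
 * EOP/MARK_WB/DMA_IT bits are set only on the last one, so the hardware
 * raises a single "tx done" indication for the whole frame.
 */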
netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct wil6210_priv *wil = ndev_to_wil(ndev);
        struct ethhdr *eth = (void *)skb->data;
        struct vring *vring;
        static bool pr_once_fw;
        int rc;

        wil_dbg_txrx(wil, "%s()\n", __func__);
        if (!test_bit(wil_status_fwready, &wil->status)) {
                if (!pr_once_fw) {
                        wil_err(wil, "FW not ready\n");
                        pr_once_fw = true;
                }
                goto drop;
        }
        if (!test_bit(wil_status_fwconnected, &wil->status)) {
                wil_err(wil, "FW not connected\n");
                goto drop;
        }
        if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
                wil_err(wil, "Xmit in monitor mode not supported\n");
                goto drop;
        }
        pr_once_fw = false;

        /* find vring */
        if (is_unicast_ether_addr(eth->h_dest))
                vring = wil_find_tx_vring(wil, skb);
        else
                vring = wil_tx_bcast(wil, skb);
        if (!vring) {
                wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
                goto drop;
        }
        /* set up vring entry */
        rc = wil_tx_vring(wil, vring, skb);

        /* do we still have enough room in the vring? */
        if (wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring)) {
                netif_tx_stop_all_queues(wil_to_ndev(wil));
                wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
        }

        switch (rc) {
        case 0:
                /* statistics will be updated on the tx_complete */
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        case -ENOMEM:
                return NETDEV_TX_BUSY;
        default:
                break; /* goto drop; */
        }
 drop:
        ndev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);

        return NET_XMIT_DROP;
}
/**
 * Clean up transmitted skb's from the Tx VRING
 *
 * Return number of descriptors cleared
 *
 * Safe to call from IRQ
 */
int wil_tx_complete(struct wil6210_priv *wil, int ringid)
{
        struct net_device *ndev = wil_to_ndev(wil);
        struct device *dev = wil_to_dev(wil);
        struct vring *vring = &wil->vring_tx[ringid];
        struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
        int done = 0;
        int cid = wil->vring2cid_tid[ringid][0];
        struct wil_net_stats *stats = &wil->sta[cid].stats;
        volatile struct vring_tx_desc *_d;

        if (!vring->va) {
                wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
                return 0;
        }

        if (!txdata->enabled) {
                wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
                return 0;
        }

        wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);

        while (!wil_vring_is_empty(vring)) {
                int new_swtail;
                struct wil_ctx *ctx = &vring->ctx[vring->swtail];
                /**
                 * For the fragmented skb, HW will set DU bit only for the
                 * last fragment. look for it
                 */
                int lf = (vring->swtail + ctx->nr_frags) % vring->size;
                /* TODO: check we are not past head */

                _d = &vring->va[lf].tx;
                if (!(_d->dma.status & TX_DMA_STATUS_DU))
                        break;

                new_swtail = (lf + 1) % vring->size;
                while (vring->swtail != new_swtail) {
                        struct vring_tx_desc dd, *d = &dd;
                        u16 dmalen;
                        struct sk_buff *skb;

                        ctx = &vring->ctx[vring->swtail];
                        skb = ctx->skb;
                        _d = &vring->va[vring->swtail].tx;

                        *d = *_d;

                        dmalen = le16_to_cpu(d->dma.length);
                        trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
                                              d->dma.error);
                        wil_dbg_txrx(wil,
                                     "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
                                     vring->swtail, dmalen, d->dma.status,
                                     d->dma.error);
                        wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
                                          (const void *)d, sizeof(*d), false);

                        wil_txdesc_unmap(dev, d, ctx);

                        if (skb) {
                                if (d->dma.error == 0) {
                                        ndev->stats.tx_packets++;
                                        stats->tx_packets++;
                                        ndev->stats.tx_bytes += skb->len;
                                        stats->tx_bytes += skb->len;
                                } else {
                                        ndev->stats.tx_errors++;
                                        stats->tx_errors++;
                                }

                                dev_kfree_skb_any(skb);
                        }
                        memset(ctx, 0, sizeof(*ctx));
                        /* There is no need to touch HW descriptor:
                         * - status bit TX_DMA_STATUS_DU is set by design,
                         *   so hardware will not try to process this desc.,
                         * - rest of descriptor will be initialized on Tx.
                         */
                        vring->swtail = wil_vring_next_tail(vring);
                        done++;
                }
        }
        if (wil_vring_is_empty(vring)) { /* performance monitoring */
                wil_dbg_txrx(wil, "Ring[%2d] empty\n", ringid);
                txdata->last_idle = get_cycles();
        }
        if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring)) {
                wil_dbg_txrx(wil, "netif_tx_wake : ring not full\n");
                netif_tx_wake_all_queues(wil_to_ndev(wil));
        }

        return done;
}
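/*
 * Completion walk example for the function above (hypothetical state):
 * with swtail = 7 and ctx->nr_frags = 2, the last descriptor of the
 * frame is lf = (7 + 2) % size = 9. Only when descriptor 9 has
 * TX_DMA_STATUS_DU set does the inner loop reclaim descriptors 7, 8
 * and 9, since the hardware sets DU on the last fragment of a frame
 * only.
 */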