// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/if_macvlan.h>
#include <linux/prefetch.h>

#include "fm10k.h"

#define DRV_SUMMARY	"Intel(R) Ethernet Switch Host Interface Driver"
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
	"Copyright(c) 2013 - 2019 Intel Corporation.";

MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
/* single workqueue for entire fm10k driver */
struct workqueue_struct *fm10k_workqueue;
/**
 * fm10k_init_module - Driver Registration Routine
 *
 * fm10k_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init fm10k_init_module(void)
{
	int ret;

	pr_info("%s\n", fm10k_driver_string);
	pr_info("%s\n", fm10k_copyright);

	/* create driver workqueue */
	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
					  fm10k_driver_name);
	if (!fm10k_workqueue)
		return -ENOMEM;

	ret = fm10k_register_pci_driver();
	if (ret)
		destroy_workqueue(fm10k_workqueue);

	return ret;
}
module_init(fm10k_init_module);
/**
 * fm10k_exit_module - Driver Exit Cleanup Routine
 *
 * fm10k_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit fm10k_exit_module(void)
{
	fm10k_unregister_pci_driver();

	/* destroy driver workqueue */
	destroy_workqueue(fm10k_workqueue);
}
module_exit(fm10k_exit_module);
static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
				    struct fm10k_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* Only page will be NULL if buffer was consumed */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}
/**
 * fm10k_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
{
	union fm10k_rx_desc *rx_desc;
	struct fm10k_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = FM10K_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer[i];
	i -= rx_ring->count;

	do {
		if (!fm10k_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = FM10K_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->d.staterr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();

		/* notify hardware of new descriptors */
		writel(i, rx_ring->tail);
	}
}
/**
 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the interface
 **/
static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
				struct fm10k_rx_buffer *old_buff)
{
	struct fm10k_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buff = *old_buff;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
					 old_buff->page_offset,
					 FM10K_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
				    struct page *page,
				    unsigned int __maybe_unused truesize)
{
	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
		return false;
#endif
	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	page_ref_inc(page);

	return true;
}
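
/* Illustrative sketch, not part of the upstream driver: with 4K pages the
 * ring treats each page as two FM10K_RX_BUFSZ halves, and the XOR in
 * fm10k_can_reuse_rx_page() simply toggles between them.  The helper below
 * (a made-up name) restates that arithmetic in isolation, assuming
 * PAGE_SIZE == 4096 and FM10K_RX_BUFSZ == 2048.
 */
static inline unsigned int fm10k_example_flip_offset(unsigned int page_offset)
{
	/* 0 becomes 2048 and 2048 becomes 0: the other half of the page */
	return page_offset ^ FM10K_RX_BUFSZ;
}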
/**
 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_buffer: buffer containing page to add
 * @size: packet size from rx_desc
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the interface.
 **/
static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
			      unsigned int size,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = FM10K_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, 512);
#endif
	unsigned int pull_len;

	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	if (likely(size <= FM10K_RX_HDR_LEN)) {
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is reusable, we can reuse buffer as-is */
		if (dev_page_is_reusable(page))
			return true;

		/* this page cannot be reused so discard it */
		__free_page(page);
		return false;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(skb->dev, va, FM10K_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
}
static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int size = le16_to_cpu(rx_desc->w.length);
	struct fm10k_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		net_prefetch(page_addr);

		/* allocate a skb to store the frags */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
				     FM10K_RX_HDR_LEN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      FM10K_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (fm10k_add_rx_frag(rx_buffer, size, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		fm10k_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;

	return skb;
}
static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
				     union fm10k_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (fm10k_test_staterr(rx_desc,
			       FM10K_RXD_STATUS_L4E |
			       FM10K_RXD_STATUS_L4E2 |
			       FM10K_RXD_STATUS_IPE |
			       FM10K_RXD_STATUS_IPE2)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2))
		skb->encapsulation = true;
	else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS))
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	ring->rx_stats.csum_good++;
}
#define FM10K_RSS_L4_TYPES_MASK \
	(BIT(FM10K_RSSTYPE_IPV4_TCP) | \
	 BIT(FM10K_RSSTYPE_IPV4_UDP) | \
	 BIT(FM10K_RSSTYPE_IPV6_TCP) | \
	 BIT(FM10K_RSSTYPE_IPV6_UDP))
static inline void fm10k_rx_hash(struct fm10k_ring *ring,
				 union fm10k_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK;
	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
		     (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
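
/* Illustrative sketch, not part of the upstream driver: the L4-vs-L3 choice
 * in fm10k_rx_hash() is a plain bitmask test.  An RSS type such as
 * FM10K_RSSTYPE_IPV4_TCP falls inside FM10K_RSS_L4_TYPES_MASK and is
 * reported as an L4 hash, anything else is reported as an L3 hash.  The
 * helper name below is hypothetical.
 */
static inline enum pkt_hash_types fm10k_example_hash_type(u16 rss_type)
{
	return (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ?
	       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
}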
static void fm10k_type_trans(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc __maybe_unused *rx_desc,
			     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel);

	/* check to see if DGLORT belongs to a MACVLAN */
	if (l2_accel) {
		u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;

		idx -= l2_accel->dglort;
		if (idx < l2_accel->size && l2_accel->macvlan[idx])
			dev = l2_accel->macvlan[idx];
		else
			l2_accel = NULL;
	}

	/* Record Rx queue, or update macvlan statistics */
	if (!l2_accel)
		skb_record_rx_queue(skb, rx_ring->queue_index);
	else
		macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
				 false);

	skb->protocol = eth_type_trans(skb, dev);
}
/**
 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int len = skb->len;

	fm10k_rx_hash(rx_ring, rx_desc, skb);

	fm10k_rx_checksum(rx_ring, rx_desc, skb);

	FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;

	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;

	FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;

	if (rx_desc->w.vlan) {
		u16 vid = le16_to_cpu(rx_desc->w.vlan);

		if ((vid & VLAN_VID_MASK) != rx_ring->vid)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		else if (vid & VLAN_PRIO_MASK)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vid & VLAN_PRIO_MASK);
	}

	fm10k_type_trans(rx_ring, rx_desc, skb);

	return len;
}
/**
 * fm10k_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(FM10K_RX_DESC(rx_ring, ntc));

	if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
		return false;

	return true;
}
/**
 * fm10k_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
				  union fm10k_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	if (unlikely((fm10k_test_staterr(rx_desc,
					 FM10K_RXD_STATUS_RXE)))) {
#define FM10K_TEST_RXD_BIT(rxd, bit) \
	((rxd)->w.csum_err & cpu_to_le16(bit))
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR))
			rx_ring->rx_stats.switch_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR))
			rx_ring->rx_stats.drops++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR))
			rx_ring->rx_stats.pp_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY))
			rx_ring->rx_stats.link_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG))
			rx_ring->rx_stats.length_errors++;
		dev_kfree_skb_any(skb);
		rx_ring->rx_stats.errors++;
		return true;
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
/**
 * fm10k_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
			      struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}
static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
			      struct fm10k_ring *rx_ring,
			      int budget)
{
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = fm10k_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union fm10k_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
			fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!rx_desc->d.staterr)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		/* retrieve a buffer from the ring */
		skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (fm10k_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* populate checksum, timestamp, VLAN, and protocol */
		total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);

		fm10k_receive_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_packets;
	rx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	return total_packets;
}
#define VXLAN_HLEN (sizeof(struct udphdr) + 8)
static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
{
	struct fm10k_intfc *interface = netdev_priv(skb->dev);

	if (interface->vxlan_port != udp_hdr(skb)->dest)
		return NULL;

	/* return offset of udp_hdr plus 8 bytes for VXLAN header */
	return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN);
}
#define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF)
#define NVGRE_TNI htons(0x2000)
struct fm10k_nvgre_hdr {
	__be16 flags;
	__be16 proto;
	__be32 tni;
};

static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
{
	struct fm10k_nvgre_hdr *nvgre_hdr;
	int hlen = ip_hdrlen(skb);

	/* currently only IPv4 is supported due to hlen above */
	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		return NULL;

	/* our transport header should be NVGRE */
	nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen);

	/* verify all reserved flags are 0 */
	if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
		return NULL;

	/* report start of ethernet header */
	if (nvgre_hdr->flags & NVGRE_TNI)
		return (struct ethhdr *)(nvgre_hdr + 1);

	return (struct ethhdr *)(&nvgre_hdr->tni);
}
__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
{
	u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
	struct ethhdr *eth_hdr;

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		eth_hdr = fm10k_port_is_vxlan(skb);
		break;
	case IPPROTO_GRE:
		eth_hdr = fm10k_gre_is_nvgre(skb);
		break;
	default:
		return 0;
	}

	if (!eth_hdr)
		return 0;

	switch (eth_hdr->h_proto) {
	case htons(ETH_P_IP):
		inner_l4_hdr = inner_ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (inner_l4_hdr) {
	case IPPROTO_TCP:
		inner_l4_hlen = inner_tcp_hdrlen(skb);
		break;
	case IPPROTO_UDP:
		inner_l4_hlen = 8;
		break;
	default:
		return 0;
	}

	/* The hardware allows tunnel offloads only if the combined inner and
	 * outer header is 184 bytes or less
	 */
	if (skb_inner_transport_header(skb) + inner_l4_hlen -
	    skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
		return 0;

	return eth_hdr->h_proto;
}
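
/* Illustrative note, not part of the upstream driver: for a typical
 * VXLAN-encapsulated TCP/IPv4 frame the combined header measured above is
 * 14 (outer Ethernet) + 20 (outer IPv4) + 8 (UDP) + 8 (VXLAN) +
 * 14 (inner Ethernet) + 20 (inner IPv4) + 20 (inner TCP) = 104 bytes,
 * which is comfortably below the FM10K_TUNNEL_HEADER_LENGTH limit of
 * 184 bytes checked at the end of fm10k_tx_encap_offload().
 */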
static int fm10k_tso(struct fm10k_ring *tx_ring,
		     struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	unsigned char *th;
	u8 hdrlen;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	/* compute header lengths */
	if (skb->encapsulation) {
		if (!fm10k_tx_encap_offload(skb))
			goto err_vxlan;
		th = skb_inner_transport_header(skb);
	} else {
		th = skb_transport_header(skb);
	}

	/* compute offset from SOF to transport header and add header len */
	hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2);

	first->tx_flags |= FM10K_TX_FLAGS_CSUM;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * hdrlen;

	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = hdrlen;
	tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);

	return 1;

err_vxlan:
	tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
	if (net_ratelimit())
		netdev_err(tx_ring->netdev,
			   "TSO requested for unsupported tunnel, disabling offload\n");
	return -1;
}
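
/* Illustrative sketch, not part of the upstream driver: hdrlen in
 * fm10k_tso() is the distance from the start of the frame to the end of the
 * (inner) TCP header.  For an untunneled IPv4/TCP frame with no options that
 * is 14 (Ethernet) + 20 (IPv4) + 20 (TCP, doff = 5) = 54 bytes.  The helper
 * below (hypothetical name) redoes that arithmetic for any transport header
 * pointer inside the skb.
 */
static inline u8 fm10k_example_tso_hdrlen(struct sk_buff *skb,
					  unsigned char *th)
{
	return (th - skb->data) + (((struct tcphdr *)th)->doff << 2);
}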
static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
			  struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	union {
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
		u8 *raw;
	} network_hdr;
	u8 *transport_hdr;
	__be16 frag_off;
	__be16 protocol;
	u8 l4_hdr = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto no_csum;

	if (skb->encapsulation) {
		protocol = fm10k_tx_encap_offload(skb);
		if (!protocol) {
			if (skb_checksum_help(skb)) {
				dev_warn(tx_ring->dev,
					 "failed to offload encap csum!\n");
				tx_ring->tx_stats.csum_err++;
			}
			goto no_csum;
		}
		network_hdr.raw = skb_inner_network_header(skb);
		transport_hdr = skb_inner_transport_header(skb);
	} else {
		protocol = vlan_get_protocol(skb);
		network_hdr.raw = skb_network_header(skb);
		transport_hdr = skb_transport_header(skb);
	}

	switch (protocol) {
	case htons(ETH_P_IP):
		l4_hdr = network_hdr.ipv4->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = network_hdr.ipv6->nexthdr;
		if (likely((transport_hdr - network_hdr.raw) ==
			   sizeof(struct ipv6hdr)))
			break;
		ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
				      sizeof(struct ipv6hdr),
				 &l4_hdr, &frag_off);
		if (unlikely(frag_off))
			l4_hdr = NEXTHDR_FRAGMENT;
		break;
	default:
		break;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		break;
	case IPPROTO_GRE:
		if (skb->encapsulation)
			break;
		fallthrough;
	default:
		if (unlikely(net_ratelimit())) {
			dev_warn(tx_ring->dev,
				 "partial checksum, version=%d l4 proto=%x\n",
				 protocol, l4_hdr);
		}
		skb_checksum_help(skb);
		tx_ring->tx_stats.csum_err++;
		goto no_csum;
	}

	/* update TX checksum flag */
	first->tx_flags |= FM10K_TX_FLAGS_CSUM;
	tx_ring->tx_stats.csum_good++;

no_csum:
	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = 0;
	tx_desc->mss = 0;
}
#define FM10K_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))

static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u8 desc_flags = 0;

	/* set checksum offload bits */
	desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
				     FM10K_TXD_FLAG_CSUM);

	return desc_flags;
}
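
/* Illustrative sketch, not part of the upstream driver: for single-bit
 * flags that divide evenly, FM10K_SET_FLAG() is just "if the software flag
 * is set, emit the descriptor flag".  The hypothetical helper below is the
 * open-coded equivalent of the one CSUM mapping used above.
 */
static inline u8 fm10k_example_csum_desc_flag(u32 tx_flags)
{
	return (tx_flags & FM10K_TX_FLAGS_CSUM) ? FM10K_TXD_FLAG_CSUM : 0;
}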
static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
			       struct fm10k_tx_desc *tx_desc, u16 i,
			       dma_addr_t dma, unsigned int size, u8 desc_flags)
{
	/* set RS and INT for last frame in a cache line */
	if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
		desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;

	/* record values to descriptor */
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->flags = desc_flags;
	tx_desc->buflen = cpu_to_le16(size);

	/* return true if we just wrapped the ring */
	return i == tx_ring->count;
}
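
/* Illustrative note, not part of the upstream driver: assuming
 * FM10K_TXD_WB_FIFO_SIZE is a power of two (the mask above requires it),
 * the "(++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0" test fires once per
 * FM10K_TXD_WB_FIFO_SIZE descriptors, so RS/INT write-back is requested on
 * the last descriptor of each group rather than on every descriptor.
 */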
static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available */
	if (likely(fm10k_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	if (likely(fm10k_desc_unused(tx_ring) >= size))
		return 0;
	return __fm10k_maybe_stop_tx(tx_ring, size);
}
static void fm10k_tx_map(struct fm10k_ring *tx_ring,
			 struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	skb_frag_t *frag;
	unsigned char *data;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;
	u8 flags = fm10k_tx_desc_flags(skb, tx_flags);

	tx_desc = FM10K_TX_DESC(tx_ring, i);

	/* add HW VLAN tag */
	if (skb_vlan_tag_present(skb))
		tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	else
		tx_desc->vlan = 0;

	size = skb_headlen(skb);
	data = skb->data;

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);

	data_len = skb->data_len;
	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) {
			if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
					       FM10K_MAX_DATA_PER_TXD, flags)) {
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += FM10K_MAX_DATA_PER_TXD;
			size -= FM10K_MAX_DATA_PER_TXD;
		}

		if (likely(!data_len))
			break;

		if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
				       dma, size, flags)) {
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer[i];
	}

	/* write last descriptor with LAST bit set */
	flags |= FM10K_TXD_FLAG_LAST;

	if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
		i = 0;

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);
	}

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer[i];
		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
				  struct fm10k_ring *tx_ring)
{
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	struct fm10k_tx_buffer *first;
	unsigned short f;
	u32 tx_flags = 0;
	int tso;

	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		count += TXD_USE_COUNT(skb_frag_size(frag));
	}

	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;

	tso = fm10k_tso(tx_ring, first);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		fm10k_tx_csum(tx_ring, first);

	fm10k_tx_map(tx_ring, first);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}
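
/* Illustrative note, not part of the upstream driver: the "count + 3" test
 * in fm10k_xmit_frame_ring() follows the comment above it.  Each
 * FM10K_MAX_DATA_PER_TXD-sized chunk of the linear area and of every page
 * fragment costs one data descriptor, and three more slots are reserved as
 * a gap so the tail never touches the head.  A small linear-only frame is a
 * single data descriptor, so at least four free descriptors are required
 * before it is queued.
 */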
static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
{
	return ring->stats.packets;
}
/**
 * fm10k_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring structure
 * @in_sw: is tx_pending being checked in SW or in HW?
 **/
u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw)
{
	struct fm10k_intfc *interface = ring->q_vector->interface;
	struct fm10k_hw *hw = &interface->hw;
	u32 head, tail;

	if (likely(in_sw)) {
		head = ring->next_to_clean;
		tail = ring->next_to_use;
	} else {
		head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
		tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
	}

	return ((head <= tail) ? tail : tail + ring->count) - head;
}
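
/* Illustrative sketch, not part of the upstream driver: the wrap handling
 * in fm10k_get_tx_pending() can be checked with small numbers.  On a
 * 256-entry ring, head = 250 and tail = 10 gives (10 + 256) - 250 = 16
 * pending descriptors, while head = 10 and tail = 250 gives 250 - 10 = 240.
 * The helper name below is hypothetical.
 */
static inline u32 fm10k_example_tx_pending(u32 head, u32 tail, u32 count)
{
	return ((head <= tail) ? tail : tail + count) - head;
}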
bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
{
	u32 tx_done = fm10k_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = fm10k_get_tx_pending(tx_ring, true);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough.  This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending.  By
	 * requiring this to fail twice we avoid races with
	 * clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if (!tx_pending || (tx_done_old != tx_done)) {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);

		return false;
	}

	/* make sure it is true for two checks in a row */
	return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
}
/**
 * fm10k_tx_timeout_reset - initiate reset due to Tx timeout
 * @interface: driver private struct
 **/
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__FM10K_DOWN, interface->state)) {
		interface->tx_timeout_count++;
		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
		fm10k_service_event_schedule(interface);
	}
}
/**
 * fm10k_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
			       struct fm10k_ring *tx_ring, int napi_budget)
{
	struct fm10k_intfc *interface = q_vector->interface;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__FM10K_DOWN, interface->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer[i];
	tx_desc = FM10K_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer;
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer;
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct fm10k_hw *hw = &interface->hw;

		netif_err(interface, drv, tx_ring->netdev,
			  "Detected Tx Unit Hang\n"
			  "  Tx Queue             <%d>\n"
			  "  TDH, TDT             <%x>, <%x>\n"
			  "  next_to_use          <%x>\n"
			  "  next_to_clean        <%x>\n",
			  tx_ring->queue_index,
			  fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
			  fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
			  tx_ring->next_to_use, i);

		netif_stop_subqueue(tx_ring->netdev,
				    tx_ring->queue_index);

		netif_info(interface, probe, tx_ring->netdev,
			   "tx hang %d detected on queue %d, resetting interface\n",
			   interface->tx_timeout_count + 1,
			   tx_ring->queue_index);

		fm10k_tx_timeout_reset(interface);

		/* the netdev is about to reset, no point in enabling stuff */
		return true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__FM10K_DOWN, interface->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
/**
 * fm10k_update_itr - update the dynamic ITR value based on packet size
 *
 *      Stores a new ITR value based on strictly on packet size.  The
 *      divisors and thresholds used by this function were determined based
 *      on theoretical maximum wire speed and testing data, in order to
 *      minimize response time while increasing bulk throughput.
 *
 * @ring_container: Container for rings to have ITR updated
 **/
static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
{
	unsigned int avg_wire_size, packets, itr_round;

	/* Only update ITR if we are using adaptive setting */
	if (!ITR_IS_ADAPTIVE(ring_container->itr))
		goto clear_counts;

	packets = ring_container->total_packets;
	if (!packets)
		goto clear_counts;

	avg_wire_size = ring_container->total_bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (34 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 360) {
		/* Start at 250K ints/sec and gradually drop to 77K ints/sec */
		avg_wire_size *= 8;
		avg_wire_size += 376;
	} else if (avg_wire_size <= 1152) {
		/* 77K ints/sec to 45K ints/sec */
		avg_wire_size *= 3;
		avg_wire_size += 2176;
	} else if (avg_wire_size <= 1920) {
		/* 45K ints/sec to 38K ints/sec */
		avg_wire_size += 4480;
	} else {
		/* plateau at a limit of 38K ints/sec */
		avg_wire_size = 6656;
	}

	/* Perform final bitshift for division after rounding up to ensure
	 * that the calculation will never get below a value of 1. The bit
	 * shift accounts for changes in the ITR due to PCIe link speed.
	 */
	itr_round = READ_ONCE(ring_container->itr_scale) + 8;
	avg_wire_size += BIT(itr_round) - 1;
	avg_wire_size >>= itr_round;

	/* write back value and retain adaptive flag */
	ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE;

clear_counts:
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;
}
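
/* Illustrative note, not part of the upstream driver, assuming
 * itr_scale == 0 so the final shift in fm10k_update_itr() is 8 bits:
 *   - 64-byte average packets:  64 * 8 + 376 = 888,  (888 + 255) >> 8 = 4,
 *     i.e. roughly a 4 usec interval or ~250K interrupts/sec.
 *   - 1500-byte average packets: 1500 + 4480 = 5980, (5980 + 255) >> 8 = 24,
 *     i.e. roughly 24 usec or ~41K interrupts/sec, inside the 45K-38K band
 *     described in the comments above.
 */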
static void fm10k_qv_enable(struct fm10k_q_vector *q_vector)
{
	/* Enable auto-mask and clear the current mask */
	u32 itr = FM10K_ITR_ENABLE;

	/* Update Tx ITR */
	fm10k_update_itr(&q_vector->tx);

	/* Update Rx ITR */
	fm10k_update_itr(&q_vector->rx);

	/* Store Tx itr in timer slot 0 */
	itr |= (q_vector->tx.itr & FM10K_ITR_MAX);

	/* Shift Rx itr to timer slot 1 */
	itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT;

	/* Write the final value to the ITR register */
	writel(itr, q_vector->itr);
}
static int fm10k_poll(struct napi_struct *napi, int budget)
{
	struct fm10k_q_vector *q_vector =
			       container_of(napi, struct fm10k_q_vector, napi);
	struct fm10k_ring *ring;
	int per_ring_budget, work_done = 0;
	bool clean_complete = true;

	fm10k_for_each_ring(ring, q_vector->tx) {
		if (!fm10k_clean_tx_irq(q_vector, ring, budget))
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		return budget;

	/* attempt to distribute budget to each queue fairly, but don't
	 * allow the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget / q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	fm10k_for_each_ring(ring, q_vector->rx) {
		int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);

		work_done += work;
		if (work >= per_ring_budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		fm10k_qv_enable(q_vector);

	return min(work_done, budget - 1);
}
/**
 * fm10k_set_qos_queues: Allocate queues for a QOS-enabled device
 * @interface: board private structure to initialize
 *
 * When QoS (Quality of Service) is enabled, allocate queues for
 * each traffic class.  If multiqueue isn't available, then abort QoS
 * initialization.
 *
 * This function handles all combinations of QoS and RSS.
 *
 **/
static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	struct fm10k_ring_feature *f;
	int rss_i, i;
	int pcs;

	/* Map queue offset and counts onto allocated tx queues */
	pcs = netdev_get_num_tc(dev);

	if (pcs <= 1)
		return false;

	/* set QoS mask and indices */
	f = &interface->ring_feature[RING_F_QOS];
	f->indices = pcs;
	f->mask = BIT(fls(pcs - 1)) - 1;

	/* determine the upper limit for our current DCB mode */
	rss_i = interface->hw.mac.max_queues / pcs;
	rss_i = BIT(fls(rss_i) - 1);

	/* set RSS mask and indices */
	f = &interface->ring_feature[RING_F_RSS];
	rss_i = min_t(u16, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = BIT(fls(rss_i - 1)) - 1;

	/* configure pause class to queue mapping */
	for (i = 0; i < pcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	interface->num_rx_queues = rss_i * pcs;
	interface->num_tx_queues = rss_i * pcs;

	return true;
}
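
/* Illustrative sketch, not part of the upstream driver: the queue split in
 * fm10k_set_qos_queues() reduces to the hypothetical helper below.  For
 * example, 128 hardware queues, 4 traffic classes and an RSS limit of 16
 * give min(rounddown_pow_of_two(128 / 4), 16) = 16 queues per class, i.e.
 * 64 Tx and 64 Rx queues in total.
 */
static inline u16 fm10k_example_qos_rss_i(u16 max_queues, u8 pcs, u16 rss_limit)
{
	u16 rss_i = BIT(fls(max_queues / pcs) - 1);	/* power-of-two floor */

	return min_t(u16, rss_i, rss_limit);
}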
/**
 * fm10k_set_rss_queues: Allocate queues for RSS
 * @interface: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
{
	struct fm10k_ring_feature *f;
	u16 rss_i;

	f = &interface->ring_feature[RING_F_RSS];
	rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit);

	/* record indices and power of 2 mask for RSS */
	f->indices = rss_i;
	f->mask = BIT(fls(rss_i - 1)) - 1;

	interface->num_rx_queues = rss_i;
	interface->num_tx_queues = rss_i;

	return true;
}
/**
 * fm10k_set_num_queues: Allocate queues for device, feature dependent
 * @interface: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fall through conditions.
 *
 **/
static void fm10k_set_num_queues(struct fm10k_intfc *interface)
{
	/* Attempt to setup QoS and RSS first */
	if (fm10k_set_qos_queues(interface))
		return;

	/* If we don't have QoS, just fallback to only RSS. */
	fm10k_set_rss_queues(interface);
}
/**
 * fm10k_reset_num_queues - Reset the number of queues to zero
 * @interface: board private structure
 *
 * This function should be called whenever we need to reset the number of
 * queues after an error condition.
 */
static void fm10k_reset_num_queues(struct fm10k_intfc *interface)
{
	interface->num_tx_queues = 0;
	interface->num_rx_queues = 0;
	interface->num_q_vectors = 0;
}
/**
 * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
 * @interface: board private structure to initialize
 * @v_count: q_vectors allocated on interface, used for ring interleaving
 * @v_idx: index of vector in interface struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
				unsigned int v_count, unsigned int v_idx,
				unsigned int txr_count, unsigned int txr_idx,
				unsigned int rxr_count, unsigned int rxr_idx)
{
	struct fm10k_q_vector *q_vector;
	struct fm10k_ring *ring;
	int ring_count;

	ring_count = txr_count + rxr_count;

	/* allocate q_vector and rings */
	q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(interface->netdev, &q_vector->napi, fm10k_poll);

	/* tie q_vector and interface together */
	interface->q_vector[v_idx] = q_vector;
	q_vector->interface = interface;
	q_vector->v_idx = v_idx;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* save Tx ring container info */
	q_vector->tx.ring = ring;
	q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK;
	q_vector->tx.itr = interface->tx_itr;
	q_vector->tx.itr_scale = interface->hw.mac.itr_scale;
	q_vector->tx.count = txr_count;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &interface->pdev->dev;
		ring->netdev = interface->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* apply Tx specific ring traits */
		ring->count = interface->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to interface */
		interface->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	/* save Rx ring container info */
	q_vector->rx.ring = ring;
	q_vector->rx.itr = interface->rx_itr;
	q_vector->rx.itr_scale = interface->hw.mac.itr_scale;
	q_vector->rx.count = rxr_count;

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &interface->pdev->dev;
		ring->netdev = interface->netdev;
		rcu_assign_pointer(ring->l2_accel, interface->l2_accel);

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* apply Rx specific ring traits */
		ring->count = interface->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to interface */
		interface->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	fm10k_dbg_q_vector_init(q_vector);

	return 0;
}
/**
 * fm10k_free_q_vector - Free memory allocated for specific interrupt vector
 * @interface: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx)
{
	struct fm10k_q_vector *q_vector = interface->q_vector[v_idx];
	struct fm10k_ring *ring;

	fm10k_dbg_q_vector_exit(q_vector);

	fm10k_for_each_ring(ring, q_vector->tx)
		interface->tx_ring[ring->queue_index] = NULL;

	fm10k_for_each_ring(ring, q_vector->rx)
		interface->rx_ring[ring->queue_index] = NULL;

	interface->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);
	kfree_rcu(q_vector, rcu);
}
/**
 * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors
 * @interface: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
{
	unsigned int q_vectors = interface->num_q_vectors;
	unsigned int rxr_remaining = interface->num_rx_queues;
	unsigned int txr_remaining = interface->num_tx_queues;
	unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);
			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);
		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	fm10k_reset_num_queues(interface);

	while (v_idx--)
		fm10k_free_q_vector(interface, v_idx);

	return -ENOMEM;
}
/**
 * fm10k_free_q_vectors - Free memory allocated for interrupt vectors
 * @interface: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
{
	int v_idx = interface->num_q_vectors;

	fm10k_reset_num_queues(interface);

	while (v_idx--)
		fm10k_free_q_vector(interface, v_idx);
}
/**
 * fm10k_reset_msix_capability - reset MSI-X capability
 * @interface: board private structure to initialize
 *
 * Reset the MSI-X capability back to its starting state
 **/
static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
{
	pci_disable_msix(interface->pdev);
	kfree(interface->msix_entries);
	interface->msix_entries = NULL;
}
/**
 * fm10k_init_msix_capability - configure MSI-X capability
 * @interface: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int v_budget, vector;

	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * the default is to use pairs of vectors
	 */
	v_budget = max(interface->num_rx_queues, interface->num_tx_queues);
	v_budget = min_t(u16, v_budget, num_online_cpus());

	/* account for vectors not related to queues */
	v_budget += NON_Q_VECTORS;

	/* At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation is fatal. */
	interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
					  GFP_KERNEL);
	if (!interface->msix_entries)
		return -ENOMEM;

	/* populate entry values */
	for (vector = 0; vector < v_budget; vector++)
		interface->msix_entries[vector].entry = vector;

	/* Attempt to enable MSI-X with requested value */
	v_budget = pci_enable_msix_range(interface->pdev,
					 interface->msix_entries,
					 MIN_MSIX_COUNT(hw),
					 v_budget);
	if (v_budget < 0) {
		kfree(interface->msix_entries);
		interface->msix_entries = NULL;
		return v_budget;
	}

	/* record the number of queues available for q_vectors */
	interface->num_q_vectors = v_budget - NON_Q_VECTORS;

	return 0;
}
/**
 * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS
 * @interface: Interface structure containing rings and devices
 *
 * Cache the descriptor ring offsets for QoS
 **/
static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	int pc, offset, rss_i, i;
	u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
	u8 num_pcs = netdev_get_num_tc(dev);

	if (num_pcs <= 1)
		return false;

	rss_i = interface->ring_feature[RING_F_RSS].indices;

	for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
		int q_idx = pc;

		for (i = 0; i < rss_i; i++) {
			interface->tx_ring[offset + i]->reg_idx = q_idx;
			interface->tx_ring[offset + i]->qos_pc = pc;
			interface->rx_ring[offset + i]->reg_idx = q_idx;
			interface->rx_ring[offset + i]->qos_pc = pc;
			q_idx += pc_stride;
		}
	}

	return true;
}
/**
 * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @interface: Interface structure containing rings and devices
 *
 * Cache the descriptor ring offsets for RSS
 **/
static void fm10k_cache_ring_rss(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_rx_queues; i++)
		interface->rx_ring[i]->reg_idx = i;

	for (i = 0; i < interface->num_tx_queues; i++)
		interface->tx_ring[i]->reg_idx = i;
}
/**
 * fm10k_assign_rings - Map rings to network devices
 * @interface: Interface structure containing rings and devices
 *
 * This function is meant to go through and configure both the network
 * devices so that they contain rings, and configure the rings so that
 * they function with their network devices.
 **/
static void fm10k_assign_rings(struct fm10k_intfc *interface)
{
	if (fm10k_cache_ring_qos(interface))
		return;

	fm10k_cache_ring_rss(interface);
}
static void fm10k_init_reta(struct fm10k_intfc *interface)
{
	u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
	u32 reta;

	/* If the Rx flow indirection table has been configured manually, we
	 * need to maintain it when possible.
	 */
	if (netif_is_rxfh_configured(interface->netdev)) {
		for (i = FM10K_RETA_SIZE; i--;) {
			reta = interface->reta[i];
			if ((((reta << 24) >> 24) < rss_i) &&
			    (((reta << 16) >> 24) < rss_i) &&
			    (((reta <<  8) >> 24) < rss_i) &&
			    (((reta)       >> 24) < rss_i))
				continue;

			/* this should never happen */
			dev_err(&interface->pdev->dev,
				"RSS indirection table assigned flows out of queue bounds. Reconfiguring.\n");
			goto repopulate_reta;
		}

		/* do nothing if all of the elements are in bounds */
		return;
	}

repopulate_reta:
	fm10k_write_reta(interface, NULL);
}
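
/* Illustrative sketch, not part of the upstream driver: each 32-bit RETA
 * entry packs four 8-bit queue indices, and the shift pairs in
 * fm10k_init_reta() isolate one byte at a time.  The hypothetical helper
 * below performs the equivalent bounds check for a single entry.
 */
static inline bool fm10k_example_reta_entry_in_bounds(u32 reta, u16 rss_i)
{
	return (((reta >>  0) & 0xff) < rss_i) &&
	       (((reta >>  8) & 0xff) < rss_i) &&
	       (((reta >> 16) & 0xff) < rss_i) &&
	       (((reta >> 24) & 0xff) < rss_i);
}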
/**
 * fm10k_init_queueing_scheme - Determine proper queueing scheme
 * @interface: board private structure to initialize
 *
 * We determine which queueing scheme to use based on...
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
{
	int err;

	/* Number of supported queues */
	fm10k_set_num_queues(interface);

	/* Configure MSI-X capability */
	err = fm10k_init_msix_capability(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"Unable to initialize MSI-X capability\n");
		goto err_init_msix;
	}

	/* Allocate memory for queues */
	err = fm10k_alloc_q_vectors(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"Unable to allocate queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* Map rings to devices, and map devices to physical queues */
	fm10k_assign_rings(interface);

	/* Initialize RSS redirection table */
	fm10k_init_reta(interface);

	return 0;

err_alloc_q_vectors:
	fm10k_reset_msix_capability(interface);
err_init_msix:
	fm10k_reset_num_queues(interface);
	return err;
}
/**
 * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings
 * @interface: board private structure to clear queueing scheme on
 *
 * We go through and clear queueing specific resources and reset the structure
 * to pre-load conditions
 **/
void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface)
{
	fm10k_free_q_vectors(interface);
	fm10k_reset_msix_capability(interface);
}