/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */
#include <linux/types.h>
#include <linux/module.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/if_macvlan.h>
#include <linux/prefetch.h>

#include "fm10k.h"

#define DRV_VERSION	"0.15.2-k"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] =
	"Intel(R) Ethernet Switch Host Interface Driver";
static const char fm10k_copyright[] =
	"Copyright (c) 2013 Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/* single workqueue for entire fm10k driver */
struct workqueue_struct *fm10k_workqueue = NULL;
/**
 * fm10k_init_module - Driver Registration Routine
 *
 * fm10k_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init fm10k_init_module(void)
{
	pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version);
	pr_info("%s\n", fm10k_copyright);

	/* create driver workqueue */
	fm10k_workqueue = create_workqueue("fm10k");

	return fm10k_register_pci_driver();
}
module_init(fm10k_init_module);
/**
 * fm10k_exit_module - Driver Exit Cleanup Routine
 *
 * fm10k_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit fm10k_exit_module(void)
{
	fm10k_unregister_pci_driver();

	/* destroy driver workqueue */
	flush_workqueue(fm10k_workqueue);
	destroy_workqueue(fm10k_workqueue);
	fm10k_workqueue = NULL;
}
module_exit(fm10k_exit_module);
static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
				    struct fm10k_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* Only page will be NULL if buffer was consumed */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}
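/* The full page is mapped once above; fm10k_fetch_rx_buffer() and
 * fm10k_reuse_rx_page() later sync only the active FM10K_RX_BUFSZ region for
 * the CPU or the device, so the mapping itself can stay in place while the
 * page bounces between the networking stack and the ring.
 */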
/**
 * fm10k_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
{
	union fm10k_rx_desc *rx_desc;
	struct fm10k_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = FM10K_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer[i];
	i -= rx_ring->count;

	do {
		if (!fm10k_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = FM10K_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->d.staterr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();

		/* notify hardware of new descriptors */
		writel(i, rx_ring->tail);
	}
}
/**
 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the interface
 **/
static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
				struct fm10k_rx_buffer *old_buff)
{
	struct fm10k_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buff = *old_buff;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
					 old_buff->page_offset,
					 FM10K_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}
static inline bool fm10k_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
				    struct page *page,
				    unsigned int __maybe_unused truesize)
{
	/* avoid re-using remote pages */
	if (unlikely(fm10k_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
		return false;
#endif

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	atomic_inc(&page->_count);

	return true;
}
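/* Note on the reuse scheme above: with PAGE_SIZE < 8192 each page is split
 * into two FM10K_RX_BUFSZ halves and page_offset simply toggles between them
 * (e.g. 0 <-> 2048, assuming the usual 2 KiB buffer on 4 KiB pages), so one
 * half can be handed to the stack while the other is given back to hardware.
 * On larger pages the offset instead walks forward by truesize until the
 * remaining space can no longer hold a full buffer.
 */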
/**
 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the interface.
 **/
static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
	unsigned int size = le16_to_cpu(rx_desc->w.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = FM10K_RX_BUFSZ;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int pull_len;

	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	if (likely(size <= FM10K_RX_HDR_LEN)) {
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!fm10k_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_page(page);
		return false;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
}
static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	struct fm10k_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
				     FM10K_RX_HDR_LEN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      FM10K_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		fm10k_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;

	return skb;
}
static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
				     union fm10k_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (fm10k_test_staterr(rx_desc,
			       FM10K_RXD_STATUS_L4E |
			       FM10K_RXD_STATUS_L4E2 |
			       FM10K_RXD_STATUS_IPE |
			       FM10K_RXD_STATUS_IPE2)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2))
		skb->encapsulation = true;
	else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS))
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	ring->rx_stats.csum_good++;
}
#define FM10K_RSS_L4_TYPES_MASK \
	((1ul << FM10K_RSSTYPE_IPV4_TCP) | \
	 (1ul << FM10K_RSSTYPE_IPV4_UDP) | \
	 (1ul << FM10K_RSSTYPE_IPV6_TCP) | \
	 (1ul << FM10K_RSSTYPE_IPV6_UDP))

static inline void fm10k_rx_hash(struct fm10k_ring *ring,
				 union fm10k_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK;
	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
		     (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct fm10k_intfc *interface = rx_ring->q_vector->interface;

	FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;

	if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED))
		fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb),
					  le64_to_cpu(rx_desc->q.timestamp));
}
static void fm10k_type_trans(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc __maybe_unused *rx_desc,
			     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel);

	/* check to see if DGLORT belongs to a MACVLAN */
	if (l2_accel) {
		u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;

		idx -= l2_accel->dglort;
		if (idx < l2_accel->size && l2_accel->macvlan[idx])
			dev = l2_accel->macvlan[idx];
		else
			l2_accel = NULL;
	}

	skb->protocol = eth_type_trans(skb, dev);

	if (!l2_accel)
		return;

	/* update MACVLAN statistics */
	macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1,
			 !!(rx_desc->w.hdr_info &
			    cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK)));
}
/**
 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int len = skb->len;

	fm10k_rx_hash(rx_ring, rx_desc, skb);

	fm10k_rx_checksum(rx_ring, rx_desc, skb);

	fm10k_rx_hwtstamp(rx_ring, rx_desc, skb);

	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;

	skb_record_rx_queue(skb, rx_ring->queue_index);

	FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;

	if (rx_desc->w.vlan) {
		u16 vid = le16_to_cpu(rx_desc->w.vlan);

		if ((vid & VLAN_VID_MASK) != rx_ring->vid)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		else if (vid & VLAN_PRIO_MASK)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vid & VLAN_PRIO_MASK);
	}

	fm10k_type_trans(rx_ring, rx_desc, skb);

	return len;
}
/**
 * fm10k_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(FM10K_RX_DESC(rx_ring, ntc));

	if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
		return false;

	return true;
}
/**
 * fm10k_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
				  union fm10k_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	if (unlikely((fm10k_test_staterr(rx_desc,
					 FM10K_RXD_STATUS_RXE)))) {
#define FM10K_TEST_RXD_BIT(rxd, bit) \
	((rxd)->w.csum_err & cpu_to_le16(bit))
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR))
			rx_ring->rx_stats.switch_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR))
			rx_ring->rx_stats.drops++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR))
			rx_ring->rx_stats.pp_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY))
			rx_ring->rx_stats.link_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG))
			rx_ring->rx_stats.length_errors++;
		dev_kfree_skb_any(skb);
		rx_ring->rx_stats.errors++;
		return true;
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
/**
 * fm10k_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
			      struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}
static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
			      struct fm10k_ring *rx_ring,
			      int budget)
{
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = fm10k_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union fm10k_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
			fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!rx_desc->d.staterr)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		/* retrieve a buffer from the ring */
		skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (fm10k_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* populate checksum, timestamp, VLAN, and protocol */
		total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);

		fm10k_receive_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_packets;
	rx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	return total_packets;
}
#define VXLAN_HLEN (sizeof(struct udphdr) + 8)
static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
{
	struct fm10k_intfc *interface = netdev_priv(skb->dev);
	struct fm10k_vxlan_port *vxlan_port;

	/* we can only offload a vxlan if we recognize it as such */
	vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
					      struct fm10k_vxlan_port, list);

	if (!vxlan_port)
		return NULL;
	if (vxlan_port->port != udp_hdr(skb)->dest)
		return NULL;

	/* return offset of udp_hdr plus 8 bytes for VXLAN header */
	return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN);
}
#define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF)
#define NVGRE_TNI htons(0x2000)
struct fm10k_nvgre_hdr {
	__be16 flags;
	__be16 proto;
	__be32 tni;
};

static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
{
	struct fm10k_nvgre_hdr *nvgre_hdr;
	int hlen = ip_hdrlen(skb);

	/* currently only IPv4 is supported due to hlen above */
	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		return NULL;

	/* our transport header should be NVGRE */
	nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen);

	/* verify all reserved flags are 0 */
	if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
		return NULL;

	/* report start of ethernet header */
	if (nvgre_hdr->flags & NVGRE_TNI)
		return (struct ethhdr *)(nvgre_hdr + 1);

	return (struct ethhdr *)(&nvgre_hdr->tni);
}
__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
{
	u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
	struct ethhdr *eth_hdr;

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		eth_hdr = fm10k_port_is_vxlan(skb);
		break;
	case IPPROTO_GRE:
		eth_hdr = fm10k_gre_is_nvgre(skb);
		break;
	default:
		return 0;
	}

	if (!eth_hdr)
		return 0;

	switch (eth_hdr->h_proto) {
	case htons(ETH_P_IP):
		inner_l4_hdr = inner_ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (inner_l4_hdr) {
	case IPPROTO_TCP:
		inner_l4_hlen = inner_tcp_hdrlen(skb);
		break;
	case IPPROTO_UDP:
		inner_l4_hlen = 8;
		break;
	default:
		return 0;
	}

	/* The hardware allows tunnel offloads only if the combined inner and
	 * outer header is 184 bytes or less
	 */
	if (skb_inner_transport_header(skb) + inner_l4_hlen -
	    skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
		return 0;

	return eth_hdr->h_proto;
}
static int fm10k_tso(struct fm10k_ring *tx_ring,
		     struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	unsigned char *th;
	u8 hdrlen;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	/* compute header lengths */
	if (skb->encapsulation) {
		if (!fm10k_tx_encap_offload(skb))
			goto err_vxlan;
		th = skb_inner_transport_header(skb);
	} else {
		th = skb_transport_header(skb);
	}

	/* compute offset from SOF to transport header and add header len */
	hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2);

	first->tx_flags |= FM10K_TX_FLAGS_CSUM;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * hdrlen;

	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = hdrlen;
	tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);

	return 1;

err_vxlan:
	tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
	if (!net_ratelimit())
		netdev_err(tx_ring->netdev,
			   "TSO requested for unsupported tunnel, disabling offload\n");
	return -1;
}
static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
			  struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	union {
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
		u8 *raw;
	} network_hdr;
	__be16 protocol;
	u8 l4_hdr = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto no_csum;

	if (skb->encapsulation) {
		protocol = fm10k_tx_encap_offload(skb);
		if (!protocol) {
			if (skb_checksum_help(skb)) {
				dev_warn(tx_ring->dev,
					 "failed to offload encap csum!\n");
				tx_ring->tx_stats.csum_err++;
			}
			goto no_csum;
		}
		network_hdr.raw = skb_inner_network_header(skb);
	} else {
		protocol = vlan_get_protocol(skb);
		network_hdr.raw = skb_network_header(skb);
	}

	switch (protocol) {
	case htons(ETH_P_IP):
		l4_hdr = network_hdr.ipv4->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = network_hdr.ipv6->nexthdr;
		break;
	default:
		if (unlikely(net_ratelimit())) {
			dev_warn(tx_ring->dev,
				 "partial checksum but ip version=%x!\n",
				 protocol);
		}
		tx_ring->tx_stats.csum_err++;
		goto no_csum;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		break;
	case IPPROTO_GRE:
		if (skb->encapsulation)
			break;
	default:
		if (unlikely(net_ratelimit())) {
			dev_warn(tx_ring->dev,
				 "partial checksum but l4 proto=%x!\n",
				 l4_hdr);
		}
		tx_ring->tx_stats.csum_err++;
		goto no_csum;
	}

	/* update TX checksum flag */
	first->tx_flags |= FM10K_TX_FLAGS_CSUM;
	tx_ring->tx_stats.csum_good++;

no_csum:
	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = 0;
	tx_desc->mss = 0;
}
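/* FM10K_SET_FLAG below relocates a single flag bit: when _flag and _result
 * are both single-bit masks, (_result / _flag) or (_flag / _result) reduces
 * to a constant power of two, so the multiply/divide compiles down to a
 * shift.  For example, a set FM10K_TX_FLAGS_CSUM bit in tx_flags is moved to
 * the FM10K_TXD_FLAG_CSUM position in the descriptor flags.
 */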
#define FM10K_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))

static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 desc_flags = 0;

	/* set timestamping bits */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		desc_flags |= FM10K_TXD_FLAG_TIME;

	/* set checksum offload bits */
	desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
				     FM10K_TXD_FLAG_CSUM);

	return desc_flags;
}
static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
			       struct fm10k_tx_desc *tx_desc, u16 i,
			       dma_addr_t dma, unsigned int size, u8 desc_flags)
{
	/* set RS and INT for last frame in a cache line */
	if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
		desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;

	/* record values to descriptor */
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->flags = desc_flags;
	tx_desc->buflen = cpu_to_le16(size);

	/* return true if we just wrapped the ring */
	return i == tx_ring->count;
}
static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available */
	if (likely(fm10k_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	if (likely(fm10k_desc_unused(tx_ring) >= size))
		return 0;
	return __fm10k_maybe_stop_tx(tx_ring, size);
}
static void fm10k_tx_map(struct fm10k_ring *tx_ring,
			 struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	unsigned char *data;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;
	u8 flags = fm10k_tx_desc_flags(skb, tx_flags);

	tx_desc = FM10K_TX_DESC(tx_ring, i);

	/* add HW VLAN tag */
	if (skb_vlan_tag_present(skb))
		tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	else
		tx_desc->vlan = 0;

	size = skb_headlen(skb);
	data = skb->data;

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);

	data_len = skb->data_len;
	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) {
			if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
					       FM10K_MAX_DATA_PER_TXD, flags)) {
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += FM10K_MAX_DATA_PER_TXD;
			size -= FM10K_MAX_DATA_PER_TXD;
		}

		if (likely(!data_len))
			break;

		if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
				       dma, size, flags)) {
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer[i];
	}

	/* write last descriptor with LAST bit set */
	flags |= FM10K_TXD_FLAG_LAST;

	if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
		i = 0;

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}

	return;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer[i];
		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
				  struct fm10k_ring *tx_ring)
{
	struct fm10k_tx_buffer *first;
	unsigned short f;
	u32 tx_flags = 0;
	int tso;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));

	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer[tx_ring->next_to_use];

	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;

	tso = fm10k_tso(tx_ring, first);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		fm10k_tx_csum(tx_ring, first);

	fm10k_tx_map(tx_ring, first);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}
static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
{
	return ring->stats.packets;
}

static u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
{
	/* use SW head and tail until we have real hardware */
	u32 head = ring->next_to_clean;
	u32 tail = ring->next_to_use;

	return ((head <= tail) ? tail : tail + ring->count) - head;
}
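/* The pending count above is the ring distance from next_to_clean to
 * next_to_use, accounting for wrap: e.g. with a 512 entry ring, head == 500
 * and tail == 10 gives (10 + 512) - 500 = 22 descriptors still outstanding.
 */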
bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
{
	u32 tx_done = fm10k_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = fm10k_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough.  This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending.  By
	 * requiring this to fail twice we avoid races with
	 * clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if (!tx_pending || (tx_done_old != tx_done)) {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);

		return false;
	}

	/* make sure it is true for two checks in a row */
	return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
}
/**
 * fm10k_tx_timeout_reset - initiate reset due to Tx timeout
 * @interface: driver private struct
 **/
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__FM10K_DOWN, &interface->state)) {
		interface->tx_timeout_count++;
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;
		fm10k_service_event_schedule(interface);
	}
}
/**
 * fm10k_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
			       struct fm10k_ring *tx_ring)
{
	struct fm10k_intfc *interface = q_vector->interface;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__FM10K_DOWN, &interface->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer[i];
	tx_desc = FM10K_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_consume_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer;
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer;
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct fm10k_hw *hw = &interface->hw;

		netif_err(interface, drv, tx_ring->netdev,
			  "Detected Tx Unit Hang\n"
			  "  Tx Queue             <%d>\n"
			  "  TDH, TDT             <%x>, <%x>\n"
			  "  next_to_use          <%x>\n"
			  "  next_to_clean        <%x>\n",
			  tx_ring->queue_index,
			  fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
			  fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
			  tx_ring->next_to_use, i);

		netif_stop_subqueue(tx_ring->netdev,
				    tx_ring->queue_index);

		netif_info(interface, probe, tx_ring->netdev,
			   "tx hang %d detected on queue %d, resetting interface\n",
			   interface->tx_timeout_count + 1,
			   tx_ring->queue_index);

		fm10k_tx_timeout_reset(interface);

		/* the netdev is about to reset, no point in enabling stuff */
		return true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__FM10K_DOWN, &interface->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
/**
 * fm10k_update_itr - update the dynamic ITR value based on packet size
 *
 * Stores a new ITR value based strictly on packet size.  The
 * divisors and thresholds used by this function were determined based
 * on theoretical maximum wire speed and testing data, in order to
 * minimize response time while increasing bulk throughput.
 *
 * @ring_container: Container for rings to have ITR updated
 **/
static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
{
	unsigned int avg_wire_size, packets;

	/* Only update ITR if we are using adaptive setting */
	if (!(ring_container->itr & FM10K_ITR_ADAPTIVE))
		goto clear_counts;

	packets = ring_container->total_packets;
	if (!packets)
		goto clear_counts;

	avg_wire_size = ring_container->total_bytes / packets;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	if (avg_wire_size > 3000)
		avg_wire_size = 3000;

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		avg_wire_size /= 3;
	else
		avg_wire_size /= 2;

	/* write back value and retain adaptive flag */
	ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE;

clear_counts:
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;
}
static void fm10k_qv_enable(struct fm10k_q_vector *q_vector)
{
	/* Enable auto-mask and clear the current mask */
	u32 itr = FM10K_ITR_ENABLE;

	/* Update Tx ITR */
	fm10k_update_itr(&q_vector->tx);

	/* Update Rx ITR */
	fm10k_update_itr(&q_vector->rx);

	/* Store Tx itr in timer slot 0 */
	itr |= (q_vector->tx.itr & FM10K_ITR_MAX);

	/* Shift Rx itr to timer slot 1 */
	itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT;

	/* Write the final value to the ITR register */
	writel(itr, q_vector->itr);
}
static int fm10k_poll(struct napi_struct *napi, int budget)
{
	struct fm10k_q_vector *q_vector =
			       container_of(napi, struct fm10k_q_vector, napi);
	struct fm10k_ring *ring;
	int per_ring_budget, work_done = 0;
	bool clean_complete = true;

	fm10k_for_each_ring(ring, q_vector->tx)
		clean_complete &= fm10k_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't
	 * allow the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget / q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	fm10k_for_each_ring(ring, q_vector->rx) {
		int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);

		work_done += work;
		clean_complete &= !!(work < per_ring_budget);
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* all work done, exit the polling mode */
	napi_complete_done(napi, work_done);

	/* re-enable the q_vector */
	fm10k_qv_enable(q_vector);

	return 0;
}
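/* Example of the budget split above: a NAPI budget of 64 across a q_vector
 * with 3 Rx rings gives max(64 / 3, 1) = 21 descriptors of Rx work per ring
 * for this poll; a ring that uses its full 21 marks the poll as incomplete
 * so the vector stays in polling mode.
 */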
/**
 * fm10k_set_qos_queues: Allocate queues for a QOS-enabled device
 * @interface: board private structure to initialize
 *
 * When QoS (Quality of Service) is enabled, allocate queues for
 * each traffic class.  If multiqueue isn't available, then abort QoS
 * initialization.
 *
 * This function handles all combinations of Qos and RSS.
 *
 **/
static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	struct fm10k_ring_feature *f;
	int rss_i, i;
	int pcs;

	/* Map queue offset and counts onto allocated tx queues */
	pcs = netdev_get_num_tc(dev);

	if (pcs <= 1)
		return false;

	/* set QoS mask and indices */
	f = &interface->ring_feature[RING_F_QOS];
	f->indices = pcs;
	f->mask = (1 << fls(pcs - 1)) - 1;

	/* determine the upper limit for our current DCB mode */
	rss_i = interface->hw.mac.max_queues / pcs;
	rss_i = 1 << (fls(rss_i) - 1);

	/* set RSS mask and indices */
	f = &interface->ring_feature[RING_F_RSS];
	rss_i = min_t(u16, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = (1 << fls(rss_i - 1)) - 1;

	/* configure pause class to queue mapping */
	for (i = 0; i < pcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	interface->num_rx_queues = rss_i * pcs;
	interface->num_tx_queues = rss_i * pcs;

	return true;
}
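/* Example of the mapping above: with pcs = 4 traffic classes on a 128 queue
 * device, rss_i starts at 128 / 4 = 32, is rounded down to a power of two,
 * and (after applying the user limit) each class is given a contiguous block
 * of rss_i queues via netdev_set_tc_queue(), for rss_i * pcs queues in total.
 */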
/**
 * fm10k_set_rss_queues: Allocate queues for RSS
 * @interface: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
{
	struct fm10k_ring_feature *f;
	u16 rss_i;

	f = &interface->ring_feature[RING_F_RSS];
	rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit);

	/* record indices and power of 2 mask for RSS */
	f->indices = rss_i;
	f->mask = (1 << fls(rss_i - 1)) - 1;

	interface->num_rx_queues = rss_i;
	interface->num_tx_queues = rss_i;

	return true;
}
/**
 * fm10k_set_num_queues: Allocate queues for device, feature dependent
 * @interface: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void fm10k_set_num_queues(struct fm10k_intfc *interface)
{
	/* Start with base case */
	interface->num_rx_queues = 1;
	interface->num_tx_queues = 1;

	if (fm10k_set_qos_queues(interface))
		return;

	fm10k_set_rss_queues(interface);
}
/**
 * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
 * @interface: board private structure to initialize
 * @v_count: q_vectors allocated on interface, used for ring interleaving
 * @v_idx: index of vector in interface struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
				unsigned int v_count, unsigned int v_idx,
				unsigned int txr_count, unsigned int txr_idx,
				unsigned int rxr_count, unsigned int rxr_idx)
{
	struct fm10k_q_vector *q_vector;
	struct fm10k_ring *ring;
	int ring_count, size;

	ring_count = txr_count + rxr_count;
	size = sizeof(struct fm10k_q_vector) +
	       (sizeof(struct fm10k_ring) * ring_count);

	/* allocate q_vector and rings */
	q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(interface->netdev, &q_vector->napi,
		       fm10k_poll, NAPI_POLL_WEIGHT);

	/* tie q_vector and interface together */
	interface->q_vector[v_idx] = q_vector;
	q_vector->interface = interface;
	q_vector->v_idx = v_idx;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* save Tx ring container info */
	q_vector->tx.ring = ring;
	q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK;
	q_vector->tx.itr = interface->tx_itr;
	q_vector->tx.count = txr_count;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &interface->pdev->dev;
		ring->netdev = interface->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* apply Tx specific ring traits */
		ring->count = interface->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to interface */
		interface->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	/* save Rx ring container info */
	q_vector->rx.ring = ring;
	q_vector->rx.itr = interface->rx_itr;
	q_vector->rx.count = rxr_count;

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &interface->pdev->dev;
		ring->netdev = interface->netdev;
		rcu_assign_pointer(ring->l2_accel, interface->l2_accel);

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* apply Rx specific ring traits */
		ring->count = interface->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to interface */
		interface->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	fm10k_dbg_q_vector_init(q_vector);

	return 0;
}
/**
 * fm10k_free_q_vector - Free memory allocated for specific interrupt vector
 * @interface: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx)
{
	struct fm10k_q_vector *q_vector = interface->q_vector[v_idx];
	struct fm10k_ring *ring;

	fm10k_dbg_q_vector_exit(q_vector);

	fm10k_for_each_ring(ring, q_vector->tx)
		interface->tx_ring[ring->queue_index] = NULL;

	fm10k_for_each_ring(ring, q_vector->rx)
		interface->rx_ring[ring->queue_index] = NULL;

	interface->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);
	kfree_rcu(q_vector, rcu);
}
/**
 * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors
 * @interface: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
{
	unsigned int q_vectors = interface->num_q_vectors;
	unsigned int rxr_remaining = interface->num_rx_queues;
	unsigned int txr_remaining = interface->num_tx_queues;
	unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);
			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);
		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	interface->num_tx_queues = 0;
	interface->num_rx_queues = 0;
	interface->num_q_vectors = 0;

	while (v_idx--)
		fm10k_free_q_vector(interface, v_idx);

	return -ENOMEM;
}
/**
 * fm10k_free_q_vectors - Free memory allocated for interrupt vectors
 * @interface: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
{
	int v_idx = interface->num_q_vectors;

	interface->num_tx_queues = 0;
	interface->num_rx_queues = 0;
	interface->num_q_vectors = 0;

	while (v_idx--)
		fm10k_free_q_vector(interface, v_idx);
}
/**
 * fm10k_reset_msix_capability - reset MSI-X capability
 * @interface: board private structure to initialize
 *
 * Reset the MSI-X capability back to its starting state
 **/
static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
{
	pci_disable_msix(interface->pdev);
	kfree(interface->msix_entries);
	interface->msix_entries = NULL;
}
/**
 * fm10k_init_msix_capability - configure MSI-X capability
 * @interface: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int v_budget, vector;

	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * the default is to use pairs of vectors
	 */
	v_budget = max(interface->num_rx_queues, interface->num_tx_queues);
	v_budget = min_t(u16, v_budget, num_online_cpus());

	/* account for vectors not related to queues */
	v_budget += NON_Q_VECTORS(hw);

	/* At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation is fatal. */
	interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
					  GFP_KERNEL);
	if (!interface->msix_entries)
		return -ENOMEM;

	/* populate entry values */
	for (vector = 0; vector < v_budget; vector++)
		interface->msix_entries[vector].entry = vector;

	/* Attempt to enable MSI-X with requested value */
	v_budget = pci_enable_msix_range(interface->pdev,
					 interface->msix_entries,
					 MIN_MSIX_COUNT(hw),
					 v_budget);
	if (v_budget < 0) {
		kfree(interface->msix_entries);
		interface->msix_entries = NULL;
		return -ENOMEM;
	}

	/* record the number of queues available for q_vectors */
	interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw);

	return 0;
}
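/* Example of the budgeting above: 16 Rx and 16 Tx queues on an 8 CPU system
 * request max(16, 16) = 16 vectors, trimmed to 8 by num_online_cpus(), plus
 * the non-queue vectors, and finally capped by hw.mac.max_msix_vectors.
 */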
/**
 * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS
 * @interface: Interface structure containing rings and devices
 *
 * Cache the descriptor ring offsets for Qos
 **/
static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	int pc, offset, rss_i, i, q_idx;
	u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
	u8 num_pcs = netdev_get_num_tc(dev);

	if (num_pcs <= 1)
		return false;

	rss_i = interface->ring_feature[RING_F_RSS].indices;

	for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
		q_idx = pc;
		for (i = 0; i < rss_i; i++) {
			interface->tx_ring[offset + i]->reg_idx = q_idx;
			interface->tx_ring[offset + i]->qos_pc = pc;
			interface->rx_ring[offset + i]->reg_idx = q_idx;
			interface->rx_ring[offset + i]->qos_pc = pc;
			q_idx += pc_stride;
		}
	}

	return true;
}
/**
 * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @interface: Interface structure containing rings and devices
 *
 * Cache the descriptor ring offsets for RSS
 **/
static void fm10k_cache_ring_rss(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_rx_queues; i++)
		interface->rx_ring[i]->reg_idx = i;

	for (i = 0; i < interface->num_tx_queues; i++)
		interface->tx_ring[i]->reg_idx = i;
}
/**
 * fm10k_assign_rings - Map rings to network devices
 * @interface: Interface structure containing rings and devices
 *
 * This function is meant to go through and configure both the network
 * devices so that they contain rings, and configure the rings so that
 * they function with their network devices.
 **/
static void fm10k_assign_rings(struct fm10k_intfc *interface)
{
	if (fm10k_cache_ring_qos(interface))
		return;

	fm10k_cache_ring_rss(interface);
}
static void fm10k_init_reta(struct fm10k_intfc *interface)
{
	u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
	u32 reta, base;

	/* If the netdev is initialized we have to maintain table if possible */
	if (interface->netdev->reg_state != NETREG_UNINITIALIZED) {
		for (i = FM10K_RETA_SIZE; i--;) {
			reta = interface->reta[i];
			if ((((reta << 24) >> 24) < rss_i) &&
			    (((reta << 16) >> 24) < rss_i) &&
			    (((reta <<  8) >> 24) < rss_i) &&
			     (((reta)       >> 24) < rss_i))
				continue;
			goto repopulate_reta;
		}

		/* do nothing if all of the elements are in bounds */
		return;
	}

repopulate_reta:
	/* Populate the redirection table 4 entries at a time.  To do this
	 * we are generating the results for n and n+2 and then interleaving
	 * those with the results with n+1 and n+3.
	 */
	for (i = FM10K_RETA_SIZE; i--;) {
		/* first pass generates n and n+2 */
		base = ((i * 0x00040004) + 0x00020000) * rss_i;
		reta = (base & 0x3F803F80) >> 7;

		/* second pass generates n+1 and n+3 */
		base += 0x00010001 * rss_i;
		reta |= (base & 0x3F803F80) << 1;

		interface->reta[i] = reta;
	}
}
/**
 * fm10k_init_queueing_scheme - Determine proper queueing scheme
 * @interface: board private structure to initialize
 *
 * We determine which queueing scheme to use based on...
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
{
	int err;

	/* Number of supported queues */
	fm10k_set_num_queues(interface);

	/* Configure MSI-X capability */
	err = fm10k_init_msix_capability(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"Unable to initialize MSI-X capability\n");
		return err;
	}

	/* Allocate memory for queues */
	err = fm10k_alloc_q_vectors(interface);
	if (err)
		return err;

	/* Map rings to devices, and map devices to physical queues */
	fm10k_assign_rings(interface);

	/* Initialize RSS redirection table */
	fm10k_init_reta(interface);

	return 0;
}
/**
 * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings
 * @interface: board private structure to clear queueing scheme on
 *
 * We go through and clear queueing specific resources and reset the structure
 * to pre-load conditions
 **/
void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface)
{
	fm10k_free_q_vectors(interface);
	fm10k_reset_msix_capability(interface);
}