// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include "ice.h"

#define ICE_RX_HDR_SIZE		256

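/* ICE_RX_HDR_SIZE bounds how many packet bytes are copied into the skb
 * linear area on receive (see ice_add_rx_frag() and ice_pull_tail() below);
 * anything larger stays in the page and is attached to the skb as a frag.
 */
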
/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

	size = sizeof(struct ice_tx_buf) * tx_ring->count;
	memset(tx_ring->tx_buf, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
			     int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = vsi->work_lmt;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.pkts += total_pkts;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_pkts += total_pkts;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

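/* The smp_mb() in the wake path above pairs with the barrier in
 * __ice_maybe_stop_tx() below: the cleaner publishes next_to_clean before
 * testing whether the queue is stopped, and the xmit path re-checks the
 * free-descriptor count after stopping, so a wakeup cannot be lost in the
 * window between the two checks.
 */
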
/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	bi_size = sizeof(struct ice_tx_buf) * tx_ring->count;
	tx_ring->tx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct ice_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

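/* For illustration, assuming the 16-byte struct ice_tx_desc: a 512-entry
 * ring is 512 * 16 = 8192 bytes and the ALIGN() above leaves it unchanged,
 * since it is already a multiple of 4K. The round-up only costs memory for
 * descriptor counts whose byte size is not already page aligned.
 */
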
/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_pages(rx_buf->page, 0);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

	size = sizeof(struct ice_rx_buf) * rx_ring->count;
	memset(rx_ring->rx_buf, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	bi_size = sizeof(struct ice_rx_buf) * rx_ring->count;
	rx_ring->rx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
				  struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	/* get the RX descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		if (!ice_alloc_mapped_page(rx_ring, bi))
			goto no_bufs;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return false;

no_bufs:
	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 */
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

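/* A page is excluded from the recycle path if it was allocated on a remote
 * NUMA node (recycling it would keep DMA traffic off-node) or if it came
 * from the pfmemalloc emergency reserve, which must be returned to the page
 * allocator rather than cached by the driver.
 */
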
/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_buf: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buf to place the data into
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 */
static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
			    union ice_32b_rx_flex_desc *rx_desc,
			    struct sk_buff *skb)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ICE_RXBUF_2048;
#else
	unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
	unsigned int truesize;
#endif /* PAGE_SIZE < 8192) */
	struct page *page = rx_buf->page;
	unsigned int size;

	size = le16_to_cpu(rx_desc->wb.pkt_len) &
		ICE_RX_FLX_DESC_PKT_LEN_M;

#if (PAGE_SIZE >= 8192)
	truesize = ALIGN(size, L1_CACHE_BYTES);
#endif /* PAGE_SIZE >= 8192) */

	/* will the data fit in the skb we allocated? if so, just
	 * copy it as it is pretty small anyway
	 */
	if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buf->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!ice_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_pages(page, 0);
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buf->page_offset, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buf->page_offset ^= truesize;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += truesize;

	if (rx_buf->page_offset > last_offset)
		return false;
#endif /* PAGE_SIZE < 8192) */

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	get_page(rx_buf->page);

	return true;
}

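/* For illustration, on a 4K-page system: the page is split into two 2K
 * (ICE_RXBUF_2048) halves, and page_offset ^= 2048 simply toggles between
 * offset 0 and offset 2048. One half can sit in an in-flight skb while the
 * other is posted to hardware; the page_count(page) != 1 test above catches
 * the case where the stack still holds a reference, so the page cannot be
 * safely reused.
 */
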
/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void ice_reuse_rx_page(struct ice_ring *rx_ring,
			      struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buf = *old_buf;
}

/**
 * ice_fetch_rx_buf - Allocate skb and populate it
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_desc: descriptor containing info written by hardware
 *
 * This function allocates an skb on the fly, and populates it with the page
 * data from the current receive descriptor, taking care to set up the skb
 * correctly, as well as handling calling the page recycle function if
 * necessary.
 */
static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
					union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_rx_buf *rx_buf;
	struct sk_buff *skb;
	struct page *page;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	page = rx_buf->page;
	prefetchw(page);

	skb = rx_buf->skb;

	if (likely(!skb)) {
		u8 *page_addr = page_address(page) + rx_buf->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch((void *)(page_addr + L1_CACHE_BYTES));
#endif /* L1_CACHE_BYTES */

		/* allocate a skb to store the frags */
		skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
				       ICE_RX_HDR_SIZE,
				       GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_buf_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);

		skb_record_rx_queue(skb, rx_ring->q_index);
	} else {
		/* we are reusing so sync this buffer for CPU use */
		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
					      rx_buf->page_offset,
					      ICE_RXBUF_2048,
					      DMA_FROM_DEVICE);

		rx_buf->skb = NULL;
	}

	/* pull page into skb */
	if (ice_add_rx_frag(rx_buf, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;

	return skb;
}

/**
 * ice_pull_tail - ice specific version of skb_pull_tail
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ice specific version of __pskb_pull_tail. The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
static void ice_pull_tail(struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int pull_len;
	unsigned char *va;

	/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lowmem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}

/**
 * ice_cleanup_headers - Correct empty headers
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool ice_cleanup_headers(struct sk_buff *skb)
{
	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		ice_pull_tail(skb);

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * ice_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error0 field doesn't need to be shifted because it begins
 * at offset zero.
 */
static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
			     const u16 stat_err_bits)
{
	return !!(rx_desc->wb.status_error0 &
		  cpu_to_le16(stat_err_bits));
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool ice_is_non_eop(struct ice_ring *rx_ring,
			   union ice_32b_rx_flex_desc *rx_desc,
			   struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(ICE_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 */
static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
{
	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u8 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
			union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;

	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
	rx_error = rx_status;

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;
	else if (ipv6 && (rx_status &
		 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	default:
		break;
	}
	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
static void ice_process_skb_fields(struct ice_ring *rx_ring,
				   union ice_32b_rx_flex_desc *rx_desc,
				   struct sk_buff *skb, u8 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * gro receive functions (with/without vlan tag)
 */
static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
			    u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	}
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	bool failure = false;

	/* start the loop to process RX packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct sk_buff *skb;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= ICE_RX_BUF_WRITE) {
			failure = failure ||
				  ice_alloc_rx_bufs(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* get the RX desc from RX ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		/* allocate (if needed) and populate skb */
		skb = ice_fetch_rx_buf(rx_ring, rx_desc);
		if (!skb)
			break;

		cleaned_count++;

		/* skip if it is NOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		/* correct empty headers and pad skb if needed (to make valid
		 * ethernet frame
		 */
		if (ice_cleanup_headers(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);

		/* update budget accounting */
		total_rx_pkts++;
	}

	/* update queue and vector specific stats */
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.pkts += total_rx_pkts;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
				container_of(napi, struct ice_q_vector, napi);
	struct ice_vsi *vsi = q_vector->vsi;
	struct ice_pf *pf = vsi->back;
	bool clean_complete = true;
	int budget_per_ring = 0;
	struct ice_ring *ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_ring(ring, q_vector->tx)
		if (!ice_clean_tx_irq(vsi, ring, budget))
			clean_complete = false;

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		return budget;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	if (q_vector->num_ring_rx)
		budget_per_ring = max(budget / q_vector->num_ring_rx, 1);

	ice_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		cleaned = ice_clean_rx_irq(ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete)
		return budget;

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete_done(napi, work_done);
	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
		ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
	return 0;
}

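/* For illustration: with the common NAPI budget of 64 and a q_vector that
 * owns two Rx rings, each ring may clean up to max(64 / 2, 1) = 32 packets
 * per poll, while each Tx ring is handed the full budget since Tx cleanup
 * is comparatively cheap.
 */
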
/* helper function for building cmd/type/offset */
static __le64
build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd    << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag    << ICE_TXD_QW1_L2TAG1_S));
}

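/* The second quadword of a data descriptor packs the descriptor type, the
 * command bits (e.g. EOP/RS), the MAC/IP/L4 header-length offsets, the
 * buffer size, and the L2 tag into one little-endian u64; the shift
 * constants are defined alongside the descriptor layout. For a plain
 * 1514-byte frame with no offloads, only the type, EOP/RS command bits,
 * and the size field end up non-zero.
 */
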
/**
 * __ice_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_subqueue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
	++tx_ring->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ice_maybe_stop_tx(tx_ring, size);
}

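/* Two-level check: the inline test above is the common, uncontended case
 * and costs only a comparison; the __ice_maybe_stop_tx() slow path is taken
 * only when the ring is nearly full, and re-checks the free-descriptor
 * count under its memory barrier to close the race with the completion
 * cleaner in ice_clean_tx_irq().
 */
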
/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	struct skb_frag_struct *frag;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	dma_addr_t dma;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			  ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset, max_data, td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}

	return;

dma_error:
	/* clear dma mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

	if (skb->encapsulation)
		return -1;

	/* Enable IP checksum offloads */
	protocol = vlan_get_protocol(skb);
	if (protocol == htons(ETH_P_IP)) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (protocol == htons(ETH_P_IPV6)) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}

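/* For illustration: the descriptor length fields are in words, not bytes.
 * A standard frame gives MACLEN = 14 / 2 = 7 two-byte words, IPLEN =
 * 20 / 4 = 5 four-byte words for a bare IPv4 header, and L4LEN = tcp->doff
 * (already expressed in 4-byte words, 5 for a TCP header with no options).
 */
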
/**
 * ice_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and set up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped upon error,
 * otherwise returns 0 to indicate the flags have been set properly.
 */
static int
ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;
	__be16 protocol = skb->protocol;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* when HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling. In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		/* for SW VLAN, check the next protocol and store the tag */
		vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
							     sizeof(_vhdr),
							     &_vhdr);
		if (!vhdr)
			return -EINVAL;

		first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
				   ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
	}

	return 0;
}

/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	u32 paylen, l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	/* determine offset of transport header */
	l4_start = l4.hdr - skb->data;

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;
	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));

	/* compute length of segmentation header */
	off->header_len = (l4.tcp->doff * 4) + l4_start;

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
		       (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
		       (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
		       (cd_mss << ICE_TXD_CTX_QW1_MSS_S);
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}

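/* For illustration, assuming a 66000-byte TSO skb with a 66-byte header and
 * an MSS of 1448: gso_segs = 46, so bytecount grows by (46 - 1) * 66 = 2970
 * bytes, because every segment after the first carries its own copy of the
 * headers on the wire. This keeps the BQL accounting in ice_tx_map() honest.
 */
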
/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}

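/* Worked example: size = 60000 gives ((60000 * 85) >> 20) + 1 =
 * (5100000 >> 20) + 1 = 4 + 1 = 5 descriptors, matching
 * DIV_ROUND_UP(60000, 12288) = 5; a full 4K page gives
 * ((4096 * 85) >> 20) + 1 = 1 descriptor, as expected.
 */
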
/**
 * ice_xmit_desc_count - calculate number of tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	stale = &skb_shinfo(skb)->frags[0];
	for (;;) {
		sum += skb_frag_size(frag++);

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= skb_frag_size(stale++);
	}

	return false;
}

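/* For illustration, assuming gso_size = 2000 and an skb made of many
 * 100-byte fragments: sum starts at 1 - 2000 = -1999, the first five frags
 * add only 500, and the first loop iteration drives sum negative, so the
 * skb must be linearized. With 1500-byte fragments the running sum stays
 * positive and the skb can be sent as-is.
 */
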
/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}

/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_tx_buf *first;
	unsigned int count;
	int tso, csum;

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	if (ice_tx_prepare_vlan_flags(tx_ring, first))
		goto out_drop;

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	if (tso || offload.cd_tunnel_params) {
		struct ice_tx_ctx_desc *cdesc;
		int i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}