// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_txrx_lib.h"

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
{
	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
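	/* Illustrative example of the rule described above: if next_to_use was
	 * previously 13, prev_ntu is 13 & ~0x7 = 8. Refills up to val = 15
	 * also mask down to 8, so the tail write is skipped; once val reaches
	 * 16 the masked value differs from prev_ntu and QRX_TAIL is bumped
	 * to 16.
	 */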
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}
/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 */
static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
{
	return PKT_HASH_TYPE_NONE;
}
/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u8 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}
/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u16 rx_status0, rx_status1;
	bool ipv4, ipv6;

	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				   BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;

	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* check for outer UDP checksum error in tunneled packets */
	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
		goto checksum_fail;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}

	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}
/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
void
ice_process_skb_fields(struct ice_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u8 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring, skb, rx_desc, ptype);
}
/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * gro receive functions (with/without VLAN tag)
 */
void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	if (napi_gro_receive(&rx_ring->q_vector->napi, skb) == GRO_DROP) {
		/* this is tracked separately to help us debug stack drops */
		rx_ring->rx_stats.gro_dropped++;
		netdev_dbg(rx_ring->netdev, "Receive Queue %d: Dropped packet from GRO\n",
			   rx_ring->q_index);
	}
}
/**
 * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
 * @data: packet data pointer
 * @size: packet data size
 * @xdp_ring: XDP ring for transmission
 */
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
{
	u16 i = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;

	if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return ICE_XDP_CONSUMED;
	}

	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return ICE_XDP_CONSUMED;

	tx_buf = &xdp_ring->tx_buf[i];
	tx_buf->bytecount = size;
	tx_buf->gso_segs = 1;
	tx_buf->raw_buf = data;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, size);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc = ICE_TX_DESC(xdp_ring, i);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
						      size, 0);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	i++;
	if (i == xdp_ring->count)
		i = 0;

	tx_buf->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return ICE_XDP_TX;
}
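/* The ICE_XDP_* return codes used above are bit flags; the Rx path can OR
 * the per-packet results into a single mask and hand it to
 * ice_finalize_xdp_rx() below, which uses the mask to decide whether to
 * bump the XDP Tx tail and/or flush the redirect map.
 */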
/**
 * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
 * @xdp: XDP buffer
 * @xdp_ring: XDP Tx ring
 *
 * Returns ICE_XDP_CONSUMED if the conversion fails, otherwise the result of
 * ice_xmit_xdp_ring().
 */
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return ICE_XDP_CONSUMED;

	return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
}
/**
 * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @rx_ring: Rx ring
 * @xdp_res: Result of the receive batch
 *
 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
 * should be called when a batch of packets has been processed in the
 * napi loop.
 */
void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
{
	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & ICE_XDP_TX) {
		struct ice_ring *xdp_ring =
			rx_ring->vsi->xdp_rings[rx_ring->q_index];

		ice_xdp_ring_update_tail(xdp_ring);
	}
}