// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_txrx_lib.h"
/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}
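/* Worked example of the tail rounding above (illustrative values only):
 * if next_to_use was previously 13 and val is 18, then prev_ntu is
 * 13 & ~0x7 = 8 and val becomes 18 & ~0x7 = 16, so 16 is written to the
 * tail register. With val == 15 instead, val &= ~0x7 yields 8 == prev_ntu
 * and no MMIO write happens; those descriptors are exposed to hardware on
 * a later call once another 8-descriptor boundary is crossed.
 */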
/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 */
static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
{
	return PKT_HASH_TYPE_NONE;
}
/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u8 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}
/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;

	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
	rx_error = rx_status;

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;
	else if (ipv6 && (rx_status &
		 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	default:
		break;
	}
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}
/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
void
ice_process_skb_fields(struct ice_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u8 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring, skb, rx_desc, ptype);
}
/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * gro receive functions (with/without VLAN tag)
 */
void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
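/* Typical per-packet use in the Rx clean-up path (illustrative sketch; the
 * real caller and the way vlan_tag is pulled out of the descriptor live in
 * ice_txrx.c, not in this file):
 *
 *	ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
 *	ice_receive_skb(rx_ring, skb, vlan_tag);
 */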
/**
 * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
 * @data: packet data pointer
 * @size: packet data size
 * @xdp_ring: XDP ring for transmission
 */
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
{
	u16 i = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;

	if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return ICE_XDP_CONSUMED;
	}

	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return ICE_XDP_CONSUMED;

	tx_buf = &xdp_ring->tx_buf[i];
	tx_buf->bytecount = size;
	tx_buf->gso_segs = 1;
	tx_buf->raw_buf = data;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, size);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc = ICE_TX_DESC(xdp_ring, i);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
						  size, 0);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	/* bump the ring index, wrapping back to zero at the end */
	i++;
	if (i == xdp_ring->count)
		i = 0;

	tx_buf->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return ICE_XDP_TX;
}
/**
 * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
 * @xdp: XDP buffer
 * @xdp_ring: XDP Tx ring
 *
 * Returns ICE_XDP_TX on success, ICE_XDP_CONSUMED on failure.
 */
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
{
	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);

	if (unlikely(!xdpf))
		return ICE_XDP_CONSUMED;

	return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
}
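/* Rough sketch of how the XDP verdict handling in ice_txrx.c is expected to
 * use this helper (local names here are illustrative, not the driver's):
 *
 *	switch (act) {
 *	case XDP_TX:
 *		xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
 *		result = ice_xmit_xdp_buff(xdp, xdp_ring);
 *		break;
 *	case XDP_REDIRECT:
 *		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
 *		result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
 *		break;
 *	}
 */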
/**
 * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @rx_ring: Rx ring
 * @xdp_res: Result of the receive batch
 *
 * This function bumps XDP Tx tail and/or flush redirect map, and
 * should be called when a batch of packets has been processed in the
 * napi loop.
 */
void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
{
	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & ICE_XDP_TX) {
		struct ice_ring *xdp_ring =
			rx_ring->vsi->xdp_rings[rx_ring->q_index];

		ice_xdp_ring_update_tail(xdp_ring);
	}
}
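/* Expected call pattern (sketch only): the Rx clean-up loop ORs the
 * per-packet verdict bits (ICE_XDP_TX / ICE_XDP_REDIR) into one mask and
 * hands it to ice_finalize_xdp_rx() once per napi poll, e.g.:
 *
 *	unsigned int xdp_xmit = 0;
 *
 *	while (likely(total_rx_pkts < budget)) {
 *		...
 *		xdp_xmit |= xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR);
 *		...
 *	}
 *	ice_finalize_xdp_rx(rx_ring, xdp_xmit);
 */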