// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_common.h"
#include "ice_flow.h"

/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
};

#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
}
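
/* Example (illustrative): the IPv4 source address entry below,
 * ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
 * expands to { .hdr = ICE_FLOW_SEG_HDR_IPV4, .off = 96, .size = 32 },
 * i.e. a 32-bit field starting 96 bits into the IPv4 header.
 */
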
/* Table containing properties of supported protocol header fields */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, sizeof(struct in_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
			  sizeof_field(struct gre_full_hdr, key)),
};
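
/* Each of the packet type bitmaps below spans ICE_FLOW_PTYPE_MAX bits stored
 * as an array of 32-bit words; a set bit marks the corresponding packet type
 * as relevant for the header in question. ice_flow_proc_seg_hdrs() ANDs these
 * tables into params->ptypes so that only the packet types matching every
 * segment's headers remain.
 */
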
/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Packet types for packets with an Outer/First/Single IPv4 header
 */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000, 0x77000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
	0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
	0x00000770, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
static const u32 ice_ipv4_ofos_no_l4[] = {
	0x10C00000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
static const u32 ice_ipv4_il_no_l4[] = {
	0x60000000, 0x18043008, 0x80000002, 0x6010c021,
	0x00000008, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
static const u32 ice_ipv6_ofos_no_l4[] = {
	0x00000000, 0x00000000, 0x43000000, 0x10002000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
static const u32 ice_ipv6_il_no_l4[] = {
	0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
	0x00000430, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* UDP Packet types for non-tunneled packets or tunneled
 * packets with inner UDP.
 */
static const u32 ice_ptypes_udp_il[] = {
	0x81000000, 0x20204040, 0x04000010, 0x80810102,
	0x00000040, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last TCP header */
static const u32 ice_ptypes_tcp_il[] = {
	0x04000000, 0x80810102, 0x10000040, 0x02040408,
	0x00000102, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last SCTP header */
static const u32 ice_ptypes_sctp_il[] = {
	0x08000000, 0x01020204, 0x20000081, 0x04080810,
	0x00000204, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
	enum ice_block blk;
	u16 entry_length; /* # of bytes formatted entry will require */
	u8 es_cnt;
	struct ice_flow_prof *prof;

	/* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0.
	 * This will give us the direction flags.
	 */
	struct ice_fv_word es[ICE_MAX_FV_WORDS];
	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
};

#define ICE_FLOW_SEG_HDRS_L3_MASK	\
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
#define ICE_FLOW_SEG_HDRS_L4_MASK	\
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)

/**
 * ice_flow_val_hdrs - validates packet segments for valid protocol headers
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 */
static enum ice_status
ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
{
	u8 i;

	for (i = 0; i < segs_cnt; i++) {
		/* Multiple L3 headers */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
			return ICE_ERR_PARAM;

		/* Multiple L4 headers */
		if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
		    !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
			return ICE_ERR_PARAM;
	}

	return 0;
}

/* Sizes of fixed known protocol headers without header options */
#define ICE_FLOW_PROT_HDR_SZ_MAC	14
#define ICE_FLOW_PROT_HDR_SZ_IPV4	20
#define ICE_FLOW_PROT_HDR_SZ_IPV6	40
#define ICE_FLOW_PROT_HDR_SZ_TCP	20
#define ICE_FLOW_PROT_HDR_SZ_UDP	8
#define ICE_FLOW_PROT_HDR_SZ_SCTP	12
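
/* For example, ice_flow_calc_seg_sz() below sizes a segment with outer IPv4
 * and UDP headers as ICE_FLOW_PROT_HDR_SZ_MAC + ICE_FLOW_PROT_HDR_SZ_IPV4 +
 * ICE_FLOW_PROT_HDR_SZ_UDP = 14 + 20 + 8 = 42 bytes.
 */
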
/**
 * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose header size is to be determined
 */
static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
{
	u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC;

	/* L3 headers */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
		sz += ICE_FLOW_PROT_HDR_SZ_IPV6;

	/* L4 headers */
	if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
		sz += ICE_FLOW_PROT_HDR_SZ_TCP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
		sz += ICE_FLOW_PROT_HDR_SZ_UDP;
	else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
		sz += ICE_FLOW_PROT_HDR_SZ_SCTP;

	return sz;
}

/**
 * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
 * @params: information about the flow to be processed
 *
 * This function identifies the packet types associated with the protocol
 * headers being present in packet segments of the specified flow profile.
 */
static enum ice_status
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof;
	u8 i;

	memset(params->ptypes, 0xff, sizeof(params->ptypes));

	prof = params->prof;

	for (i = 0; i < params->prof->segs_cnt; i++) {
		const unsigned long *src;
		u32 hdrs;

		hdrs = prof->segs[i].hdrs;

		if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
		    !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
			src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
				(const unsigned long *)ice_ipv4_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
				(const unsigned long *)ice_ptypes_ipv4_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
			   !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
			src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
				(const unsigned long *)ice_ipv6_il_no_l4;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
			src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
				(const unsigned long *)ice_ptypes_ipv6_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		}

		if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
			src = (const unsigned long *)ice_ptypes_udp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
			bitmap_and(params->ptypes, params->ptypes,
				   (const unsigned long *)ice_ptypes_tcp_il,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
			src = (const unsigned long *)ice_ptypes_sctp_il;
			bitmap_and(params->ptypes, params->ptypes, src,
				   ICE_FLOW_PTYPE_MAX);
		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
			if (!i) {
				src = (const unsigned long *)ice_ptypes_gre_of;
				bitmap_and(params->ptypes, params->ptypes,
					   src, ICE_FLOW_PTYPE_MAX);
			}
		}
	}

	return 0;
}

/**
 * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: packet segment index of the field to be extracted
 * @fld: ID of field to be extracted
 *
 * This function determines the protocol ID, offset, and size of the given
 * field. It then allocates one or more extraction sequence entries for the
 * given field, and fill the entries with protocol ID and offset information.
 */
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
		    u8 seg, enum ice_flow_field fld)
{
	enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
	u8 fv_words = hw->blk[params->blk].es.fvw;
	struct ice_flow_fld_info *flds;
	u16 cnt, ese_bits, i;
	u16 off;

	flds = params->prof->segs[seg].fields;

	switch (fld) {
	case ICE_FLOW_FIELD_IDX_IPV4_SA:
	case ICE_FLOW_FIELD_IDX_IPV4_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
		break;
	case ICE_FLOW_FIELD_IDX_IPV6_SA:
	case ICE_FLOW_FIELD_IDX_IPV6_DA:
		prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
		break;
	case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
		prot_id = ICE_PROT_TCP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
		prot_id = ICE_PROT_UDP_IL_OR_S;
		break;
	case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
		prot_id = ICE_PROT_SCTP_IL;
		break;
	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
		prot_id = ICE_PROT_GRE_OF;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	/* Each extraction sequence entry is a word in size, and extracts a
	 * word-aligned offset from a protocol header.
	 */
	ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;

	flds[fld].xtrct.prot_id = prot_id;
	flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
			      ICE_FLOW_FV_EXTRACT_SZ;
	flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
	flds[fld].xtrct.idx = params->es_cnt;

	/* Adjust the next field-entry index after accommodating the number of
	 * entries this field consumes
	 */
	cnt = DIV_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size,
			   ese_bits);
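
	/* For instance, with ICE_FLOW_FV_EXTRACT_SZ == 2 (16-bit extraction
	 * words), the IPv6 source address (bit offset 64, 128 bits wide)
	 * yields xtrct.off = (64 / 16) * 2 = 8, xtrct.disp = 0 and
	 * cnt = DIV_ROUND_UP(0 + 128, 16) = 8 entries.
	 */
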
	/* Fill in the extraction sequence entries needed for this field */
	off = flds[fld].xtrct.off;
	for (i = 0; i < cnt; i++) {
		u8 idx;

		/* Make sure the number of extraction sequence required
		 * does not exceed the block's capability
		 */
		if (params->es_cnt >= fv_words)
			return ICE_ERR_MAX_LIMIT;

		/* some blocks require a reversed field vector layout */
		if (hw->blk[params->blk].es.reverse)
			idx = fv_words - params->es_cnt - 1;
		else
			idx = params->es_cnt;

		params->es[idx].prot_id = prot_id;
		params->es[idx].off = off;
		params->es_cnt++;

		off += ICE_FLOW_FV_EXTRACT_SZ;
	}

	return 0;
}

/**
 * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 * @seg: index of packet segment whose raw fields are to be extracted
 */
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
		     u8 seg)
{
	u16 fv_words;
	u16 hdrs_sz;
	u8 i;

	if (!params->prof->segs[seg].raws_cnt)
		return 0;

	if (params->prof->segs[seg].raws_cnt >
	    ARRAY_SIZE(params->prof->segs[seg].raws))
		return ICE_ERR_MAX_LIMIT;

	/* Offsets within the segment headers are not supported */
	hdrs_sz = ice_flow_calc_seg_sz(params, seg);
	if (!hdrs_sz)
		return ICE_ERR_PARAM;

	fv_words = hw->blk[params->blk].es.fvw;

	for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
		struct ice_flow_seg_fld_raw *raw;
		u16 off, cnt, j;

		raw = &params->prof->segs[seg].raws[i];

		/* Storing extraction information */
		raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
		raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
			ICE_FLOW_FV_EXTRACT_SZ;
		raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
			BITS_PER_BYTE;
		raw->info.xtrct.idx = params->es_cnt;

		/* Determine the number of field vector entries this raw field
		 * consumes.
		 */
		cnt = DIV_ROUND_UP(raw->info.xtrct.disp +
				   (raw->info.src.last * BITS_PER_BYTE),
				   (ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE));
		off = raw->info.xtrct.off;
		for (j = 0; j < cnt; j++) {
			u16 idx;

			/* Make sure the number of extraction sequence required
			 * does not exceed the block's capability
			 */
			if (params->es_cnt >= hw->blk[params->blk].es.count ||
			    params->es_cnt >= ICE_MAX_FV_WORDS)
				return ICE_ERR_MAX_LIMIT;

			/* some blocks require a reversed field vector layout */
			if (hw->blk[params->blk].es.reverse)
				idx = fv_words - params->es_cnt - 1;
			else
				idx = params->es_cnt;

			params->es[idx].prot_id = raw->info.xtrct.prot_id;
			params->es[idx].off = off;
			params->es_cnt++;

			off += ICE_FLOW_FV_EXTRACT_SZ;
		}
	}

	return 0;
}

/**
 * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 *
 * This function iterates through all matched fields in the given segments, and
 * creates an extraction sequence for the fields.
 */
static enum ice_status
ice_flow_create_xtrct_seq(struct ice_hw *hw,
			  struct ice_flow_prof_params *params)
{
	struct ice_flow_prof *prof = params->prof;
	enum ice_status status = 0;
	u8 i;

	for (i = 0; i < prof->segs_cnt; i++) {
		u8 j;

		for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
				 ICE_FLOW_FIELD_IDX_MAX) {
			status = ice_flow_xtract_fld(hw, params, i,
						     (enum ice_flow_field)j);
			if (status)
				return status;
		}

		/* Process raw matching bytes */
		status = ice_flow_xtract_raws(hw, params, i);
		if (status)
			return status;
	}

	return status;
}

/**
 * ice_flow_proc_segs - process all packet segments associated with a profile
 * @hw: pointer to the HW struct
 * @params: information about the flow to be processed
 */
static enum ice_status
ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
{
	enum ice_status status;

	status = ice_flow_proc_seg_hdrs(params);
	if (status)
		return status;

	status = ice_flow_create_xtrct_seq(hw, params);
	if (status)
		return status;

	switch (params->blk) {
	case ICE_BLK_RSS:
		status = 0;
		break;
	default:
		return ICE_ERR_NOT_IMPL;
	}

	return status;
}

#define ICE_FLOW_FIND_PROF_CHK_FLDS	0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI	0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR	0x00000004

/**
 * ice_flow_find_prof_conds - Find a profile matching headers and conditions
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
 * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
 */
static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
			 enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
			 u8 segs_cnt, u16 vsi_handle, u32 conds)
{
	struct ice_flow_prof *p, *prof = NULL;

	mutex_lock(&hw->fl_profs_locks[blk]);
	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
		if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
		    segs_cnt && segs_cnt == p->segs_cnt) {
			u8 i;

			/* Check for profile-VSI association if specified */
			if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
			    ice_is_vsi_valid(hw, vsi_handle) &&
			    !test_bit(vsi_handle, p->vsis))
				continue;

			/* Protocol headers must be checked. Matched fields are
			 * checked if specified.
			 */
			for (i = 0; i < segs_cnt; i++)
				if (segs[i].hdrs != p->segs[i].hdrs ||
				    ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
				     segs[i].match != p->segs[i].match))
					break;

			/* A match is found if all segments are matched */
			if (i == segs_cnt) {
				prof = p;
				break;
			}
		}
	mutex_unlock(&hw->fl_profs_locks[blk]);

	return prof;
}

/**
 * ice_flow_find_prof_id - Look up a profile with given profile ID
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: unique ID to identify this flow profile
 */
static struct ice_flow_prof *
ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
	struct ice_flow_prof *p;

	list_for_each_entry(p, &hw->fl_profs[blk], l_entry)
		if (p->id == prof_id)
			return p;

	return NULL;
}

/**
 * ice_dealloc_flow_entry - Deallocate flow entry memory
 * @hw: pointer to the HW struct
 * @entry: flow entry to be removed
 */
static void
ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
{
	if (!entry)
		return;

	if (entry->entry)
		devm_kfree(ice_hw_to_dev(hw), entry->entry);

	devm_kfree(ice_hw_to_dev(hw), entry);
}

/**
 * ice_flow_rem_entry_sync - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry: flow entry to be removed
 */
static enum ice_status
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
			struct ice_flow_entry *entry)
{
	if (!entry)
		return ICE_ERR_BAD_PTR;

	list_del(&entry->l_entry);

	ice_dealloc_flow_entry(hw, entry);

	return 0;
}

/**
 * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @prof: stores the returned flow profile added
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       enum ice_flow_dir dir, u64 prof_id,
		       struct ice_flow_seg_info *segs, u8 segs_cnt,
		       struct ice_flow_prof **prof)
{
	struct ice_flow_prof_params *params;
	enum ice_status status;
	u8 i;

	if (!prof)
		return ICE_ERR_BAD_PTR;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return ICE_ERR_NO_MEMORY;

	params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
				    GFP_KERNEL);
	if (!params->prof) {
		status = ICE_ERR_NO_MEMORY;
		goto free_params;
	}

	/* initialize extraction sequence to all invalid (0xff) */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
		params->es[i].prot_id = ICE_PROT_INVALID;
		params->es[i].off = ICE_FV_OFFSET_INVAL;
	}

	params->blk = blk;
	params->prof->id = prof_id;
	params->prof->dir = dir;
	params->prof->segs_cnt = segs_cnt;

	/* Make a copy of the segments that need to be persistent in the flow
	 * profile instance
	 */
	for (i = 0; i < segs_cnt; i++)
		memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs));

	status = ice_flow_proc_segs(hw, params);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
		goto out;
	}

	/* Add a HW profile for this flow profile */
	status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
			      params->es);
	if (status) {
		ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
		goto out;
	}

	INIT_LIST_HEAD(&params->prof->entries);
	mutex_init(&params->prof->entries_lock);
	*prof = params->prof;

out:
	if (status)
		devm_kfree(ice_hw_to_dev(hw), params->prof);
free_params:
	kfree(params);

	return status;
}

/**
 * ice_flow_rem_prof_sync - remove a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile to remove
 *
 * Assumption: the caller has acquired the lock to the profile list
 */
static enum ice_status
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof)
{
	enum ice_status status;

	/* Remove all remaining flow entries before removing the flow profile */
	if (!list_empty(&prof->entries)) {
		struct ice_flow_entry *e, *t;

		mutex_lock(&prof->entries_lock);

		list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
			status = ice_flow_rem_entry_sync(hw, blk, e);
			if (status)
				break;
		}

		mutex_unlock(&prof->entries_lock);
	}

	/* Remove all hardware profiles associated with this flow profile */
	status = ice_rem_prof(hw, blk, prof->id);
	if (!status) {
		list_del(&prof->l_entry);
		mutex_destroy(&prof->entries_lock);
		devm_kfree(ice_hw_to_dev(hw), prof);
	}

	return status;
}

/**
 * ice_flow_assoc_prof - associate a VSI with a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile
 * @vsi_handle: software VSI handle
 *
 * Assumption: the caller has acquired the lock to the profile list
 * and the software VSI handle has been validated
 */
static enum ice_status
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
		    struct ice_flow_prof *prof, u16 vsi_handle)
{
	enum ice_status status = 0;

	if (!test_bit(vsi_handle, prof->vsis)) {
		status = ice_add_prof_id_flow(hw, blk,
					      ice_get_hw_vsi_num(hw,
								 vsi_handle),
					      prof->id);
		if (!status)
			set_bit(vsi_handle, prof->vsis);
		else
			ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
				  status);
	}

	return status;
}

/**
 * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
 * @hw: pointer to the hardware structure
 * @blk: classification stage
 * @prof: pointer to flow profile
 * @vsi_handle: software VSI handle
 *
 * Assumption: the caller has acquired the lock to the profile list
 * and the software VSI handle has been validated
 */
static enum ice_status
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
		       struct ice_flow_prof *prof, u16 vsi_handle)
{
	enum ice_status status = 0;

	if (test_bit(vsi_handle, prof->vsis)) {
		status = ice_rem_prof_id_flow(hw, blk,
					      ice_get_hw_vsi_num(hw,
								 vsi_handle),
					      prof->id);
		if (!status)
			clear_bit(vsi_handle, prof->vsis);
		else
			ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
				  status);
	}

	return status;
}

/**
 * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @dir: flow direction
 * @prof_id: unique ID to identify this flow profile
 * @segs: array of one or more packet segments that describe the flow
 * @segs_cnt: number of packet segments provided
 * @prof: stores the returned flow profile added
 */
enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
		  struct ice_flow_prof **prof)
{
	enum ice_status status;

	if (segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_MAX_LIMIT;

	if (!segs_cnt)
		return ICE_ERR_PARAM;

	if (!segs)
		return ICE_ERR_BAD_PTR;

	status = ice_flow_val_hdrs(segs, segs_cnt);
	if (status)
		return status;

	mutex_lock(&hw->fl_profs_locks[blk]);

	status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
					prof);
	if (!status)
		list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);

	mutex_unlock(&hw->fl_profs_locks[blk]);

	return status;
}

/**
 * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
 * @hw: pointer to the HW struct
 * @blk: the block for which the flow profile is to be removed
 * @prof_id: unique ID of the flow profile to be removed
 */
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
	struct ice_flow_prof *prof;
	enum ice_status status;

	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto out;
	}

	/* prof becomes invalid after the call */
	status = ice_flow_rem_prof_sync(hw, blk, prof);

out:
	mutex_unlock(&hw->fl_profs_locks[blk]);

	return status;
}

/**
 * ice_flow_add_entry - Add a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @prof_id: ID of the profile to add a new flow entry to
 * @entry_id: unique ID to identify this flow entry
 * @vsi_handle: software VSI handle for the flow entry
 * @prio: priority of the flow entry
 * @data: pointer to a data buffer containing flow entry's match values/masks
 * @entry_h: pointer to buffer that receives the new flow entry's handle
 */
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
		   void *data, u64 *entry_h)
{
	struct ice_flow_entry *e = NULL;
	struct ice_flow_prof *prof;
	enum ice_status status;

	/* No flow entry data is expected for RSS */
	if (!entry_h || (!data && blk != ICE_BLK_RSS))
		return ICE_ERR_BAD_PTR;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->fl_profs_locks[blk]);

	prof = ice_flow_find_prof_id(hw, blk, prof_id);
	if (!prof) {
		status = ICE_ERR_DOES_NOT_EXIST;
	} else {
		/* Allocate memory for the entry being added and associate
		 * the VSI to the found flow profile
		 */
		e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
		if (!e)
			status = ICE_ERR_NO_MEMORY;
		else
			status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	}

	mutex_unlock(&hw->fl_profs_locks[blk]);
	if (status)
		goto out;

	e->id = entry_id;
	e->vsi_handle = vsi_handle;
	e->prof = prof;
	e->priority = prio;

	switch (blk) {
	case ICE_BLK_RSS:
		break;
	default:
		status = ICE_ERR_NOT_IMPL;
		goto out;
	}

	mutex_lock(&prof->entries_lock);
	list_add(&e->l_entry, &prof->entries);
	mutex_unlock(&prof->entries_lock);

	*entry_h = ICE_FLOW_ENTRY_HNDL(e);

out:
	if (status && e) {
		if (e->entry)
			devm_kfree(ice_hw_to_dev(hw), e->entry);
		devm_kfree(ice_hw_to_dev(hw), e);
	}

	return status;
}

/**
 * ice_flow_rem_entry - Remove a flow entry
 * @hw: pointer to the HW struct
 * @blk: classification stage
 * @entry_h: handle to the flow entry to be removed
 */
enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
				   u64 entry_h)
{
	struct ice_flow_entry *entry;
	struct ice_flow_prof *prof;
	enum ice_status status = 0;

	if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
		return ICE_ERR_PARAM;

	entry = ICE_FLOW_ENTRY_PTR(entry_h);

	/* Retain the pointer to the flow profile as the entry will be freed */
	prof = entry->prof;

	mutex_lock(&prof->entries_lock);
	status = ice_flow_rem_entry_sync(hw, blk, entry);
	mutex_unlock(&prof->entries_lock);

	return status;
}

/**
 * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @field_type: type of the field
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 *           entry's input buffer
 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
 *            input buffer
 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
 *            entry's input buffer
 *
 * This helper function stores information of a field being matched, including
 * the type of the field and the locations of the value to match, the mask, and
 * the upper-bound value in the start of the input buffer for a flow entry.
 * This function should only be used for fixed-size data structures.
 *
 * This function also opportunistically determines the protocol headers to be
 * present based on the fields being set. Some fields cannot be used alone to
 * determine the protocol headers present. Sometimes, fields for particular
 * protocol headers are not matched. In those cases, the protocol headers
 * must be explicitly set.
 */
static void
ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		     enum ice_flow_fld_match_type field_type, u16 val_loc,
		     u16 mask_loc, u16 last_loc)
{
	u64 bit = BIT_ULL(fld);

	seg->match |= bit;
	if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
		seg->range |= bit;

	seg->fields[fld].type = field_type;
	seg->fields[fld].src.val = val_loc;
	seg->fields[fld].src.mask = mask_loc;
	seg->fields[fld].src.last = last_loc;

	ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
}

/**
 * ice_flow_set_fld - specifies locations of field from entry's input buffer
 * @seg: packet segment the field being set belongs to
 * @fld: field to be set
 * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
 *           entry's input buffer
 * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
 *            input buffer
 * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
 *            entry's input buffer
 * @range: indicate if field being matched is to be in a range
 *
 * This function specifies the locations, in the form of byte offsets from the
 * start of the input buffer for a flow entry, from where the value to match,
 * the mask value, and upper value can be extracted. These locations are then
 * stored in the flow profile. When adding a flow entry associated with the
 * flow profile, these locations will be used to quickly extract the values and
 * create the content of a match entry. This function should only be used for
 * fixed-size data structures.
 */
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
{
	enum ice_flow_fld_match_type t = range ?
		ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;

	ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
}
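
/* Illustrative usage (hypothetical offsets): a caller whose entry input
 * buffer stores the IPv4 source address value at byte 0 and its mask at
 * byte 4, with no upper-bound value, could register the field with:
 *
 *	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 4,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 */
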
/**
 * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
 * @seg: packet segment the field being set belongs to
 * @off: offset of the raw field from the beginning of the segment in bytes
 * @len: length of the raw pattern to be matched
 * @val_loc: location of the value to match from entry's input buffer
 * @mask_loc: location of mask value from entry's input buffer
 *
 * This function specifies the offset of the raw field to be match from the
 * beginning of the specified packet segment, and the locations, in the form of
 * byte offsets from the start of the input buffer for a flow entry, from where
 * the value to match and the mask value to be extracted. These locations are
 * then stored in the flow profile. When adding flow entries to the associated
 * flow profile, these locations can be used to quickly extract the values to
 * create the content of a match entry. This function should only be used for
 * fixed-size data structures.
 */
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
		     u16 val_loc, u16 mask_loc)
{
	if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
		seg->raws[seg->raws_cnt].off = off;
		seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
		seg->raws[seg->raws_cnt].info.src.val = val_loc;
		seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
		/* The "last" field is used to store the length of the field */
		seg->raws[seg->raws_cnt].info.src.last = len;
	}

	/* Overflows of "raws" will be handled as an error condition later in
	 * the flow when this information is processed.
	 */
	seg->raws_cnt++;
}

#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
	(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)

#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
	(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)

#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
	(ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
	 ICE_FLOW_RSS_SEG_HDR_L4_MASKS)

/**
 * ice_flow_set_rss_seg_info - setup packet segments for RSS
 * @segs: pointer to the flow field segment(s)
 * @hash_fields: fields to be hashed on for the segment(s)
 * @flow_hdr: protocol header fields within a packet segment
 *
 * Helper function to extract fields from hash bitmap and use flow
 * header value to set flow field segment for further use in flow
 * profile entry or removal.
 */
static enum ice_status
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
			  u32 flow_hdr)
{
	u64 val;
	u8 i;

	for_each_set_bit(i, (unsigned long *)&hash_fields,
			 ICE_FLOW_FIELD_IDX_MAX)
		ice_flow_set_fld(segs, (enum ice_flow_field)i,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);

	ICE_FLOW_SET_HDRS(segs, flow_hdr);

	if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
		return ICE_ERR_PARAM;

	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
	if (val && !is_power_of_2(val))
		return ICE_ERR_CFG;

	val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
	if (val && !is_power_of_2(val))
		return ICE_ERR_CFG;

	return 0;
}

/**
 * ice_rem_vsi_rss_list - remove VSI from RSS list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * Remove the VSI from all RSS configurations in the list.
 */
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_rss_cfg *r, *tmp;

	if (list_empty(&hw->rss_list_head))
		return;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
		if (test_and_clear_bit(vsi_handle, r->vsis))
			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
				list_del(&r->l_entry);
				devm_kfree(ice_hw_to_dev(hw), r);
			}
	mutex_unlock(&hw->rss_locks);
}

/**
 * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 *
 * This function will iterate through all flow profiles and disassociate
 * the VSI from that profile. If the flow profile has no VSIs it will
 * be removed.
 */
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *p, *t;
	enum ice_status status = 0;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	if (list_empty(&hw->fl_profs[blk]))
		return 0;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
		if (test_bit(vsi_handle, p->vsis)) {
			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
			if (status)
				break;

			if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
				status = ice_flow_rem_prof(hw, blk, p->id);
				if (status)
					break;
			}
		}
	mutex_unlock(&hw->rss_locks);

	return status;
}

/**
 * ice_rem_rss_list - remove RSS configuration from list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 */
static void
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
	struct ice_rss_cfg *r, *tmp;

	/* Search for RSS hash fields associated to the VSI that match the
	 * hash configurations associated to the flow profile. If found
	 * remove from the RSS entry list of the VSI context and delete entry.
	 */
	list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
			clear_bit(vsi_handle, r->vsis);
			if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
				list_del(&r->l_entry);
				devm_kfree(ice_hw_to_dev(hw), r);
			}
			return;
		}
}

/**
 * ice_add_rss_list - add RSS configuration to list
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @prof: pointer to flow profile
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
	struct ice_rss_cfg *r, *rss_cfg;

	list_for_each_entry(r, &hw->rss_list_head, l_entry)
		if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
		    r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
			set_bit(vsi_handle, r->vsis);
			return 0;
		}

	rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
			       GFP_KERNEL);
	if (!rss_cfg)
		return ICE_ERR_NO_MEMORY;

	rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
	rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
	set_bit(vsi_handle, rss_cfg->vsis);

	list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);

	return 0;
}

#define ICE_FLOW_PROF_HASH_S	0
#define ICE_FLOW_PROF_HASH_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S	32
#define ICE_FLOW_PROF_HDR_M	(0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S	63
#define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))

#define ICE_RSS_OUTER_HEADERS	1
#define ICE_RSS_INNER_HEADERS	2

/* Flow profile ID format:
 * [0:31] - Packet match fields
 * [32:62] - Protocol header
 * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
 */
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
	(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
	      (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
	      ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
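
/* For example, a single-segment (non-tunneled) RSS profile hashing on
 * ICE_FLOW_HASH_IPV4 over ICE_FLOW_SEG_HDR_IPV4 would use
 * ICE_FLOW_GEN_PROFID(ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_IPV4, 1):
 * the hash bits land in [0:31], the header bits in [32:62], and bit 63
 * stays clear because segs_cnt - 1 == 0.
 */
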
/**
 * ice_add_rss_cfg_sync - add an RSS configuration
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 * @segs_cnt: packet segment count
 *
 * Assumption: lock has already been acquired for RSS list
 */
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		     u32 addl_hdrs, u8 segs_cnt)
{
	const enum ice_block blk = ICE_BLK_RSS;
	struct ice_flow_prof *prof = NULL;
	struct ice_flow_seg_info *segs;
	enum ice_status status;

	if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
		return ICE_ERR_PARAM;

	segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
	if (!segs)
		return ICE_ERR_NO_MEMORY;

	/* Construct the packet segment info from the hashed fields */
	status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
					   addl_hdrs);
	if (status)
		goto exit;

	/* Search for a flow profile that has matching headers, hash fields
	 * and has the input VSI associated to it. If found, no further
	 * operations required and exit.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS |
					ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof)
		goto exit;

	/* Check if a flow profile exists with the same protocol headers and
	 * associated with the input VSI. If so disassociate the VSI from
	 * this profile. The VSI will be added to a new profile created with
	 * the protocol header and new hash field configuration.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
	if (prof) {
		status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			ice_rem_rss_list(hw, vsi_handle, prof);
		else
			goto exit;

		/* Remove profile if it has no VSIs associated */
		if (bitmap_empty(prof->vsis, ICE_MAX_VSI)) {
			status = ice_flow_rem_prof(hw, blk, prof->id);
			if (status)
				goto exit;
		}
	}

	/* Search for a profile that has same match fields only. If this
	 * exists then associate the VSI to this profile.
	 */
	prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
					vsi_handle,
					ICE_FLOW_FIND_PROF_CHK_FLDS);
	if (prof) {
		status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
		if (!status)
			status = ice_add_rss_list(hw, vsi_handle, prof);
		goto exit;
	}

	/* Create a new flow profile with generated profile and packet
	 * segment information.
	 */
	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
				   ICE_FLOW_GEN_PROFID(hashed_flds,
						       segs[segs_cnt - 1].hdrs,
						       segs_cnt),
				   segs, segs_cnt, &prof);
	if (status)
		goto exit;

	status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
	/* If association to a new flow profile failed then this profile can
	 * be removed.
	 */
	if (status) {
		ice_flow_rem_prof(hw, blk, prof->id);
		goto exit;
	}

	status = ice_add_rss_list(hw, vsi_handle, prof);

exit:
	kfree(segs);
	return status;
}

/**
 * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
 * @addl_hdrs: protocol header fields
 *
 * This function will generate a flow profile based on fields associated with
 * the input fields to hash on, the flow type and use the VSI number to add
 * a flow entry to the profile.
 */
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		u32 addl_hdrs)
{
	enum ice_status status;

	if (hashed_flds == ICE_HASH_INVALID ||
	    !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->rss_locks);
	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
				      ICE_RSS_OUTER_HEADERS);
	if (!status)
		status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
					      addl_hdrs, ICE_RSS_INNER_HEADERS);
	mutex_unlock(&hw->rss_locks);

	return status;
}

/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
 * As the ice_flow_avf_hdr_field represent individual bit shifts in a hash,
 * convert its values to their appropriate flow L3, L4 values.
 */
#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))

#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
	(BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
	 BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
	(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
	 ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))

/**
 * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
 *
 * This function will take the hash bitmap provided by the AVF driver via a
 * message, convert it to ICE-compatible values, and configure RSS flow
 * profiles.
 */
enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
{
	enum ice_status status = 0;
	u64 hash_flds;

	if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
	    !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Make sure no unsupported bits are specified */
	if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
			 ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
		return ICE_ERR_CFG;

	hash_flds = avf_hash;

	/* Always create an L3 RSS configuration for any L4 RSS configuration */
	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
		hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;

	if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
		hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;

	/* Create the corresponding RSS configuration for each valid hash bit */
	while (hash_flds) {
		u64 rss_hash = ICE_HASH_INVALID;

		if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
			if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4;
				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_TCP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_UDP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
			} else if (hash_flds &
				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
				rss_hash = ICE_FLOW_HASH_IPV4 |
					ICE_FLOW_HASH_SCTP_PORT;
				hash_flds &=
					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
			}
		} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
			if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6;
				hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_TCP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
			} else if (hash_flds &
				   ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_UDP_PORT;
				hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
			} else if (hash_flds &
				   BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
				rss_hash = ICE_FLOW_HASH_IPV6 |
					ICE_FLOW_HASH_SCTP_PORT;
				hash_flds &=
					~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
			}
		}

		if (rss_hash == ICE_HASH_INVALID)
			return ICE_ERR_OUT_OF_RANGE;

		status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
					 ICE_FLOW_SEG_HDR_NONE);
		if (status)
			break;
	}

	return status;
}

/**
 * ice_replay_rss_cfg - replay RSS configurations associated with VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 */
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status = 0;
	struct ice_rss_cfg *r;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry(r, &hw->rss_list_head, l_entry) {
		if (test_bit(vsi_handle, r->vsis)) {
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
						      r->hashed_flds,
						      r->packet_hdr,
						      ICE_RSS_OUTER_HEADERS);
			if (status)
				break;
			status = ice_add_rss_cfg_sync(hw, vsi_handle,
						      r->hashed_flds,
						      r->packet_hdr,
						      ICE_RSS_INNER_HEADERS);
			if (status)
				break;
		}
	}
	mutex_unlock(&hw->rss_locks);

	return status;
}

/**
 * ice_get_rss_cfg - returns hashed fields for the given header types
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @hdrs: protocol header type
 *
 * This function will return the match fields of the first instance of flow
 * profile having the given header types and containing input VSI
 */
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
{
	u64 rss_hash = ICE_HASH_INVALID;
	struct ice_rss_cfg *r;

	/* verify if the protocol header is non zero and VSI is valid */
	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
		return ICE_HASH_INVALID;

	mutex_lock(&hw->rss_locks);
	list_for_each_entry(r, &hw->rss_list_head, l_entry)
		if (test_bit(vsi_handle, r->vsis) &&
		    r->packet_hdr == hdrs) {
			rss_hash = r->hashed_flds;
			break;
		}
	mutex_unlock(&hw->rss_locks);

	return rss_hash;
}