drivers/net/ethernet/intel/ice/ice_flex_pipe.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_flow.h"
/* To support tunneling entries by PF, the package will append the PF number to
 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
 */
static const struct ice_tunnel_type_scan tnls[] = {
	{ TNL_VXLAN, "TNL_VXLAN_PF" },
	{ TNL_GENEVE, "TNL_GENEVE_PF" },
	{ TNL_LAST, "" }
};
static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
	/* SWITCH */
	{
		ICE_SID_XLT0_SW,
		ICE_SID_XLT_KEY_BUILDER_SW,
		ICE_SID_XLT1_SW,
		ICE_SID_XLT2_SW,
		ICE_SID_PROFID_TCAM_SW,
		ICE_SID_PROFID_REDIR_SW,
		ICE_SID_FLD_VEC_SW,
		ICE_SID_CDID_KEY_BUILDER_SW,
		ICE_SID_CDID_REDIR_SW
	},

	/* ACL */
	{
		ICE_SID_XLT0_ACL,
		ICE_SID_XLT_KEY_BUILDER_ACL,
		ICE_SID_XLT1_ACL,
		ICE_SID_XLT2_ACL,
		ICE_SID_PROFID_TCAM_ACL,
		ICE_SID_PROFID_REDIR_ACL,
		ICE_SID_FLD_VEC_ACL,
		ICE_SID_CDID_KEY_BUILDER_ACL,
		ICE_SID_CDID_REDIR_ACL
	},

	/* FD */
	{
		ICE_SID_XLT0_FD,
		ICE_SID_XLT_KEY_BUILDER_FD,
		ICE_SID_XLT1_FD,
		ICE_SID_XLT2_FD,
		ICE_SID_PROFID_TCAM_FD,
		ICE_SID_PROFID_REDIR_FD,
		ICE_SID_FLD_VEC_FD,
		ICE_SID_CDID_KEY_BUILDER_FD,
		ICE_SID_CDID_REDIR_FD
	},

	/* RSS */
	{
		ICE_SID_XLT0_RSS,
		ICE_SID_XLT_KEY_BUILDER_RSS,
		ICE_SID_XLT1_RSS,
		ICE_SID_XLT2_RSS,
		ICE_SID_PROFID_TCAM_RSS,
		ICE_SID_PROFID_REDIR_RSS,
		ICE_SID_FLD_VEC_RSS,
		ICE_SID_CDID_KEY_BUILDER_RSS,
		ICE_SID_CDID_REDIR_RSS
	},

	/* PE */
	{
		ICE_SID_XLT0_PE,
		ICE_SID_XLT_KEY_BUILDER_PE,
		ICE_SID_XLT1_PE,
		ICE_SID_XLT2_PE,
		ICE_SID_PROFID_TCAM_PE,
		ICE_SID_PROFID_REDIR_PE,
		ICE_SID_FLD_VEC_PE,
		ICE_SID_CDID_KEY_BUILDER_PE,
		ICE_SID_CDID_REDIR_PE
	},
};
/**
 * ice_sect_id - returns section ID
 * @blk: block type
 * @sect: section type
 *
 * This helper function returns the proper section ID given a block type and a
 * section type.
 */
static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
{
	return ice_sect_lkup[blk][sect];
}
/**
 * ice_pkg_val_buf
 * @buf: pointer to the ice buffer
 *
 * This helper function validates a buffer's header.
 */
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
	struct ice_buf_hdr *hdr;
	u16 section_count;
	u16 data_end;

	hdr = (struct ice_buf_hdr *)buf->buf;
	/* verify data */
	section_count = le16_to_cpu(hdr->section_count);
	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
		return NULL;

	data_end = le16_to_cpu(hdr->data_end);
	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
		return NULL;

	return hdr;
}
/**
 * ice_find_buf_table
 * @ice_seg: pointer to the ice segment
 *
 * Returns the address of the buffer table within the ice segment.
 */
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
{
	struct ice_nvm_table *nvms;

	nvms = (struct ice_nvm_table *)
		(ice_seg->device_table +
		 le32_to_cpu(ice_seg->device_table_count));

	return (__force struct ice_buf_table *)
		(nvms->vers + le32_to_cpu(nvms->table_count));
}
/**
 * ice_pkg_enum_buf
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This function will enumerate all the buffers in the ice segment. The first
 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
 * ice_seg is set to NULL which continues the enumeration. When the function
 * returns a NULL pointer, then the end of the buffers has been reached, or an
 * unexpected value has been detected (for example an invalid section count or
 * an invalid buffer end value).
 */
static struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (ice_seg) {
		state->buf_table = ice_find_buf_table(ice_seg);
		if (!state->buf_table)
			return NULL;

		state->buf_idx = 0;
		return ice_pkg_val_buf(state->buf_table->buf_array);
	}

	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
		return ice_pkg_val_buf(state->buf_table->buf_array +
				       state->buf_idx);
	else
		return NULL;
}
/**
 * ice_pkg_advance_sect
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This helper function will advance the section within the ice segment,
 * also advancing the buffer if needed.
 */
static bool
ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (!ice_seg && !state->buf)
		return false;

	if (!ice_seg && state->buf)
		if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
			return true;

	state->buf = ice_pkg_enum_buf(ice_seg, state);
	if (!state->buf)
		return false;

	/* start of new buffer, reset section index */
	state->sect_idx = 0;
	return true;
}
/**
 * ice_pkg_enum_section
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 *
 * This function will enumerate all the sections of a particular type in the
 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the matching
 * sections has been reached.
 */
static void *
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		     u32 sect_type)
{
	u16 offset, size;

	if (ice_seg)
		state->type = sect_type;

	if (!ice_pkg_advance_sect(ice_seg, state))
		return NULL;

	/* scan for next matching section */
	while (state->buf->section_entry[state->sect_idx].type !=
	       cpu_to_le32(state->type))
		if (!ice_pkg_advance_sect(NULL, state))
			return NULL;

	/* validate section */
	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
		return NULL;

	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
		return NULL;

	/* make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE)
		return NULL;

	state->sect_type =
		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);

	/* calc pointer to this section */
	state->sect = ((u8 *)state->buf) +
		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);

	return state->sect;
}
/**
 * ice_pkg_enum_entry
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 * @offset: pointer to variable that receives the offset in the table (optional)
 * @handler: function that handles access to the entries into the section type
 *
 * This function will enumerate all the entries of a particular section type in
 * the ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
 * When the function returns a NULL pointer, then the end of the entries has
 * been reached.
 *
 * Since each section may have a different header and entry size, the handler
 * function is needed to determine the number and location of entries in each
 * section.
 *
 * The offset parameter is optional, but should be used for sections that
 * contain an offset for each section table. For such cases, the section handler
 * function must return the appropriate offset + index to give the absolute
 * offset for each entry. For example, if the base for a section's header
 * indicates a base offset of 10, and the index for the entry is 2, then the
 * section handler function should set the offset to 10 + 2 = 12.
 */
static void *
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		   u32 sect_type, u32 *offset,
		   void *(*handler)(u32 sect_type, void *section,
				    u32 index, u32 *offset))
{
	void *entry;

	if (ice_seg) {
		if (!handler)
			return NULL;

		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
			return NULL;

		state->entry_idx = 0;
		state->handler = handler;
	} else {
		state->entry_idx++;
	}

	if (!state->handler)
		return NULL;

	/* get entry */
	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
			       offset);
	if (!entry) {
		/* end of a section, look for another section of this type */
		if (!ice_pkg_enum_section(NULL, state, 0))
			return NULL;

		state->entry_idx = 0;
		entry = state->handler(state->sect_type, state->sect,
				       state->entry_idx, offset);
	}

	return entry;
}
/**
 * ice_boost_tcam_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the boost TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual boost TCAM entries.
 */
static void *
ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
{
	struct ice_boost_tcam_section *boost;

	if (!section)
		return NULL;

	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
		return NULL;

	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	boost = section;
	if (index >= le16_to_cpu(boost->count))
		return NULL;

	return boost->tcam + index;
}
/**
 * ice_find_boost_entry
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @addr: Boost TCAM address of entry to search for
 * @entry: returns pointer to the entry
 *
 * Finds a particular Boost TCAM entry and returns a pointer to that entry
 * if it is found. The ice_seg parameter must not be NULL since the first call
 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
 */
static enum ice_status
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
		     struct ice_boost_tcam_entry **entry)
{
	struct ice_boost_tcam_entry *tcam;
	struct ice_pkg_enum state;

	memset(&state, 0, sizeof(state));

	if (!ice_seg)
		return ICE_ERR_PARAM;

	do {
		tcam = ice_pkg_enum_entry(ice_seg, &state,
					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
					  ice_boost_tcam_handler);
		if (tcam && le16_to_cpu(tcam->addr) == addr) {
			*entry = tcam;
			return 0;
		}

		ice_seg = NULL;
	} while (tcam);

	*entry = NULL;
	return ICE_ERR_CFG;
}
/**
 * ice_label_enum_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the label entry to be returned
 * @offset: pointer to receive absolute offset, always zero for label sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual label entries.
 */
static void *
ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
		       u32 *offset)
{
	struct ice_label_section *labels;

	if (!section)
		return NULL;

	if (index > ICE_MAX_LABELS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	labels = section;
	if (index >= le16_to_cpu(labels->count))
		return NULL;

	return labels->label + index;
}
/**
 * ice_enum_labels
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @type: the section type that will contain the label (0 on subsequent calls)
 * @state: ice_pkg_enum structure that will hold the state of the enumeration
 * @value: pointer to a value that will return the label's value if found
 *
 * Enumerates a list of labels in the package. The caller will call
 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
 * pointer, the end of the list has been reached.
 */
static char *
ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
		u16 *value)
{
	struct ice_label *label;

	/* Check for valid label section on first call */
	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
		return NULL;

	label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
				   ice_label_enum_handler);
	if (!label)
		return NULL;

	*value = le16_to_cpu(label->value);
	return label->name;
}
/**
 * ice_init_pkg_hints
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the segment of the package scan (non-NULL)
 *
 * This function will scan the package and save off relevant information
 * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
 * since the first call to ice_enum_labels requires a pointer to an actual
 * ice_seg structure.
 */
static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_pkg_enum state;
	char *label_name;
	u16 val;
	int i;

	memset(&hw->tnl, 0, sizeof(hw->tnl));
	memset(&state, 0, sizeof(state));

	if (!ice_seg)
		return;

	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
				     &val);

	while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
		for (i = 0; tnls[i].type != TNL_LAST; i++) {
			size_t len = strlen(tnls[i].label_prefix);

			/* Look for matching label start, before continuing */
			if (strncmp(label_name, tnls[i].label_prefix, len))
				continue;

			/* Make sure this label matches our PF. Note that the
			 * PF character ('0' - '7') will be located where our
			 * prefix string's null terminator is located.
			 */
			if ((label_name[len] - '0') == hw->pf_id) {
				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
				hw->tnl.tbl[hw->tnl.count].valid = false;
				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
				hw->tnl.tbl[hw->tnl.count].port = 0;
				hw->tnl.count++;
				break;
			}
		}

		label_name = ice_enum_labels(NULL, 0, &state, &val);
	}

	/* Cache the appropriate boost TCAM entry pointers */
	for (i = 0; i < hw->tnl.count; i++) {
		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
				     &hw->tnl.tbl[i].boost_entry);
		if (hw->tnl.tbl[i].boost_entry) {
			hw->tnl.tbl[i].valid = true;
			if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
				hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
		}
	}
}
/* Key creation */

#define ICE_DC_KEY	0x1	/* don't care */
#define ICE_DC_KEYINV	0x1
#define ICE_NM_KEY	0x0	/* never match */
#define ICE_NM_KEYINV	0x0
#define ICE_0_KEY	0x1	/* match 0 */
#define ICE_0_KEYINV	0x0
#define ICE_1_KEY	0x0	/* match 1 */
#define ICE_1_KEYINV	0x1
/**
 * ice_gen_key_word - generate 16-bits of a key/mask word
 * @val: the value
 * @valid: valid bits mask (change only the valid bits)
 * @dont_care: don't care mask
 * @nvr_mtch: never match mask
 * @key: pointer to an array where the resulting key portion will be stored
 * @key_inv: pointer to an array where the resulting key invert portion will
 *	     be stored
 *
 * This function generates 16-bits from an 8-bit value, an 8-bit don't care
 * mask and an 8-bit never match mask. The 16-bits of output are divided into
 * 8 bits of key and 8 bits of key invert.
 *
 *     '0' =    b01, always match a 0 bit
 *     '1' =    b10, always match a 1 bit
 *     '?' =    b11, don't care bit (always matches)
 *     '~' =    b00, never match bit
 *
 * Input:
 *          val:         b0  1  0  1  0  1
 *          dont_care:   b0  0  1  1  0  0
 *          never_mtch:  b0  0  0  0  1  1
 *          ------------------------------
 * Result:  key:        b01 10 11 11 00 00
 */
static enum ice_status
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
		 u8 *key_inv)
{
	u8 in_key = *key, in_key_inv = *key_inv;
	u8 i;

	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
		return ICE_ERR_CFG;

	*key = 0;
	*key_inv = 0;

	/* encode the 8 bits into 8-bit key and 8-bit key invert */
	for (i = 0; i < 8; i++) {
		*key >>= 1;
		*key_inv >>= 1;

		if (!(valid & 0x1)) { /* change only valid bits */
			*key |= (in_key & 0x1) << 7;
			*key_inv |= (in_key_inv & 0x1) << 7;
		} else if (dont_care & 0x1) { /* don't care bit */
			*key |= ICE_DC_KEY << 7;
			*key_inv |= ICE_DC_KEYINV << 7;
		} else if (nvr_mtch & 0x1) { /* never match bit */
			*key |= ICE_NM_KEY << 7;
			*key_inv |= ICE_NM_KEYINV << 7;
		} else if (val & 0x01) { /* exact 1 match */
			*key |= ICE_1_KEY << 7;
			*key_inv |= ICE_1_KEYINV << 7;
		} else { /* exact 0 match */
			*key |= ICE_0_KEY << 7;
			*key_inv |= ICE_0_KEYINV << 7;
		}

		dont_care >>= 1;
		nvr_mtch >>= 1;
		valid >>= 1;
		val >>= 1;
		in_key >>= 1;
		in_key_inv >>= 1;
	}

	return 0;
}
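/* Illustrative expansion of the example above (editorial sketch, not in the
 * original source): reading each two-bit code as b<key_inv><key>, the pairs
 * b01 10 11 11 00 00 split into
 *
 *          key bits:     b1 0 1 1 0 0
 *          key_inv bits: b0 1 1 1 0 0
 *
 * i.e. the exact-match '0' and '1' positions invert between key and key_inv,
 * don't-care positions are 1 in both, and never-match positions are 0 in both.
 */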
/**
 * ice_bits_max_set - determine if the number of bits set is within a maximum
 * @mask: pointer to the byte array which is the mask
 * @size: the number of bytes in the mask
 * @max: the max number of set bits
 *
 * This function determines if there are at most 'max' number of bits set in an
 * array. Returns true if the number of bits set is <= max, false otherwise.
 */
static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
{
	u16 count = 0;
	u16 i;

	/* check each byte */
	for (i = 0; i < size; i++) {
		/* if 0, go to next byte */
		if (!mask[i])
			continue;

		/* We know there is at least one set bit in this byte because of
		 * the above check; if we already have found 'max' number of
		 * bits set, then we can return failure now.
		 */
		if (count == max)
			return false;

		/* count the bits in this byte, checking threshold */
		count += hweight8(mask[i]);
		if (count > max)
			return false;
	}

	return true;
}
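/* Example (editorial, not in the original source): for mask = { 0x00, 0x80 }
 * and max = 1 this returns true (one bit set), while mask = { 0x00, 0x81 }
 * returns false (two bits set exceeds the maximum).
 */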
/**
 * ice_set_key - generate a variable sized key with multiples of 16-bits
 * @key: pointer to where the key will be stored
 * @size: the size of the complete key in bytes (must be even)
 * @val: array of 8-bit values that makes up the value portion of the key
 * @upd: array of 8-bit masks that determine what key portion to update
 * @dc: array of 8-bit masks that make up the don't care mask
 * @nm: array of 8-bit masks that make up the never match mask
 * @off: the offset of the first byte in the key to update
 * @len: the number of bytes in the key update
 *
 * This function generates a key from a value, a don't care mask and a never
 * match mask.
 * upd, dc, and nm are optional parameters, and can be NULL:
 *	upd == NULL --> upd mask is all 1's (update all bits)
 *	dc == NULL --> dc mask is all 0's (no don't care bits)
 *	nm == NULL --> nm mask is all 0's (no never match bits)
 */
static enum ice_status
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
	    u16 len)
{
	u16 half_size;
	u16 i;

	/* size must be a multiple of 2 bytes. */
	if (size % 2)
		return ICE_ERR_CFG;

	half_size = size / 2;
	if (off + len > half_size)
		return ICE_ERR_CFG;

	/* Make sure at most one bit is set in the never match mask. Having more
	 * than one never match mask bit set will cause HW to consume excessive
	 * power otherwise; this is a power management efficiency check.
	 */
#define ICE_NVR_MTCH_BITS_MAX	1
	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
		return ICE_ERR_CFG;

	for (i = 0; i < len; i++)
		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
				     dc ? dc[i] : 0, nm ? nm[i] : 0,
				     key + off + i, key + half_size + off + i))
			return ICE_ERR_CFG;

	return 0;
}
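/* Hypothetical usage sketch (editorial, not part of the driver): write a
 * 16-bit value into bytes 4-5 of a 20-byte key/key-invert pair, updating all
 * bits and using no don't-care or never-match bits. The tunnel code later in
 * this file uses the same call shape to patch the destination port key.
 *
 *	u8 key[20] = { 0 };
 *	u16 port = 4789;
 *
 *	if (ice_set_key(key, sizeof(key), (u8 *)&port, NULL, NULL, NULL,
 *			4, sizeof(port)))
 *		return ICE_ERR_CFG;
 */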
/**
 * ice_acquire_global_cfg_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the global config lock for reading
 * or writing of the package. When attempting to obtain write access, the
 * caller must check for the following two return values:
 *
 * ICE_SUCCESS        - Means the caller has acquired the global config lock
 *                      and can perform writing of the package.
 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
 *                      package or has found that no update was necessary; in
 *                      this case, the caller can just skip performing any
 *                      update of the package.
 */
static enum ice_status
ice_acquire_global_cfg_lock(struct ice_hw *hw,
			    enum ice_aq_res_access_type access)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);

	if (!status)
		mutex_lock(&ice_global_cfg_lock_sw);
	else if (status == ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");

	return status;
}

/**
 * ice_release_global_cfg_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the global config lock.
 */
static void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	mutex_unlock(&ice_global_cfg_lock_sw);
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}
/**
 * ice_acquire_change_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the change lock.
 */
static enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
			       ICE_CHANGE_LOCK_TIMEOUT);
}

/**
 * ice_release_change_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the change lock using the proper Admin Command.
 */
static void ice_release_change_lock(struct ice_hw *hw)
{
	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}
/**
 * ice_aq_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Download Package (0x0C40)
 */
static enum ice_status
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}
/**
 * ice_aq_update_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package cmd buffer
 * @buf_size: the size of the package cmd buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Update Package (0x0C42)
 */
static enum ice_status
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
		  bool last_buf, u32 *error_offset, u32 *error_info,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}
/**
 * ice_find_seg_in_pkg
 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 */
static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
		    struct ice_pkg_hdr *pkg_hdr)
{
	u32 i;

	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
		  pkg_hdr->pkg_format_ver.update,
		  pkg_hdr->pkg_format_ver.draft);

	/* Search all package segments for the requested segment type */
	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
		struct ice_generic_seg_hdr *seg;

		seg = (struct ice_generic_seg_hdr *)
			((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));

		if (le32_to_cpu(seg->seg_type) == seg_type)
			return seg;
	}

	return NULL;
}
/**
 * ice_update_pkg
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains change lock and updates package.
 */
static enum ice_status
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	u32 offset, info, i;

	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		return status;

	for (i = 0; i < count; i++) {
		struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
		bool last = ((i + 1) == count);

		status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
					   last, &offset, &info, NULL);

		if (status) {
			ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}
	}

	ice_release_change_lock(hw);

	return status;
}
/**
 * ice_dwnld_cfg_bufs
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains global config lock and downloads the package configuration buffers
 * to the firmware. Metadata buffers are skipped, and the first metadata buffer
 * found indicates that the rest of the buffers are all metadata buffers.
 */
static enum ice_status
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	struct ice_buf_hdr *bh;
	u32 offset, info, i;

	if (!bufs || !count)
		return ICE_ERR_PARAM;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)bufs;
	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
		return 0;

	/* reset pkg_dwnld_status in case this function is called in the
	 * reset/rebuild flow
	 */
	hw->pkg_dwnld_status = ICE_AQ_RC_OK;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status) {
		if (status == ICE_ERR_AQ_NO_WORK)
			hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
		else
			hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		return status;
	}

	for (i = 0; i < count; i++) {
		bool last = ((i + 1) == count);

		if (!last) {
			/* check next buffer for metadata flag */
			bh = (struct ice_buf_hdr *)(bufs + i + 1);

			/* A set metadata flag in the next buffer will signal
			 * that the current buffer will be the last buffer
			 * downloaded
			 */
			if (le16_to_cpu(bh->section_count))
				if (le32_to_cpu(bh->section_entry[0].type) &
				    ICE_METADATA_BUF)
					last = true;
		}

		bh = (struct ice_buf_hdr *)(bufs + i);

		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
					     &offset, &info, NULL);

		/* Save AQ status from download package */
		hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		if (status) {
			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
				  status, offset, info);

			break;
		}

		if (last)
			break;
	}

	ice_release_global_cfg_lock(hw);

	return status;
}
/**
 * ice_aq_get_pkg_info_list
 * @hw: pointer to the hardware structure
 * @pkg_info: the buffer which will receive the information list
 * @buf_size: the size of the pkg_info information buffer
 * @cd: pointer to command details structure or NULL
 *
 * Get Package Info List (0x0C43)
 */
static enum ice_status
ice_aq_get_pkg_info_list(struct ice_hw *hw,
			 struct ice_aqc_get_pkg_info_resp *pkg_info,
			 u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);

	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}
/**
 * ice_download_pkg
 * @hw: pointer to the hardware structure
 * @ice_seg: pointer to the segment of the package to be downloaded
 *
 * Handles the download of a complete package.
 */
static enum ice_status
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_buf_table *ice_buf_tbl;

	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
		  ice_seg->hdr.seg_format_ver.major,
		  ice_seg->hdr.seg_format_ver.minor,
		  ice_seg->hdr.seg_format_ver.update,
		  ice_seg->hdr.seg_format_ver.draft);

	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
		  le32_to_cpu(ice_seg->hdr.seg_type),
		  le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);

	ice_buf_tbl = ice_find_buf_table(ice_seg);

	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
		  le32_to_cpu(ice_buf_tbl->buf_count));

	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
				  le32_to_cpu(ice_buf_tbl->buf_count));
}
/**
 * ice_init_pkg_info
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 *
 * Saves off the package details into the HW structure.
 */
static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_global_metadata_seg *meta_seg;
	struct ice_generic_seg_hdr *seg_hdr;

	if (!pkg_hdr)
		return ICE_ERR_PARAM;

	meta_seg = (struct ice_global_metadata_seg *)
		   ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
	if (meta_seg) {
		hw->pkg_ver = meta_seg->pkg_ver;
		memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name));

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
			  meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
			  meta_seg->pkg_name);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "Did not find metadata segment in driver package\n");
		return ICE_ERR_CFG;
	}

	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
	if (seg_hdr) {
		hw->ice_pkg_ver = seg_hdr->seg_format_ver;
		memcpy(hw->ice_pkg_name, seg_hdr->seg_id,
		       sizeof(hw->ice_pkg_name));

		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_format_ver.major,
			  seg_hdr->seg_format_ver.minor,
			  seg_hdr->seg_format_ver.update,
			  seg_hdr->seg_format_ver.draft,
			  seg_hdr->seg_id);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
		return ICE_ERR_CFG;
	}

	return 0;
}
/**
 * ice_get_pkg_info
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW structure.
 */
static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
{
	struct ice_aqc_get_pkg_info_resp *pkg_info;
	enum ice_status status;
	u16 size;
	u32 i;

	size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
	pkg_info = kzalloc(size, GFP_KERNEL);
	if (!pkg_info)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
	if (status)
		goto init_pkg_free_alloc;

	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT	4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
		u8 place = 0;

		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			hw->active_track_id =
				le32_to_cpu(pkg_info->pkg_info[i].track_id);
			memcpy(hw->active_pkg_name,
			       pkg_info->pkg_info[i].name,
			       sizeof(pkg_info->pkg_info[i].name));
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
		}
		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
			  i, pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);
	}

init_pkg_free_alloc:
	kfree(pkg_info);

	return status;
}
/**
 * ice_verify_pkg - verify package
 * @pkg: pointer to the package buffer
 * @len: size of the package buffer
 *
 * Verifies various attributes of the package file, including length, format
 * version, and the requirement of at least one segment.
 */
static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
	u32 seg_count;
	u32 i;

	if (len < struct_size(pkg, seg_offset, 1))
		return ICE_ERR_BUF_TOO_SHORT;

	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
		return ICE_ERR_CFG;

	/* pkg must have at least one segment */
	seg_count = le32_to_cpu(pkg->seg_count);
	if (seg_count < 1)
		return ICE_ERR_CFG;

	/* make sure segment array fits in package length */
	if (len < struct_size(pkg, seg_offset, seg_count))
		return ICE_ERR_BUF_TOO_SHORT;

	/* all segments must fit within length */
	for (i = 0; i < seg_count; i++) {
		u32 off = le32_to_cpu(pkg->seg_offset[i]);
		struct ice_generic_seg_hdr *seg;

		/* segment header must fit */
		if (len < off + sizeof(*seg))
			return ICE_ERR_BUF_TOO_SHORT;

		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);

		/* segment body must fit */
		if (len < off + le32_to_cpu(seg->seg_size))
			return ICE_ERR_BUF_TOO_SHORT;
	}

	return 0;
}
/**
 * ice_free_seg - free package segment pointer
 * @hw: pointer to the hardware structure
 *
 * Frees the package segment pointer in the proper manner, depending on if the
 * segment was allocated or just the passed in pointer was stored.
 */
void ice_free_seg(struct ice_hw *hw)
{
	if (hw->pkg_copy) {
		devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
		hw->pkg_copy = NULL;
		hw->pkg_size = 0;
	}
	hw->seg = NULL;
}
/**
 * ice_init_pkg_regs - initialize additional package registers
 * @hw: pointer to the hardware structure
 */
static void ice_init_pkg_regs(struct ice_hw *hw)
{
#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
#define ICE_SW_BLK_IDX	0

	/* setup Switch block input mask, which is 48-bits in two parts */
	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
}
/**
 * ice_chk_pkg_version - check package version for compatibility with driver
 * @pkg_ver: pointer to a version structure to check
 *
 * Check to make sure that the package about to be downloaded is compatible with
 * the driver. To be compatible, the major and minor components of the package
 * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
 * definitions.
 */
static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
		return ICE_ERR_NOT_SUPPORTED;

	return 0;
}
/**
 * ice_chk_pkg_compat
 * @hw: pointer to the hardware structure
 * @ospkg: pointer to the package hdr
 * @seg: pointer to the package segment hdr
 *
 * This function checks the package version compatibility with driver and NVM
 */
static enum ice_status
ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
		   struct ice_seg **seg)
{
	struct ice_aqc_get_pkg_info_resp *pkg;
	enum ice_status status;
	u16 size;
	u32 i;

	/* Check package version compatibility */
	status = ice_chk_pkg_version(&hw->pkg_ver);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
		return status;
	}

	/* find ICE segment in given package */
	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
						     ospkg);
	if (!*seg) {
		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
		return ICE_ERR_CFG;
	}

	/* Check if FW is compatible with the OS package */
	size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
	pkg = kzalloc(size, GFP_KERNEL);
	if (!pkg)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
	if (status)
		goto fw_ddp_compat_free_alloc;

	for (i = 0; i < le32_to_cpu(pkg->count); i++) {
		/* loop till we find the NVM package */
		if (!pkg->pkg_info[i].is_in_nvm)
			continue;
		if ((*seg)->hdr.seg_format_ver.major !=
			pkg->pkg_info[i].ver.major ||
		    (*seg)->hdr.seg_format_ver.minor >
			pkg->pkg_info[i].ver.minor) {
			status = ICE_ERR_FW_DDP_MISMATCH;
			ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
		}
		/* done processing NVM package so break */
		break;
	}

fw_ddp_compat_free_alloc:
	kfree(pkg);
	return status;
}
/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing. First, the function extracts package
 * information such as version. Then it finds the ice configuration segment
 * within the package; this function then saves a copy of the segment pointer
 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note that if
 * a previous PF driver has already downloaded the package successfully, then
 * the current driver will not have to download the package again.
 *
 * The local package contents will be used to query default behavior and to
 * update specific sections of the HW's version of the package (e.g. to update
 * the parse graph to understand new protocols).
 *
 * This function stores a pointer to the package buffer memory, and it is
 * expected that the supplied buffer will not be freed immediately. If the
 * package buffer needs to be freed, such as when read from a file, use
 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
 * case.
 */
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
	struct ice_pkg_hdr *pkg;
	enum ice_status status;
	struct ice_seg *seg;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	pkg = (struct ice_pkg_hdr *)buf;
	status = ice_verify_pkg(pkg, len);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
			  status);
		return status;
	}

	/* initialize package info */
	status = ice_init_pkg_info(hw, pkg);
	if (status)
		return status;

	/* before downloading the package, check package version for
	 * compatibility with driver
	 */
	status = ice_chk_pkg_compat(hw, pkg, &seg);
	if (status)
		return status;

	/* initialize package hints and then download package */
	ice_init_pkg_hints(hw, seg);
	status = ice_download_pkg(hw, seg);
	if (status == ICE_ERR_AQ_NO_WORK) {
		ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
		status = 0;
	}

	/* Get information on the package currently loaded in HW, then make sure
	 * the driver is compatible with this version.
	 */
	if (!status) {
		status = ice_get_pkg_info(hw);
		if (!status)
			status = ice_chk_pkg_version(&hw->active_pkg_ver);
	}

	if (!status) {
		hw->seg = seg;
		/* on successful package download update other required
		 * registers to support the package and fill HW tables
		 * with package content.
		 */
		ice_init_pkg_regs(hw);
		ice_fill_blk_tbls(hw);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
			  status);
	}

	return status;
}
/**
 * ice_copy_and_init_pkg - initialize/download a copy of the package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function copies the package buffer, and then calls ice_init_pkg() to
 * initialize the copied package contents.
 *
 * The copying is necessary if the package buffer supplied is constant, or if
 * the memory may disappear shortly after calling this function.
 *
 * If the package buffer resides in the data segment and can be modified, the
 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
 *
 * However, if the package buffer needs to be copied first, such as when being
 * read from a file, the caller should use ice_copy_and_init_pkg().
 *
 * This function will first copy the package buffer, before calling
 * ice_init_pkg(). The caller is free to immediately destroy the original
 * package buffer, as the new copy will be managed by this function and
 * related routines.
 */
enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
{
	enum ice_status status;
	u8 *buf_copy;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);

	status = ice_init_pkg(hw, buf_copy, len);
	if (status) {
		/* Free the copy, since we failed to initialize the package */
		devm_kfree(ice_hw_to_dev(hw), buf_copy);
	} else {
		/* Track the copied pkg so we can free it later */
		hw->pkg_copy = buf_copy;
		hw->pkg_size = len;
	}

	return status;
}
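/* Illustrative caller sketch (editorial, not part of this file; the real
 * caller lives elsewhere in the driver, and the firmware path shown is an
 * assumption for the example):
 *
 *	const struct firmware *fw;
 *
 *	if (!request_firmware(&fw, "intel/ice/ddp/ice.pkg", dev)) {
 *		ice_copy_and_init_pkg(hw, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 */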
/**
 * ice_pkg_buf_alloc
 * @hw: pointer to the HW structure
 *
 * Allocates a package buffer and returns a pointer to the buffer header.
 * Note: all package contents must be in Little Endian form.
 */
static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
{
	struct ice_buf_build *bld;
	struct ice_buf_hdr *buf;

	bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
	if (!bld)
		return NULL;

	buf = (struct ice_buf_hdr *)bld;
	buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
					     section_entry));
	return bld;
}
/**
 * ice_pkg_buf_free
 * @hw: pointer to the HW structure
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Frees a package buffer
 */
static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
	devm_kfree(ice_hw_to_dev(hw), bld);
}
/**
 * ice_pkg_buf_reserve_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @count: the number of sections to reserve
 *
 * Reserves one or more section table entries in a package buffer. This routine
 * can be called multiple times as long as they are made before calling
 * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
 * called, the number of sections that can be allocated can no longer be
 * increased; not using all reserved sections is fine, but this will
 * result in some wasted space in the buffer.
 * Note: all package contents must be in Little Endian form.
 */
static enum ice_status
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
	struct ice_buf_hdr *buf;
	u16 section_count;
	u16 data_end;

	if (!bld)
		return ICE_ERR_PARAM;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* already an active section, can't increase table size */
	section_count = le16_to_cpu(buf->section_count);
	if (section_count > 0)
		return ICE_ERR_CFG;

	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
		return ICE_ERR_CFG;
	bld->reserved_section_table_entries += count;

	data_end = le16_to_cpu(buf->data_end) +
		   (count * sizeof(buf->section_entry[0]));
	buf->data_end = cpu_to_le16(data_end);

	return 0;
}
/**
 * ice_pkg_buf_alloc_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @type: the section type value
 * @size: the size of the section to reserve (in bytes)
 *
 * Reserves memory in the buffer for a section's content and updates the
 * buffers' status accordingly. This routine returns a pointer to the first
 * byte of the section start within the buffer, which is used to fill in the
 * section contents.
 * Note: all package contents must be in Little Endian form.
 */
static void *
ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
{
	struct ice_buf_hdr *buf;
	u16 sect_count;
	u16 data_end;

	if (!bld || !type || !size)
		return NULL;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* check for enough space left in buffer */
	data_end = le16_to_cpu(buf->data_end);

	/* section start must align on 4 byte boundary */
	data_end = ALIGN(data_end, 4);

	if ((data_end + size) > ICE_MAX_S_DATA_END)
		return NULL;

	/* check for more available section table entries */
	sect_count = le16_to_cpu(buf->section_count);
	if (sect_count < bld->reserved_section_table_entries) {
		void *section_ptr = ((u8 *)buf) + data_end;

		buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
		buf->section_entry[sect_count].size = cpu_to_le16(size);
		buf->section_entry[sect_count].type = cpu_to_le32(type);

		data_end += size;
		buf->data_end = cpu_to_le16(data_end);

		buf->section_count = cpu_to_le16(sect_count + 1);
		return section_ptr;
	}

	/* no free section table entries */
	return NULL;
}
/**
 * ice_pkg_buf_get_active_sections
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Returns the number of active sections. Before using the package buffer
 * in an update package command, the caller should make sure that there is at
 * least one active section - otherwise, the buffer is not legal and should
 * not be used.
 * Note: all package contents must be in Little Endian form.
 */
static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
{
	struct ice_buf_hdr *buf;

	if (!bld)
		return 0;

	buf = (struct ice_buf_hdr *)&bld->buf;
	return le16_to_cpu(buf->section_count);
}
/**
 * ice_pkg_buf
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Return a pointer to the buffer's header
 */
static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
	if (!bld)
		return NULL;

	return &bld->buf;
}
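/* The package-buffer helpers above are used together in a fixed pattern; the
 * tunnel create/destroy routines below are the in-tree examples:
 * ice_pkg_buf_alloc(), then ice_pkg_buf_reserve_section(), then one
 * ice_pkg_buf_alloc_section() per reserved entry, then ice_update_pkg() on
 * ice_pkg_buf(), and finally ice_pkg_buf_free().
 */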
/**
 * ice_get_open_tunnel_port - retrieve an open tunnel port
 * @hw: pointer to the HW structure
 * @port: returns open port
 */
bool
ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
{
	bool res = false;
	u16 i;

	mutex_lock(&hw->tnl_lock);

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
			*port = hw->tnl.tbl[i].port;
			res = true;
			break;
		}

	mutex_unlock(&hw->tnl_lock);

	return res;
}
/**
 * ice_tunnel_idx_to_entry - convert linear index to the sparse one
 * @hw: pointer to the HW structure
 * @type: type of tunnel
 * @idx: linear index
 *
 * Stack assumes we have 2 linear tables with indexes [0, count_valid),
 * but really the port table may be sparse, and types are mixed, so convert
 * the stack index into the device index.
 */
static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
				   u16 idx)
{
	u16 i;

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid &&
		    hw->tnl.tbl[i].type == type &&
		    idx-- == 0)
			return i;

	WARN_ON_ONCE(1);
	return 0;
}
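/* Example (editorial, not in the original source): if the device table holds
 * { VXLAN, GENEVE, VXLAN } in entries 0-2, the stack's VXLAN index 1 maps to
 * device entry 2, while the GENEVE index 0 maps to device entry 1.
 */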
/**
 * ice_create_tunnel
 * @hw: pointer to the HW structure
 * @index: device table entry
 * @type: type of tunnel
 * @port: port of tunnel to create
 *
 * Create a tunnel by updating the parse graph in the parser. We do that by
 * creating a package buffer with the tunnel info and issuing an update package
 * command.
 */
static enum ice_status
ice_create_tunnel(struct ice_hw *hw, u16 index,
		  enum ice_tunnel_type type, u16 port)
{
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	enum ice_status status = ICE_ERR_MAX_LIMIT;
	struct ice_buf_build *bld;

	mutex_lock(&hw->tnl_lock);

	bld = ice_pkg_buf_alloc(hw);
	if (!bld) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_tunnel_end;
	}

	/* allocate 2 sections, one for Rx parser, one for Tx parser */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto ice_create_tunnel_err;

	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
					    struct_size(sect_rx, tcam, 1));
	if (!sect_rx)
		goto ice_create_tunnel_err;
	sect_rx->count = cpu_to_le16(1);

	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
					    struct_size(sect_tx, tcam, 1));
	if (!sect_tx)
		goto ice_create_tunnel_err;
	sect_tx->count = cpu_to_le16(1);

	/* copy original boost entry to update package buffer */
	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
	       sizeof(*sect_rx->tcam));

	/* over-write the never-match dest port key bits with the encoded port
	 * bits
	 */
	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
		    (u8 *)&port, NULL, NULL, NULL,
		    (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));

	/* exact copy of entry to Tx section entry */
	memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
	if (!status)
		hw->tnl.tbl[index].port = port;

ice_create_tunnel_err:
	ice_pkg_buf_free(hw, bld);

ice_create_tunnel_end:
	mutex_unlock(&hw->tnl_lock);

	return status;
}
/**
 * ice_destroy_tunnel
 * @hw: pointer to the HW structure
 * @index: device table entry
 * @type: type of tunnel
 * @port: port of tunnel to destroy (ignored if the all parameter is true)
 *
 * Destroys a tunnel or all tunnels by creating an update package buffer
 * targeting the specific updates requested and then performing an update
 * package.
 */
static enum ice_status
ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
		   u16 port)
{
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	enum ice_status status = ICE_ERR_MAX_LIMIT;
	struct ice_buf_build *bld;

	mutex_lock(&hw->tnl_lock);

	if (WARN_ON(!hw->tnl.tbl[index].valid ||
		    hw->tnl.tbl[index].type != type ||
		    hw->tnl.tbl[index].port != port)) {
		status = ICE_ERR_OUT_OF_RANGE;
		goto ice_destroy_tunnel_end;
	}

	bld = ice_pkg_buf_alloc(hw);
	if (!bld) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_destroy_tunnel_end;
	}

	/* allocate 2 sections, one for Rx parser, one for Tx parser */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto ice_destroy_tunnel_err;

	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
					    struct_size(sect_rx, tcam, 1));
	if (!sect_rx)
		goto ice_destroy_tunnel_err;
	sect_rx->count = cpu_to_le16(1);

	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
					    struct_size(sect_tx, tcam, 1));
	if (!sect_tx)
		goto ice_destroy_tunnel_err;
	sect_tx->count = cpu_to_le16(1);

	/* copy original boost entry to update package buffer, one copy to Rx
	 * section, another copy to the Tx section
	 */
	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
	       sizeof(*sect_rx->tcam));
	memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
	       sizeof(*sect_tx->tcam));

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
	if (!status)
		hw->tnl.tbl[index].port = 0;

ice_destroy_tunnel_err:
	ice_pkg_buf_free(hw, bld);

ice_destroy_tunnel_end:
	mutex_unlock(&hw->tnl_lock);

	return status;
}
int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
			    unsigned int idx, struct udp_tunnel_info *ti)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	enum ice_tunnel_type tnl_type;
	enum ice_status status;
	u16 index;

	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
	index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);

	status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
	if (status) {
		netdev_err(netdev, "Error adding UDP tunnel - %s\n",
			   ice_stat_str(status));
		return -EIO;
	}

	udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
	return 0;
}
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
			      unsigned int idx, struct udp_tunnel_info *ti)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	enum ice_tunnel_type tnl_type;
	enum ice_status status;

	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;

	status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
				    ntohs(ti->port));
	if (status) {
		netdev_err(netdev, "Error removing UDP tunnel - %s\n",
			   ice_stat_str(status));
		return -EIO;
	}

	return 0;
}
/* PTG Management */
/**
 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to search for
 * @ptg: pointer to variable that receives the PTG
 *
 * This function will search the PTGs for a particular ptype, returning the
 * PTG ID that contains it through the PTG parameter, with the value of
 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
 */
static enum ice_status
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{
	if (ptype >= ICE_XLT1_CNT || !ptg)
		return ICE_ERR_PARAM;

	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
	return 0;
}
/**
 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptg: the PTG to allocate
 *
 * This function allocates a given packet type group ID specified by the PTG
 * parameter.
 */
static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
{
	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
}
/**
 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to remove
 * @ptg: the PTG to remove the ptype from
 *
 * This function will remove the ptype from the specific PTG, and move it to
 * the default PTG (ICE_DEFAULT_PTG).
 */
static enum ice_status
ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
	struct ice_ptg_ptype **ch;
	struct ice_ptg_ptype *p;

	if (ptype > ICE_XLT1_CNT - 1)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Should not happen if .in_use is set, bad config */
	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
		return ICE_ERR_CFG;

	/* find the ptype within this PTG, and bypass the link over it */
	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	while (p) {
		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
			*ch = p->next_ptype;
			break;
		}

		ch = &p->next_ptype;
		p = p->next_ptype;
	}

	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;

	return 0;
}
1943 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
1944 * @hw: pointer to the hardware structure
1945 * @blk: HW block
1946 * @ptype: the ptype to add or move
1947 * @ptg: the PTG to add or move the ptype to
1949 * This function will either add or move a ptype to a particular PTG depending
1950 * on whether the ptype is already part of another group. Note that using a
1951 * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
1952 * default PTG.
1954 static enum ice_status
1955 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
1957 enum ice_status status;
1958 u8 original_ptg;
1960 if (ptype > ICE_XLT1_CNT - 1)
1961 return ICE_ERR_PARAM;
1963 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
1964 return ICE_ERR_DOES_NOT_EXIST;
1966 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
1967 if (status)
1968 return status;
1970 /* Is ptype already in the correct PTG? */
1971 if (original_ptg == ptg)
1972 return 0;
1974 /* Remove from original PTG and move back to the default PTG */
1975 if (original_ptg != ICE_DEFAULT_PTG)
1976 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
1978 /* Moving to default PTG? Then we're done with this request */
1979 if (ptg == ICE_DEFAULT_PTG)
1980 return 0;
1982 /* Add ptype to PTG at beginning of list */
1983 hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
1984 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
1985 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
1986 &hw->blk[blk].xlt1.ptypes[ptype];
1988 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
1989 hw->blk[blk].xlt1.t[ptype] = ptg;
1991 return 0;
1994 /* Block / table size info */
1995 struct ice_blk_size_details {
1996 u16 xlt1; /* # XLT1 entries */
1997 u16 xlt2; /* # XLT2 entries */
1998 u16 prof_tcam; /* # profile ID TCAM entries */
1999 u16 prof_id; /* # profile IDs */
2000 u8 prof_cdid_bits; /* # CDID one-hot bits used in key */
2001 u16 prof_redir; /* # profile redirection entries */
2002 u16 es; /* # extraction sequence entries */
2003 u16 fvw; /* # field vector words */
2004 u8 overwrite; /* overwrite existing entries allowed */
2005 u8 reverse; /* reverse FV order */
2008 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2010 * Table Definitions
2011 * XLT1 - Number of entries in XLT1 table
2012 * XLT2 - Number of entries in XLT2 table
2013 * TCAM - Number of entries in the Profile ID TCAM table
2014 * CDID - Control Domain ID of the hardware block
2015 * PRED - Number of entries in the Profile Redirection Table
2016 * FV - Number of entries in the Field Vector
2017 * FVW - Width (in WORDs) of the Field Vector
2018 * OVR - Overwrite existing table entries
2019 * REV - Reverse FV
2021 /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */
2022 /* Overwrite , Reverse FV */
2023 /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
2024 false, false },
2025 /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
2026 false, false },
2027 /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2028 false, true },
2029 /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2030 true, true },
2031 /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
2032 false, false },
2035 enum ice_sid_all {
2036 ICE_SID_XLT1_OFF = 0,
2037 ICE_SID_XLT2_OFF,
2038 ICE_SID_PR_OFF,
2039 ICE_SID_PR_REDIR_OFF,
2040 ICE_SID_ES_OFF,
2041 ICE_SID_OFF_COUNT,
2044 /* Characteristic handling */
2047 * ice_match_prop_lst - determine if properties of two lists match
2048 * @list1: first properties list
2049 * @list2: second properties list
2051 * Count, cookies and the order must match in order to be considered equivalent.
2053 static bool
2054 ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
2056 struct ice_vsig_prof *tmp1;
2057 struct ice_vsig_prof *tmp2;
2058 u16 chk_count = 0;
2059 u16 count = 0;
2061 /* compare counts */
2062 list_for_each_entry(tmp1, list1, list)
2063 count++;
2064 list_for_each_entry(tmp2, list2, list)
2065 chk_count++;
2066 if (!count || count != chk_count)
2067 return false;
2069 tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
2070 tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);
2072 /* profile cookies must match, and in the exact same order, to take
2073 * priority into account
2075 while (count--) {
2076 if (tmp2->profile_cookie != tmp1->profile_cookie)
2077 return false;
2079 tmp1 = list_next_entry(tmp1, list);
2080 tmp2 = list_next_entry(tmp2, list);
2083 return true;
2086 /* VSIG Management */
2089 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
2090 * @hw: pointer to the hardware structure
2091 * @blk: HW block
2092 * @vsi: VSI of interest
2093 * @vsig: pointer to receive the VSI group
2095 * This function will look up the VSI entry in the XLT2 list and return
2096 * the VSI group it is associated with.
2098 static enum ice_status
2099 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2101 if (!vsig || vsi >= ICE_MAX_VSI)
2102 return ICE_ERR_PARAM;
2104 /* As long as there's a default or valid VSIG associated with the input
2105 * VSI, the function returns success. Any handling of the VSIG will be
2106 * done by the following add, update or remove functions.
2108 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2110 return 0;
2114 * ice_vsig_alloc_val - allocate a new VSIG by value
2115 * @hw: pointer to the hardware structure
2116 * @blk: HW block
2117 * @vsig: the VSIG to allocate
2119 * This function will allocate a given VSIG specified by the VSIG parameter.
2121 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2123 u16 idx = vsig & ICE_VSIG_IDX_M;
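/* only the index portion of the VSIG selects the table entry; the value
 * returned below re-encodes that index together with the owning PF ID
 * via ICE_VSIG_VALUE()
 */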
2125 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2126 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2127 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2130 return ICE_VSIG_VALUE(idx, hw->pf_id);
2134 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
2135 * @hw: pointer to the hardware structure
2136 * @blk: HW block
2138 * This function will iterate through the VSIG list, mark the first
2139 * unused entry as in use for the new VSIG, and return that value.
2141 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2143 u16 i;
2145 for (i = 1; i < ICE_MAX_VSIGS; i++)
2146 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2147 return ice_vsig_alloc_val(hw, blk, i);
2149 return ICE_DEFAULT_VSIG;
2153 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
2154 * @hw: pointer to the hardware structure
2155 * @blk: HW block
2156 * @chs: characteristic list
2157 * @vsig: returns the VSIG with the matching profiles, if found
2159 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
2160 * a group have the same characteristic set. To check if there exists a VSIG
2161 * which has the same characteristics as the input characteristics, this
2162 * function will iterate through the XLT2 list and return the VSIG that has a
2163 * matching configuration. In order to make sure that priorities are accounted
2164 * for, the list must match exactly, including the order in which the
2165 * characteristics are listed.
2167 static enum ice_status
2168 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2169 struct list_head *chs, u16 *vsig)
2171 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2172 u16 i;
2174 for (i = 0; i < xlt2->count; i++)
2175 if (xlt2->vsig_tbl[i].in_use &&
2176 ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2177 *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2178 return 0;
2181 return ICE_ERR_DOES_NOT_EXIST;
2185 * ice_vsig_free - free VSI group
2186 * @hw: pointer to the hardware structure
2187 * @blk: HW block
2188 * @vsig: VSIG to remove
2190 * The function will remove all VSIs associated with the input VSIG and move
2191 * them to the DEFAULT_VSIG and mark the VSIG available.
2193 static enum ice_status
2194 ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2196 struct ice_vsig_prof *dtmp, *del;
2197 struct ice_vsig_vsi *vsi_cur;
2198 u16 idx;
2200 idx = vsig & ICE_VSIG_IDX_M;
2201 if (idx >= ICE_MAX_VSIGS)
2202 return ICE_ERR_PARAM;
2204 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2205 return ICE_ERR_DOES_NOT_EXIST;
2207 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
2209 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2210 /* If the VSIG has at least 1 VSI then iterate through the
2211 * list and remove the VSIs before deleting the group.
2213 if (vsi_cur) {
2214 /* remove all VSIs associated with this VSIG XLT2 entry */
2215 do {
2216 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2218 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2219 vsi_cur->changed = 1;
2220 vsi_cur->next_vsi = NULL;
2221 vsi_cur = tmp;
2222 } while (vsi_cur);
2224 /* NULL terminate head of VSI list */
2225 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
2228 /* free characteristic list */
2229 list_for_each_entry_safe(del, dtmp,
2230 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2231 list) {
2232 list_del(&del->list);
2233 devm_kfree(ice_hw_to_dev(hw), del);
2236 /* if VSIG characteristic list was cleared for reset
2237 * re-initialize the list head
2239 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2241 return 0;
2245 * ice_vsig_remove_vsi - remove VSI from VSIG
2246 * @hw: pointer to the hardware structure
2247 * @blk: HW block
2248 * @vsi: VSI to remove
2249 * @vsig: VSI group to remove from
2251 * The function will remove the input VSI from its VSI group and move it
2252 * to the DEFAULT_VSIG.
2254 static enum ice_status
2255 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2257 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
2258 u16 idx;
2260 idx = vsig & ICE_VSIG_IDX_M;
2262 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2263 return ICE_ERR_PARAM;
2265 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2266 return ICE_ERR_DOES_NOT_EXIST;
2268 /* entry already in default VSIG, don't have to remove */
2269 if (idx == ICE_DEFAULT_VSIG)
2270 return 0;
2272 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2273 if (!(*vsi_head))
2274 return ICE_ERR_CFG;
2276 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
2277 vsi_cur = (*vsi_head);
2279 /* iterate the VSI list, skip over the entry to be removed */
2280 while (vsi_cur) {
2281 if (vsi_tgt == vsi_cur) {
2282 (*vsi_head) = vsi_cur->next_vsi;
2283 break;
2285 vsi_head = &vsi_cur->next_vsi;
2286 vsi_cur = vsi_cur->next_vsi;
2289 /* verify if VSI was removed from group list */
2290 if (!vsi_cur)
2291 return ICE_ERR_DOES_NOT_EXIST;
2293 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2294 vsi_cur->changed = 1;
2295 vsi_cur->next_vsi = NULL;
2297 return 0;
2301 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
2302 * @hw: pointer to the hardware structure
2303 * @blk: HW block
2304 * @vsi: VSI to move
2305 * @vsig: destination VSI group
2307 * This function will move or add the input VSI to the target VSIG.
2308 * The function will find the original VSIG the VSI belongs to and
2309 * move the entry to the DEFAULT_VSIG, update the original VSIG and
2310 * then move the entry to the new VSIG.
2312 static enum ice_status
2313 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2315 struct ice_vsig_vsi *tmp;
2316 enum ice_status status;
2317 u16 orig_vsig, idx;
2319 idx = vsig & ICE_VSIG_IDX_M;
2321 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2322 return ICE_ERR_PARAM;
2324 /* if VSIG not in use and VSIG is not default type this VSIG
2325 * doesn't exist.
2327 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
2328 vsig != ICE_DEFAULT_VSIG)
2329 return ICE_ERR_DOES_NOT_EXIST;
2331 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
2332 if (status)
2333 return status;
2335 /* no update required if vsigs match */
2336 if (orig_vsig == vsig)
2337 return 0;
2339 if (orig_vsig != ICE_DEFAULT_VSIG) {
2340 /* remove entry from orig_vsig and add to default VSIG */
2341 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
2342 if (status)
2343 return status;
2346 if (idx == ICE_DEFAULT_VSIG)
2347 return 0;
2349 /* Create VSI entry and add VSIG and prop_mask values */
2350 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
2351 hw->blk[blk].xlt2.vsis[vsi].changed = 1;
2353 /* Add new entry to the head of the VSIG list */
2354 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2355 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
2356 &hw->blk[blk].xlt2.vsis[vsi];
2357 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
2358 hw->blk[blk].xlt2.t[vsi] = vsig;
2360 return 0;
2364 * ice_find_prof_id - find profile ID for a given field vector
2365 * @hw: pointer to the hardware structure
2366 * @blk: HW block
2367 * @fv: field vector to search for
2368 * @prof_id: receives the profile ID
2370 static enum ice_status
2371 ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
2372 struct ice_fv_word *fv, u8 *prof_id)
2374 struct ice_es *es = &hw->blk[blk].es;
2375 u16 off;
2376 u8 i;
2378 /* For FD, we don't want to reuse an existing profile with the same
2379 * field vector and mask, as this would cause rule interference.
2381 if (blk == ICE_BLK_FD)
2382 return ICE_ERR_DOES_NOT_EXIST;
2384 for (i = 0; i < (u8)es->count; i++) {
2385 off = i * es->fvw;
2387 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
2388 continue;
2390 *prof_id = i;
2391 return 0;
2394 return ICE_ERR_DOES_NOT_EXIST;
2398 * ice_prof_id_rsrc_type - get profile ID resource type for a block type
2399 * @blk: the block type
2400 * @rsrc_type: pointer to variable to receive the resource type
2402 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2404 switch (blk) {
2405 case ICE_BLK_FD:
2406 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
2407 break;
2408 case ICE_BLK_RSS:
2409 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
2410 break;
2411 default:
2412 return false;
2414 return true;
2418 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
2419 * @blk: the block type
2420 * @rsrc_type: pointer to variable to receive the resource type
2422 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
2424 switch (blk) {
2425 case ICE_BLK_FD:
2426 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
2427 break;
2428 case ICE_BLK_RSS:
2429 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
2430 break;
2431 default:
2432 return false;
2434 return true;
2438 * ice_alloc_tcam_ent - allocate hardware TCAM entry
2439 * @hw: pointer to the HW struct
2440 * @blk: the block to allocate the TCAM for
2441 * @tcam_idx: pointer to variable to receive the TCAM entry
2443 * This function allocates a new entry in a Profile ID TCAM for a specific
2444 * block.
2446 static enum ice_status
2447 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
2449 u16 res_type;
2451 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2452 return ICE_ERR_PARAM;
2454 return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
2458 * ice_free_tcam_ent - free hardware TCAM entry
2459 * @hw: pointer to the HW struct
2460 * @blk: the block from which to free the TCAM entry
2461 * @tcam_idx: the TCAM entry to free
2463 * This function frees an entry in a Profile ID TCAM for a specific block.
2465 static enum ice_status
2466 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
2468 u16 res_type;
2470 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
2471 return ICE_ERR_PARAM;
2473 return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
2477 * ice_alloc_prof_id - allocate profile ID
2478 * @hw: pointer to the HW struct
2479 * @blk: the block to allocate the profile ID for
2480 * @prof_id: pointer to variable to receive the profile ID
2482 * This function allocates a new profile ID, which also corresponds to a Field
2483 * Vector (Extraction Sequence) entry.
2485 static enum ice_status
2486 ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
2488 enum ice_status status;
2489 u16 res_type;
2490 u16 get_prof;
2492 if (!ice_prof_id_rsrc_type(blk, &res_type))
2493 return ICE_ERR_PARAM;
2495 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
2496 if (!status)
2497 *prof_id = (u8)get_prof;
2499 return status;
2503 * ice_free_prof_id - free profile ID
2504 * @hw: pointer to the HW struct
2505 * @blk: the block from which to free the profile ID
2506 * @prof_id: the profile ID to free
2508 * This function frees a profile ID, which also corresponds to a Field Vector.
2510 static enum ice_status
2511 ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2513 u16 tmp_prof_id = (u16)prof_id;
2514 u16 res_type;
2516 if (!ice_prof_id_rsrc_type(blk, &res_type))
2517 return ICE_ERR_PARAM;
2519 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
2523 * ice_prof_inc_ref - increment reference count for profile
2524 * @hw: pointer to the HW struct
2525 * @blk: the block from which to free the profile ID
2526 * @prof_id: the profile ID for which to increment the reference count
2528 static enum ice_status
2529 ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2531 if (prof_id > hw->blk[blk].es.count)
2532 return ICE_ERR_PARAM;
2534 hw->blk[blk].es.ref_count[prof_id]++;
2536 return 0;
2540 * ice_write_es - write an extraction sequence to hardware
2541 * @hw: pointer to the HW struct
2542 * @blk: the block in which to write the extraction sequence
2543 * @prof_id: the profile ID to write
2544 * @fv: pointer to the extraction sequence to write - NULL to clear extraction
2546 static void
2547 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
2548 struct ice_fv_word *fv)
2550 u16 off;
2552 off = prof_id * hw->blk[blk].es.fvw;
2553 if (!fv) {
2554 memset(&hw->blk[blk].es.t[off], 0,
2555 hw->blk[blk].es.fvw * sizeof(*fv));
2556 hw->blk[blk].es.written[prof_id] = false;
2557 } else {
2558 memcpy(&hw->blk[blk].es.t[off], fv,
2559 hw->blk[blk].es.fvw * sizeof(*fv));
2564 * ice_prof_dec_ref - decrement reference count for profile
2565 * @hw: pointer to the HW struct
2566 * @blk: the block from which to free the profile ID
2567 * @prof_id: the profile ID for which to decrement the reference count
2569 static enum ice_status
2570 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
2572 if (prof_id > hw->blk[blk].es.count)
2573 return ICE_ERR_PARAM;
2575 if (hw->blk[blk].es.ref_count[prof_id] > 0) {
2576 if (!--hw->blk[blk].es.ref_count[prof_id]) {
2577 ice_write_es(hw, blk, prof_id, NULL);
2578 return ice_free_prof_id(hw, blk, prof_id);
2582 return 0;
2585 /* Block / table section IDs */
2586 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
2587 /* SWITCH */
2588 { ICE_SID_XLT1_SW,
2589 ICE_SID_XLT2_SW,
2590 ICE_SID_PROFID_TCAM_SW,
2591 ICE_SID_PROFID_REDIR_SW,
2592 ICE_SID_FLD_VEC_SW
2595 /* ACL */
2596 { ICE_SID_XLT1_ACL,
2597 ICE_SID_XLT2_ACL,
2598 ICE_SID_PROFID_TCAM_ACL,
2599 ICE_SID_PROFID_REDIR_ACL,
2600 ICE_SID_FLD_VEC_ACL
2603 /* FD */
2604 { ICE_SID_XLT1_FD,
2605 ICE_SID_XLT2_FD,
2606 ICE_SID_PROFID_TCAM_FD,
2607 ICE_SID_PROFID_REDIR_FD,
2608 ICE_SID_FLD_VEC_FD
2611 /* RSS */
2612 { ICE_SID_XLT1_RSS,
2613 ICE_SID_XLT2_RSS,
2614 ICE_SID_PROFID_TCAM_RSS,
2615 ICE_SID_PROFID_REDIR_RSS,
2616 ICE_SID_FLD_VEC_RSS
2619 /* PE */
2620 { ICE_SID_XLT1_PE,
2621 ICE_SID_XLT2_PE,
2622 ICE_SID_PROFID_TCAM_PE,
2623 ICE_SID_PROFID_REDIR_PE,
2624 ICE_SID_FLD_VEC_PE
2629 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
2630 * @hw: pointer to the hardware structure
2631 * @blk: the HW block to initialize
2633 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
2635 u16 pt;
2637 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
2638 u8 ptg;
2640 ptg = hw->blk[blk].xlt1.t[pt];
2641 if (ptg != ICE_DEFAULT_PTG) {
2642 ice_ptg_alloc_val(hw, blk, ptg);
2643 ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
2649 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
2650 * @hw: pointer to the hardware structure
2651 * @blk: the HW block to initialize
2653 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
2655 u16 vsi;
2657 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
2658 u16 vsig;
2660 vsig = hw->blk[blk].xlt2.t[vsi];
2661 if (vsig) {
2662 ice_vsig_alloc_val(hw, blk, vsig);
2663 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
2664 /* no changes at this time, since this has been
2665 * initialized from the original package
2667 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
2673 * ice_init_sw_db - init software database from HW tables
2674 * @hw: pointer to the hardware structure
2676 static void ice_init_sw_db(struct ice_hw *hw)
2678 u16 i;
2680 for (i = 0; i < ICE_BLK_COUNT; i++) {
2681 ice_init_sw_xlt1_db(hw, (enum ice_block)i);
2682 ice_init_sw_xlt2_db(hw, (enum ice_block)i);
2687 * ice_fill_tbl - Reads content of a single table type into database
2688 * @hw: pointer to the hardware structure
2689 * @block_id: Block ID of the table to copy
2690 * @sid: Section ID of the table to copy
2692 * Will attempt to read the entire content of a given table of a single block
2693 * into the driver database. We assume that the buffer will always
2694 * be as large or larger than the data contained in the package. If
2695 * this condition is not met, there is most likely an error in the package
2696 * contents.
2698 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
2700 u32 dst_len, sect_len, offset = 0;
2701 struct ice_prof_redir_section *pr;
2702 struct ice_prof_id_section *pid;
2703 struct ice_xlt1_section *xlt1;
2704 struct ice_xlt2_section *xlt2;
2705 struct ice_sw_fv_section *es;
2706 struct ice_pkg_enum state;
2707 u8 *src, *dst;
2708 void *sect;
2710 /* if the HW segment pointer is NULL then the first iteration of
2711 * ice_pkg_enum_section() will fail. In this case the HW tables will
2712 * not be filled and we simply return.
2714 if (!hw->seg) {
2715 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
2716 return;
2719 memset(&state, 0, sizeof(state));
2721 sect = ice_pkg_enum_section(hw->seg, &state, sid);
2723 while (sect) {
2724 switch (sid) {
2725 case ICE_SID_XLT1_SW:
2726 case ICE_SID_XLT1_FD:
2727 case ICE_SID_XLT1_RSS:
2728 case ICE_SID_XLT1_ACL:
2729 case ICE_SID_XLT1_PE:
2730 xlt1 = (struct ice_xlt1_section *)sect;
2731 src = xlt1->value;
2732 sect_len = le16_to_cpu(xlt1->count) *
2733 sizeof(*hw->blk[block_id].xlt1.t);
2734 dst = hw->blk[block_id].xlt1.t;
2735 dst_len = hw->blk[block_id].xlt1.count *
2736 sizeof(*hw->blk[block_id].xlt1.t);
2737 break;
2738 case ICE_SID_XLT2_SW:
2739 case ICE_SID_XLT2_FD:
2740 case ICE_SID_XLT2_RSS:
2741 case ICE_SID_XLT2_ACL:
2742 case ICE_SID_XLT2_PE:
2743 xlt2 = (struct ice_xlt2_section *)sect;
2744 src = (__force u8 *)xlt2->value;
2745 sect_len = le16_to_cpu(xlt2->count) *
2746 sizeof(*hw->blk[block_id].xlt2.t);
2747 dst = (u8 *)hw->blk[block_id].xlt2.t;
2748 dst_len = hw->blk[block_id].xlt2.count *
2749 sizeof(*hw->blk[block_id].xlt2.t);
2750 break;
2751 case ICE_SID_PROFID_TCAM_SW:
2752 case ICE_SID_PROFID_TCAM_FD:
2753 case ICE_SID_PROFID_TCAM_RSS:
2754 case ICE_SID_PROFID_TCAM_ACL:
2755 case ICE_SID_PROFID_TCAM_PE:
2756 pid = (struct ice_prof_id_section *)sect;
2757 src = (u8 *)pid->entry;
2758 sect_len = le16_to_cpu(pid->count) *
2759 sizeof(*hw->blk[block_id].prof.t);
2760 dst = (u8 *)hw->blk[block_id].prof.t;
2761 dst_len = hw->blk[block_id].prof.count *
2762 sizeof(*hw->blk[block_id].prof.t);
2763 break;
2764 case ICE_SID_PROFID_REDIR_SW:
2765 case ICE_SID_PROFID_REDIR_FD:
2766 case ICE_SID_PROFID_REDIR_RSS:
2767 case ICE_SID_PROFID_REDIR_ACL:
2768 case ICE_SID_PROFID_REDIR_PE:
2769 pr = (struct ice_prof_redir_section *)sect;
2770 src = pr->redir_value;
2771 sect_len = le16_to_cpu(pr->count) *
2772 sizeof(*hw->blk[block_id].prof_redir.t);
2773 dst = hw->blk[block_id].prof_redir.t;
2774 dst_len = hw->blk[block_id].prof_redir.count *
2775 sizeof(*hw->blk[block_id].prof_redir.t);
2776 break;
2777 case ICE_SID_FLD_VEC_SW:
2778 case ICE_SID_FLD_VEC_FD:
2779 case ICE_SID_FLD_VEC_RSS:
2780 case ICE_SID_FLD_VEC_ACL:
2781 case ICE_SID_FLD_VEC_PE:
2782 es = (struct ice_sw_fv_section *)sect;
2783 src = (u8 *)es->fv;
2784 sect_len = (u32)(le16_to_cpu(es->count) *
2785 hw->blk[block_id].es.fvw) *
2786 sizeof(*hw->blk[block_id].es.t);
2787 dst = (u8 *)hw->blk[block_id].es.t;
2788 dst_len = (u32)(hw->blk[block_id].es.count *
2789 hw->blk[block_id].es.fvw) *
2790 sizeof(*hw->blk[block_id].es.t);
2791 break;
2792 default:
2793 return;
2796 /* if the section offset exceeds destination length, terminate
2797 * table fill.
2799 if (offset > dst_len)
2800 return;
2802 /* if the sum of the section size and offset exceeds the destination
2803 * size then we are out of bounds of the HW table size for that PF.
2804 * Change the section length to fill the remaining table space
2805 * of that PF.
2807 if ((offset + sect_len) > dst_len)
2808 sect_len = dst_len - offset;
2810 memcpy(dst + offset, src, sect_len);
2811 offset += sect_len;
2812 sect = ice_pkg_enum_section(NULL, &state, sid);
2817 * ice_fill_blk_tbls - Read package context for tables
2818 * @hw: pointer to the hardware structure
2820 * Reads the current package contents and populates the driver
2821 * database with the data iteratively for all advanced feature
2822 * blocks. Assume that the HW tables have been allocated.
2824 void ice_fill_blk_tbls(struct ice_hw *hw)
2826 u8 i;
2828 for (i = 0; i < ICE_BLK_COUNT; i++) {
2829 enum ice_block blk_id = (enum ice_block)i;
2831 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
2832 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
2833 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
2834 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
2835 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
2838 ice_init_sw_db(hw);
2842 * ice_free_prof_map - free profile map
2843 * @hw: pointer to the hardware structure
2844 * @blk_idx: HW block index
2846 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
2848 struct ice_es *es = &hw->blk[blk_idx].es;
2849 struct ice_prof_map *del, *tmp;
2851 mutex_lock(&es->prof_map_lock);
2852 list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
2853 list_del(&del->list);
2854 devm_kfree(ice_hw_to_dev(hw), del);
2856 INIT_LIST_HEAD(&es->prof_map);
2857 mutex_unlock(&es->prof_map_lock);
2861 * ice_free_flow_profs - free flow profile entries
2862 * @hw: pointer to the hardware structure
2863 * @blk_idx: HW block index
2865 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
2867 struct ice_flow_prof *p, *tmp;
2869 mutex_lock(&hw->fl_profs_locks[blk_idx]);
2870 list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
2871 struct ice_flow_entry *e, *t;
2873 list_for_each_entry_safe(e, t, &p->entries, l_entry)
2874 ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
2875 ICE_FLOW_ENTRY_HNDL(e));
2877 list_del(&p->l_entry);
2879 mutex_destroy(&p->entries_lock);
2880 devm_kfree(ice_hw_to_dev(hw), p);
2882 mutex_unlock(&hw->fl_profs_locks[blk_idx]);
2884 /* if driver is in reset and tables are being cleared
2885 * re-initialize the flow profile list heads
2887 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
2891 * ice_free_vsig_tbl - free complete VSIG table entries
2892 * @hw: pointer to the hardware structure
2893 * @blk: the HW block on which to free the VSIG table entries
2895 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
2897 u16 i;
2899 if (!hw->blk[blk].xlt2.vsig_tbl)
2900 return;
2902 for (i = 1; i < ICE_MAX_VSIGS; i++)
2903 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2904 ice_vsig_free(hw, blk, i);
2908 * ice_free_hw_tbls - free hardware table memory
2909 * @hw: pointer to the hardware structure
2911 void ice_free_hw_tbls(struct ice_hw *hw)
2913 struct ice_rss_cfg *r, *rt;
2914 u8 i;
2916 for (i = 0; i < ICE_BLK_COUNT; i++) {
2917 if (hw->blk[i].is_list_init) {
2918 struct ice_es *es = &hw->blk[i].es;
2920 ice_free_prof_map(hw, i);
2921 mutex_destroy(&es->prof_map_lock);
2923 ice_free_flow_profs(hw, i);
2924 mutex_destroy(&hw->fl_profs_locks[i]);
2926 hw->blk[i].is_list_init = false;
2928 ice_free_vsig_tbl(hw, (enum ice_block)i);
2929 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
2930 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
2931 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
2932 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
2933 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
2934 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
2935 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
2936 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
2937 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
2938 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
2939 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
2942 list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
2943 list_del(&r->l_entry);
2944 devm_kfree(ice_hw_to_dev(hw), r);
2946 mutex_destroy(&hw->rss_locks);
2947 memset(hw->blk, 0, sizeof(hw->blk));
2951 * ice_init_flow_profs - init flow profile locks and list heads
2952 * @hw: pointer to the hardware structure
2953 * @blk_idx: HW block index
2955 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
2957 mutex_init(&hw->fl_profs_locks[blk_idx]);
2958 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
2962 * ice_clear_hw_tbls - clear HW tables and flow profiles
2963 * @hw: pointer to the hardware structure
2965 void ice_clear_hw_tbls(struct ice_hw *hw)
2967 u8 i;
2969 for (i = 0; i < ICE_BLK_COUNT; i++) {
2970 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
2971 struct ice_prof_tcam *prof = &hw->blk[i].prof;
2972 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
2973 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
2974 struct ice_es *es = &hw->blk[i].es;
2976 if (hw->blk[i].is_list_init) {
2977 ice_free_prof_map(hw, i);
2978 ice_free_flow_profs(hw, i);
2981 ice_free_vsig_tbl(hw, (enum ice_block)i);
2983 memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
2984 memset(xlt1->ptg_tbl, 0,
2985 ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
2986 memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
2988 memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
2989 memset(xlt2->vsig_tbl, 0,
2990 xlt2->count * sizeof(*xlt2->vsig_tbl));
2991 memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
2993 memset(prof->t, 0, prof->count * sizeof(*prof->t));
2994 memset(prof_redir->t, 0,
2995 prof_redir->count * sizeof(*prof_redir->t));
2997 memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
2998 memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
2999 memset(es->written, 0, es->count * sizeof(*es->written));
3004 * ice_init_hw_tbls - init hardware table memory
3005 * @hw: pointer to the hardware structure
3007 enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
3009 u8 i;
3011 mutex_init(&hw->rss_locks);
3012 INIT_LIST_HEAD(&hw->rss_list_head);
3013 for (i = 0; i < ICE_BLK_COUNT; i++) {
3014 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
3015 struct ice_prof_tcam *prof = &hw->blk[i].prof;
3016 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
3017 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
3018 struct ice_es *es = &hw->blk[i].es;
3019 u16 j;
3021 if (hw->blk[i].is_list_init)
3022 continue;
3024 ice_init_flow_profs(hw, i);
3025 mutex_init(&es->prof_map_lock);
3026 INIT_LIST_HEAD(&es->prof_map);
3027 hw->blk[i].is_list_init = true;
3029 hw->blk[i].overwrite = blk_sizes[i].overwrite;
3030 es->reverse = blk_sizes[i].reverse;
3032 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
3033 xlt1->count = blk_sizes[i].xlt1;
3035 xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3036 sizeof(*xlt1->ptypes), GFP_KERNEL);
3038 if (!xlt1->ptypes)
3039 goto err;
3041 xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
3042 sizeof(*xlt1->ptg_tbl),
3043 GFP_KERNEL);
3045 if (!xlt1->ptg_tbl)
3046 goto err;
3048 xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
3049 sizeof(*xlt1->t), GFP_KERNEL);
3050 if (!xlt1->t)
3051 goto err;
3053 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
3054 xlt2->count = blk_sizes[i].xlt2;
3056 xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3057 sizeof(*xlt2->vsis), GFP_KERNEL);
3059 if (!xlt2->vsis)
3060 goto err;
3062 xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3063 sizeof(*xlt2->vsig_tbl),
3064 GFP_KERNEL);
3065 if (!xlt2->vsig_tbl)
3066 goto err;
3068 for (j = 0; j < xlt2->count; j++)
3069 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
3071 xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
3072 sizeof(*xlt2->t), GFP_KERNEL);
3073 if (!xlt2->t)
3074 goto err;
3076 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
3077 prof->count = blk_sizes[i].prof_tcam;
3078 prof->max_prof_id = blk_sizes[i].prof_id;
3079 prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
3080 prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
3081 sizeof(*prof->t), GFP_KERNEL);
3083 if (!prof->t)
3084 goto err;
3086 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
3087 prof_redir->count = blk_sizes[i].prof_redir;
3088 prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
3089 prof_redir->count,
3090 sizeof(*prof_redir->t),
3091 GFP_KERNEL);
3093 if (!prof_redir->t)
3094 goto err;
3096 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
3097 es->count = blk_sizes[i].es;
3098 es->fvw = blk_sizes[i].fvw;
3099 es->t = devm_kcalloc(ice_hw_to_dev(hw),
3100 (u32)(es->count * es->fvw),
3101 sizeof(*es->t), GFP_KERNEL);
3102 if (!es->t)
3103 goto err;
3105 es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3106 sizeof(*es->ref_count),
3107 GFP_KERNEL);
3108 if (!es->ref_count)
3109 goto err;
3111 es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
3112 sizeof(*es->written), GFP_KERNEL);
3113 if (!es->written)
3114 goto err;
3116 return 0;
3118 err:
3119 ice_free_hw_tbls(hw);
3120 return ICE_ERR_NO_MEMORY;
3124 * ice_prof_gen_key - generate profile ID key
3125 * @hw: pointer to the HW struct
3126 * @blk: the block in which to write the profile ID
3127 * @ptg: packet type group (PTG) portion of key
3128 * @vsig: VSIG portion of key
3129 * @cdid: CDID portion of key
3130 * @flags: flag portion of key
3131 * @vl_msk: valid mask
3132 * @dc_msk: don't care mask
3133 * @nm_msk: never match mask
3134 * @key: output of profile ID key
3136 static enum ice_status
3137 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
3138 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3139 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
3140 u8 key[ICE_TCAM_KEY_SZ])
3142 struct ice_prof_id_key inkey;
3144 inkey.xlt1 = ptg;
3145 inkey.xlt2_cdid = cpu_to_le16(vsig);
3146 inkey.flags = cpu_to_le16(flags);
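/* when a block reserves CDID bits, the CDID is one-hot encoded into the
 * top bits of the XLT2/CDID key field; the remaining low bits carry the
 * VSIG written above
 */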
3148 switch (hw->blk[blk].prof.cdid_bits) {
3149 case 0:
3150 break;
3151 case 2:
3152 #define ICE_CD_2_M 0xC000U
3153 #define ICE_CD_2_S 14
3154 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
3155 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
3156 break;
3157 case 4:
3158 #define ICE_CD_4_M 0xF000U
3159 #define ICE_CD_4_S 12
3160 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
3161 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
3162 break;
3163 case 8:
3164 #define ICE_CD_8_M 0xFF00U
3165 #define ICE_CD_8_S 8
3166 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
3167 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
3168 break;
3169 default:
3170 ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
3171 break;
3174 return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
3175 nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
3179 * ice_tcam_write_entry - write TCAM entry
3180 * @hw: pointer to the HW struct
3181 * @blk: the block in which to write the profile ID
3182 * @idx: the entry index to write to
3183 * @prof_id: profile ID
3184 * @ptg: packet type group (PTG) portion of key
3185 * @vsig: VSIG portion of key
3186 * @cdid: CDID portion of key
3187 * @flags: flag portion of key
3188 * @vl_msk: valid mask
3189 * @dc_msk: don't care mask
3190 * @nm_msk: never match mask
3192 static enum ice_status
3193 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
3194 u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
3195 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
3196 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
3197 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
3199 struct ice_prof_tcam_entry;
3200 enum ice_status status;
3202 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
3203 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
3204 if (!status) {
3205 hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
3206 hw->blk[blk].prof.t[idx].prof_id = prof_id;
3209 return status;
3213 * ice_vsig_get_ref - returns the number of VSIs belonging to a VSIG
3214 * @hw: pointer to the hardware structure
3215 * @blk: HW block
3216 * @vsig: VSIG to query
3217 * @refs: pointer to variable to receive the reference count
3219 static enum ice_status
3220 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
3222 u16 idx = vsig & ICE_VSIG_IDX_M;
3223 struct ice_vsig_vsi *ptr;
3225 *refs = 0;
3227 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
3228 return ICE_ERR_DOES_NOT_EXIST;
3230 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3231 while (ptr) {
3232 (*refs)++;
3233 ptr = ptr->next_vsi;
3236 return 0;
3240 * ice_has_prof_vsig - check to see if VSIG has a specific profile
3241 * @hw: pointer to the hardware structure
3242 * @blk: HW block
3243 * @vsig: VSIG to check against
3244 * @hdl: profile handle
3246 static bool
3247 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
3249 u16 idx = vsig & ICE_VSIG_IDX_M;
3250 struct ice_vsig_prof *ent;
3252 list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3253 list)
3254 if (ent->profile_cookie == hdl)
3255 return true;
3257 ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
3258 vsig);
3259 return false;
3263 * ice_prof_bld_es - build profile ID extraction sequence changes
3264 * @hw: pointer to the HW struct
3265 * @blk: hardware block
3266 * @bld: the update package buffer build to add to
3267 * @chgs: the list of changes to make in hardware
3269 static enum ice_status
3270 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
3271 struct ice_buf_build *bld, struct list_head *chgs)
3273 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
3274 struct ice_chs_chg *tmp;
3276 list_for_each_entry(tmp, chgs, list_entry)
3277 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
3278 u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
3279 struct ice_pkg_es *p;
3280 u32 id;
3282 id = ice_sect_id(blk, ICE_VEC_TBL);
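/* size the section for a single entry whose trailing es[] array
 * holds one full field vector of fvw words
 */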
3283 p = ice_pkg_buf_alloc_section(bld, id,
3284 struct_size(p, es, 1) +
3285 vec_size -
3286 sizeof(p->es[0]));
3288 if (!p)
3289 return ICE_ERR_MAX_LIMIT;
3291 p->count = cpu_to_le16(1);
3292 p->offset = cpu_to_le16(tmp->prof_id);
3294 memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
3297 return 0;
3301 * ice_prof_bld_tcam - build profile ID TCAM changes
3302 * @hw: pointer to the HW struct
3303 * @blk: hardware block
3304 * @bld: the update package buffer build to add to
3305 * @chgs: the list of changes to make in hardware
3307 static enum ice_status
3308 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
3309 struct ice_buf_build *bld, struct list_head *chgs)
3311 struct ice_chs_chg *tmp;
3313 list_for_each_entry(tmp, chgs, list_entry)
3314 if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
3315 struct ice_prof_id_section *p;
3316 u32 id;
3318 id = ice_sect_id(blk, ICE_PROF_TCAM);
3319 p = ice_pkg_buf_alloc_section(bld, id,
3320 struct_size(p, entry, 1));
3322 if (!p)
3323 return ICE_ERR_MAX_LIMIT;
3325 p->count = cpu_to_le16(1);
3326 p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
3327 p->entry[0].prof_id = tmp->prof_id;
3329 memcpy(p->entry[0].key,
3330 &hw->blk[blk].prof.t[tmp->tcam_idx].key,
3331 sizeof(hw->blk[blk].prof.t->key));
3334 return 0;
3338 * ice_prof_bld_xlt1 - build XLT1 changes
3339 * @blk: hardware block
3340 * @bld: the update package buffer build to add to
3341 * @chgs: the list of changes to make in hardware
3343 static enum ice_status
3344 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
3345 struct list_head *chgs)
3347 struct ice_chs_chg *tmp;
3349 list_for_each_entry(tmp, chgs, list_entry)
3350 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
3351 struct ice_xlt1_section *p;
3352 u32 id;
3354 id = ice_sect_id(blk, ICE_XLT1);
3355 p = ice_pkg_buf_alloc_section(bld, id,
3356 struct_size(p, value, 1));
3358 if (!p)
3359 return ICE_ERR_MAX_LIMIT;
3361 p->count = cpu_to_le16(1);
3362 p->offset = cpu_to_le16(tmp->ptype);
3363 p->value[0] = tmp->ptg;
3366 return 0;
3370 * ice_prof_bld_xlt2 - build XLT2 changes
3371 * @blk: hardware block
3372 * @bld: the update package buffer build to add to
3373 * @chgs: the list of changes to make in hardware
3375 static enum ice_status
3376 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
3377 struct list_head *chgs)
3379 struct ice_chs_chg *tmp;
3381 list_for_each_entry(tmp, chgs, list_entry) {
3382 struct ice_xlt2_section *p;
3383 u32 id;
3385 switch (tmp->type) {
3386 case ICE_VSIG_ADD:
3387 case ICE_VSI_MOVE:
3388 case ICE_VSIG_REM:
3389 id = ice_sect_id(blk, ICE_XLT2);
3390 p = ice_pkg_buf_alloc_section(bld, id,
3391 struct_size(p, value, 1));
3393 if (!p)
3394 return ICE_ERR_MAX_LIMIT;
3396 p->count = cpu_to_le16(1);
3397 p->offset = cpu_to_le16(tmp->vsi);
3398 p->value[0] = cpu_to_le16(tmp->vsig);
3399 break;
3400 default:
3401 break;
3405 return 0;
3409 * ice_upd_prof_hw - update hardware using the change list
3410 * @hw: pointer to the HW struct
3411 * @blk: hardware block
3412 * @chgs: the list of changes to make in hardware
3414 static enum ice_status
3415 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
3416 struct list_head *chgs)
3418 struct ice_buf_build *b;
3419 struct ice_chs_chg *tmp;
3420 enum ice_status status;
3421 u16 pkg_sects;
3422 u16 xlt1 = 0;
3423 u16 xlt2 = 0;
3424 u16 tcam = 0;
3425 u16 es = 0;
3426 u16 sects;
3428 /* count number of sections we need */
3429 list_for_each_entry(tmp, chgs, list_entry) {
3430 switch (tmp->type) {
3431 case ICE_PTG_ES_ADD:
3432 if (tmp->add_ptg)
3433 xlt1++;
3434 if (tmp->add_prof)
3435 es++;
3436 break;
3437 case ICE_TCAM_ADD:
3438 tcam++;
3439 break;
3440 case ICE_VSIG_ADD:
3441 case ICE_VSI_MOVE:
3442 case ICE_VSIG_REM:
3443 xlt2++;
3444 break;
3445 default:
3446 break;
3449 sects = xlt1 + xlt2 + tcam + es;
3451 if (!sects)
3452 return 0;
3454 /* Build update package buffer */
3455 b = ice_pkg_buf_alloc(hw);
3456 if (!b)
3457 return ICE_ERR_NO_MEMORY;
3459 status = ice_pkg_buf_reserve_section(b, sects);
3460 if (status)
3461 goto error_tmp;
3463 /* Preserve order of table update: ES, TCAM, PTG, VSIG */
3464 if (es) {
3465 status = ice_prof_bld_es(hw, blk, b, chgs);
3466 if (status)
3467 goto error_tmp;
3470 if (tcam) {
3471 status = ice_prof_bld_tcam(hw, blk, b, chgs);
3472 if (status)
3473 goto error_tmp;
3476 if (xlt1) {
3477 status = ice_prof_bld_xlt1(blk, b, chgs);
3478 if (status)
3479 goto error_tmp;
3482 if (xlt2) {
3483 status = ice_prof_bld_xlt2(blk, b, chgs);
3484 if (status)
3485 goto error_tmp;
3488 /* After package buffer build check if the section count in buffer is
3489 * non-zero and matches the number of sections detected for package
3490 * update.
3492 pkg_sects = ice_pkg_buf_get_active_sections(b);
3493 if (!pkg_sects || pkg_sects != sects) {
3494 status = ICE_ERR_INVAL_SIZE;
3495 goto error_tmp;
3498 /* update package */
3499 status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
3500 if (status == ICE_ERR_AQ_ERROR)
3501 ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
3503 error_tmp:
3504 ice_pkg_buf_free(hw, b);
3505 return status;
3509 * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
3510 * @hw: pointer to the HW struct
3511 * @prof_id: profile ID
3512 * @mask_sel: mask select
3514 * This function enables any of the masks selected by the mask select
3515 * parameter for the specified profile.
3517 static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
3519 wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
3521 ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
3522 GLQF_FDMASK_SEL(prof_id), mask_sel);
3525 struct ice_fd_src_dst_pair {
3526 u8 prot_id;
3527 u8 count;
3528 u16 off;
3531 static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
3532 /* These are defined in pairs */
3533 { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
3534 { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
3536 { ICE_PROT_IPV4_IL, 2, 12 },
3537 { ICE_PROT_IPV4_IL, 2, 16 },
3539 { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
3540 { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
3542 { ICE_PROT_IPV6_IL, 8, 8 },
3543 { ICE_PROT_IPV6_IL, 8, 24 },
3545 { ICE_PROT_TCP_IL, 1, 0 },
3546 { ICE_PROT_TCP_IL, 1, 2 },
3548 { ICE_PROT_UDP_OF, 1, 0 },
3549 { ICE_PROT_UDP_OF, 1, 2 },
3551 { ICE_PROT_UDP_IL_OR_S, 1, 0 },
3552 { ICE_PROT_UDP_IL_OR_S, 1, 2 },
3554 { ICE_PROT_SCTP_IL, 1, 0 },
3555 { ICE_PROT_SCTP_IL, 1, 2 }
3558 #define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
3561 * ice_update_fd_swap - set register appropriately for a FD FV extraction
3562 * @hw: pointer to the HW struct
3563 * @prof_id: profile ID
3564 * @es: extraction sequence (length of array is determined by the block)
3566 static enum ice_status
3567 ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
3569 DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
3570 u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
3571 #define ICE_FD_FV_NOT_FOUND (-2)
3572 s8 first_free = ICE_FD_FV_NOT_FOUND;
3573 u8 used[ICE_MAX_FV_WORDS] = { 0 };
3574 s8 orig_free, si;
3575 u32 mask_sel = 0;
3576 u8 i, j, k;
3578 bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
3580 /* This code assumes that the Flow Director field vectors are assigned
3581 * from the end of the FV indexes working towards the zero index, that
3582 * only complete fields will be included and will be consecutive, and
3583 * that there are no gaps between valid indexes.
3586 /* Determine swap fields present */
3587 for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
3588 /* Find the first free entry, assuming right to left population.
3589 * This is where we can start adding additional pairs if needed.
3591 if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
3592 ICE_PROT_INVALID)
3593 first_free = i - 1;
3595 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
3596 if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
3597 es[i].off == ice_fd_pairs[j].off) {
3598 set_bit(j, pair_list);
3599 pair_start[j] = i;
3603 orig_free = first_free;
3605 /* determine missing swap fields that need to be added */
3606 for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
3607 u8 bit1 = test_bit(i + 1, pair_list);
3608 u8 bit0 = test_bit(i, pair_list);
3610 if (bit0 ^ bit1) {
3611 u8 index;
3613 /* add the appropriate 'paired' entry */
3614 if (!bit0)
3615 index = i;
3616 else
3617 index = i + 1;
3619 /* check for room */
3620 if (first_free + 1 < (s8)ice_fd_pairs[index].count)
3621 return ICE_ERR_MAX_LIMIT;
3623 /* place in extraction sequence */
3624 for (k = 0; k < ice_fd_pairs[index].count; k++) {
3625 es[first_free - k].prot_id =
3626 ice_fd_pairs[index].prot_id;
3627 es[first_free - k].off =
3628 ice_fd_pairs[index].off + (k * 2);
3630 if (k > first_free)
3631 return ICE_ERR_OUT_OF_RANGE;
3633 /* keep track of non-relevant fields */
3634 mask_sel |= BIT(first_free - k);
3637 pair_start[index] = first_free;
3638 first_free -= ice_fd_pairs[index].count;
3642 /* fill in the swap array */
3643 si = hw->blk[ICE_BLK_FD].es.fvw - 1;
3644 while (si >= 0) {
3645 u8 indexes_used = 1;
3647 /* assume flat at this index */
3648 #define ICE_SWAP_VALID 0x80
3649 used[si] = si | ICE_SWAP_VALID;
3651 if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
3652 si -= indexes_used;
3653 continue;
3656 /* check for a swap location */
3657 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
3658 if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
3659 es[si].off == ice_fd_pairs[j].off) {
3660 u8 idx;
3662 /* determine the appropriate matching field */
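/* entries in ice_fd_pairs are laid out as adjacent src/dst pairs,
 * so the partner of entry j is its even/odd neighbor
 */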
3663 idx = j + ((j % 2) ? -1 : 1);
3665 indexes_used = ice_fd_pairs[idx].count;
3666 for (k = 0; k < indexes_used; k++) {
3667 used[si - k] = (pair_start[idx] - k) |
3668 ICE_SWAP_VALID;
3671 break;
3674 si -= indexes_used;
3677 /* for each set of 4 swap and 4 inset indexes, write the appropriate
3678 * register
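 * Each 32-bit GLQF_FDSWAP/GLQF_FDINSET register packs one byte per
 * field vector word: byte k of register j describes FV word (j * 4 + k).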
3680 for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
3681 u32 raw_swap = 0;
3682 u32 raw_in = 0;
3684 for (k = 0; k < 4; k++) {
3685 u8 idx;
3687 idx = (j * 4) + k;
3688 if (used[idx] && !(mask_sel & BIT(idx))) {
3689 raw_swap |= used[idx] << (k * BITS_PER_BYTE);
3690 #define ICE_INSET_DFLT 0x9f
3691 raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
3695 /* write the appropriate swap register set */
3696 wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
3698 ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
3699 prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
3701 /* write the appropriate inset register set */
3702 wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
3704 ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
3705 prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
3708 /* initially clear the mask select for this profile */
3709 ice_update_fd_mask(hw, prof_id, 0);
3711 return 0;
3715 * ice_add_prof - add profile
3716 * @hw: pointer to the HW struct
3717 * @blk: hardware block
3718 * @id: profile tracking ID
3719 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
3720 * @es: extraction sequence (length of array is determined by the block)
3722 * This function registers a profile, which matches a set of PTGs with a
3723 * particular extraction sequence. While the hardware profile is allocated,
3724 * it will not be written until the first call to ice_add_flow that specifies
3725 * the ID value used here.
3727 enum ice_status
3728 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
3729 struct ice_fv_word *es)
3731 u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
3732 DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
3733 struct ice_prof_map *prof;
3734 enum ice_status status;
3735 u8 byte = 0;
3736 u8 prof_id;
3738 bitmap_zero(ptgs_used, ICE_XLT1_CNT);
3740 mutex_lock(&hw->blk[blk].es.prof_map_lock);
3742 /* search for existing profile */
3743 status = ice_find_prof_id(hw, blk, es, &prof_id);
3744 if (status) {
3745 /* allocate profile ID */
3746 status = ice_alloc_prof_id(hw, blk, &prof_id);
3747 if (status)
3748 goto err_ice_add_prof;
3749 if (blk == ICE_BLK_FD) {
3750 /* For Flow Director block, the extraction sequence may
3751 * need to be altered in the case where there are paired
3752 * fields that have no match. This is necessary because
3753 * for Flow Director, src and dest fields need to be paired
3754 * for filter programming and these values are swapped
3755 * during Tx.
3757 status = ice_update_fd_swap(hw, prof_id, es);
3758 if (status)
3759 goto err_ice_add_prof;
3762 /* and write new es */
3763 ice_write_es(hw, blk, prof_id, es);
3766 ice_prof_inc_ref(hw, blk, prof_id);
3768 /* add profile info */
3769 prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
3770 if (!prof) {
3771 status = ICE_ERR_NO_MEMORY;
3772 goto err_ice_add_prof;
3775 prof->profile_cookie = id;
3776 prof->prof_id = prof_id;
3777 prof->ptg_cnt = 0;
3778 prof->context = 0;
3780 /* build list of ptgs */
3781 while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
3782 u8 bit;
3784 if (!ptypes[byte]) {
3785 bytes--;
3786 byte++;
3787 continue;
3790 /* Examine 8 bits per byte */
3791 for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
3792 BITS_PER_BYTE) {
3793 u16 ptype;
3794 u8 ptg;
3795 u8 m;
3797 ptype = byte * BITS_PER_BYTE + bit;
3799 /* The package should place all ptypes in a non-zero
3800 * PTG, so the following call should never fail.
3802 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
3803 continue;
3805 /* If PTG is already added, skip and continue */
3806 if (test_bit(ptg, ptgs_used))
3807 continue;
3809 set_bit(ptg, ptgs_used);
3810 prof->ptg[prof->ptg_cnt] = ptg;
3812 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
3813 break;
3815 /* nothing left in byte, then exit */
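/* m keeps only the bits above the current one; if none of them are
 * set there are no more ptypes to scan in this byte
 */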
3816 m = ~(u8)((1 << (bit + 1)) - 1);
3817 if (!(ptypes[byte] & m))
3818 break;
3821 bytes--;
3822 byte++;
3825 list_add(&prof->list, &hw->blk[blk].es.prof_map);
3826 status = 0;
3828 err_ice_add_prof:
3829 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3830 return status;
3834 * ice_search_prof_id - Search for a profile tracking ID
3835 * @hw: pointer to the HW struct
3836 * @blk: hardware block
3837 * @id: profile tracking ID
3839 * This will search for a profile tracking ID which was previously added.
3840 * The profile map lock should be held before calling this function.
3842 static struct ice_prof_map *
3843 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
3845 struct ice_prof_map *entry = NULL;
3846 struct ice_prof_map *map;
3848 list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
3849 if (map->profile_cookie == id) {
3850 entry = map;
3851 break;
3854 return entry;
3858 * ice_vsig_prof_id_count - count profiles in a VSIG
3859 * @hw: pointer to the HW struct
3860 * @blk: hardware block
3861 * @vsig: VSIG whose profiles are to be counted
3863 static u16
3864 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
3866 u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
3867 struct ice_vsig_prof *p;
3869 list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3870 list)
3871 count++;
3873 return count;
3877 * ice_rel_tcam_idx - release a TCAM index
3878 * @hw: pointer to the HW struct
3879 * @blk: hardware block
3880 * @idx: the index to release
3882 static enum ice_status
3883 ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
3885 /* Masks to invoke a never match entry */
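/* the entry is first rewritten with a never-match key so the released
 * index cannot produce stray hits before it is reprogrammed
 */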
3886 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3887 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
3888 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
3889 enum ice_status status;
3891 /* write the TCAM entry */
3892 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
3893 dc_msk, nm_msk);
3894 if (status)
3895 return status;
3897 /* release the TCAM entry */
3898 status = ice_free_tcam_ent(hw, blk, idx);
3900 return status;
3904 * ice_rem_prof_id - remove one profile from a VSIG
3905 * @hw: pointer to the HW struct
3906 * @blk: hardware block
3907 * @prof: pointer to profile structure to remove
3909 static enum ice_status
3910 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
3911 struct ice_vsig_prof *prof)
3913 enum ice_status status;
3914 u16 i;
3916 for (i = 0; i < prof->tcam_count; i++)
3917 if (prof->tcam[i].in_use) {
3918 prof->tcam[i].in_use = false;
3919 status = ice_rel_tcam_idx(hw, blk,
3920 prof->tcam[i].tcam_idx);
3921 if (status)
3922 return ICE_ERR_HW_TABLE;
3925 return 0;
3929 * ice_rem_vsig - remove VSIG
3930 * @hw: pointer to the HW struct
3931 * @blk: hardware block
3932 * @vsig: the VSIG to remove
3933 * @chg: the change list
3935 static enum ice_status
3936 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
3937 struct list_head *chg)
3939 u16 idx = vsig & ICE_VSIG_IDX_M;
3940 struct ice_vsig_vsi *vsi_cur;
3941 struct ice_vsig_prof *d, *t;
3942 enum ice_status status;
3944 /* remove TCAM entries */
3945 list_for_each_entry_safe(d, t,
3946 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3947 list) {
3948 status = ice_rem_prof_id(hw, blk, d);
3949 if (status)
3950 return status;
3952 list_del(&d->list);
3953 devm_kfree(ice_hw_to_dev(hw), d);
3956 /* Move all VSIs associated with this VSIG to the default VSIG */
3957 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3958 /* If the VSIG has at least 1 VSI then iterate through the list
3959 * and remove the VSIs before deleting the group.
3961 if (vsi_cur)
3962 do {
3963 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
3964 struct ice_chs_chg *p;
3966 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
3967 GFP_KERNEL);
3968 if (!p)
3969 return ICE_ERR_NO_MEMORY;
3971 p->type = ICE_VSIG_REM;
3972 p->orig_vsig = vsig;
3973 p->vsig = ICE_DEFAULT_VSIG;
3974 p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
3976 list_add(&p->list_entry, chg);
3978 vsi_cur = tmp;
3979 } while (vsi_cur);
3981 return ice_vsig_free(hw, blk, vsig);
3985 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
3986 * @hw: pointer to the HW struct
3987 * @blk: hardware block
3988 * @vsig: VSIG to remove the profile from
3989 * @hdl: profile handle indicating which profile to remove
3990 * @chg: list to receive a record of changes
3992 static enum ice_status
3993 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
3994 struct list_head *chg)
3996 u16 idx = vsig & ICE_VSIG_IDX_M;
3997 struct ice_vsig_prof *p, *t;
3998 enum ice_status status;
4000 list_for_each_entry_safe(p, t,
4001 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4002 list)
4003 if (p->profile_cookie == hdl) {
4004 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
4005 /* this is the last profile, remove the VSIG */
4006 return ice_rem_vsig(hw, blk, vsig, chg);
4008 status = ice_rem_prof_id(hw, blk, p);
4009 if (!status) {
4010 list_del(&p->list);
4011 devm_kfree(ice_hw_to_dev(hw), p);
4013 return status;
4016 return ICE_ERR_DOES_NOT_EXIST;
4020 * ice_rem_flow_all - remove all flows with a particular profile
4021 * @hw: pointer to the HW struct
4022 * @blk: hardware block
4023 * @id: profile tracking ID
4025 static enum ice_status
4026 ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
4028 struct ice_chs_chg *del, *tmp;
4029 enum ice_status status;
4030 struct list_head chg;
4031 u16 i;
4033 INIT_LIST_HEAD(&chg);
4035 for (i = 1; i < ICE_MAX_VSIGS; i++)
4036 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
4037 if (ice_has_prof_vsig(hw, blk, i, id)) {
4038 status = ice_rem_prof_id_vsig(hw, blk, i, id,
4039 &chg);
4040 if (status)
4041 goto err_ice_rem_flow_all;
4045 status = ice_upd_prof_hw(hw, blk, &chg);
4047 err_ice_rem_flow_all:
4048 list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4049 list_del(&del->list_entry);
4050 devm_kfree(ice_hw_to_dev(hw), del);
4053 return status;
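/* A hedged, illustrative sketch (not part of the driver) of the change-list
 * idiom used by ice_rem_flow_all() above and by the flow add/remove routines
 * later in this file: accumulate struct ice_chs_chg records on a local list,
 * flush them to hardware once via ice_upd_prof_hw(), then free every record
 * whether or not the update succeeded.  The example_* name is hypothetical.
 */
static enum ice_status
example_apply_changes(struct ice_hw *hw, enum ice_block blk)
{
	struct ice_chs_chg *del, *tmp;
	enum ice_status status;
	struct list_head chg;

	INIT_LIST_HEAD(&chg);

	/* ... append ice_chs_chg entries describing the desired updates ... */

	/* write all accumulated changes to the hardware tables */
	status = ice_upd_prof_hw(hw, blk, &chg);

	/* the change list is only a log; always free it afterwards */
	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
		list_del(&del->list_entry);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	return status;
}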
4057 * ice_rem_prof - remove profile
4058 * @hw: pointer to the HW struct
4059 * @blk: hardware block
4060 * @id: profile tracking ID
4062 * This will remove the profile specified by the ID parameter, which was
4063 * previously created through ice_add_prof. If any existing entries
4064 * are associated with this profile, they will be removed as well.
4066 enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
4068 struct ice_prof_map *pmap;
4069 enum ice_status status;
4071 mutex_lock(&hw->blk[blk].es.prof_map_lock);
4073 pmap = ice_search_prof_id(hw, blk, id);
4074 if (!pmap) {
4075 status = ICE_ERR_DOES_NOT_EXIST;
4076 goto err_ice_rem_prof;
4079 /* remove all flows with this profile */
4080 status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
4081 if (status)
4082 goto err_ice_rem_prof;
4084 /* dereference profile, and possibly remove */
4085 ice_prof_dec_ref(hw, blk, pmap->prof_id);
4087 list_del(&pmap->list);
4088 devm_kfree(ice_hw_to_dev(hw), pmap);
4090 err_ice_rem_prof:
4091 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4092 return status;
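/* Hypothetical caller-side sketch (illustrative only, not part of the
 * driver): a flow layer that registered a profile with ice_add_prof()
 * earlier in this file would tear it down as shown below.  The example_*
 * name, the ICE_BLK_RSS block and the tracking ID value are assumptions
 * made for the example.
 */
static enum ice_status example_teardown_profile(struct ice_hw *hw)
{
	/* the 64-bit cookie the caller used when the profile was created */
	u64 example_id = 0x1234ULL;

	/* removes the profile and any VSIG/TCAM entries still using it */
	return ice_rem_prof(hw, ICE_BLK_RSS, example_id);
}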
4096 * ice_get_prof - get profile
4097 * @hw: pointer to the HW struct
4098 * @blk: hardware block
4099 * @hdl: profile handle
4100 * @chg: change list
4102 static enum ice_status
4103 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
4104 struct list_head *chg)
4106 enum ice_status status = 0;
4107 struct ice_prof_map *map;
4108 struct ice_chs_chg *p;
4109 u16 i;
4111 mutex_lock(&hw->blk[blk].es.prof_map_lock);
4112 /* Get the details on the profile specified by the handle ID */
4113 map = ice_search_prof_id(hw, blk, hdl);
4114 if (!map) {
4115 status = ICE_ERR_DOES_NOT_EXIST;
4116 goto err_ice_get_prof;
4119 for (i = 0; i < map->ptg_cnt; i++)
4120 if (!hw->blk[blk].es.written[map->prof_id]) {
4121 /* add ES to change list */
4122 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
4123 GFP_KERNEL);
4124 if (!p) {
4125 status = ICE_ERR_NO_MEMORY;
4126 goto err_ice_get_prof;
4129 p->type = ICE_PTG_ES_ADD;
4130 p->ptype = 0;
4131 p->ptg = map->ptg[i];
4132 p->add_ptg = 0;
4134 p->add_prof = 1;
4135 p->prof_id = map->prof_id;
4137 hw->blk[blk].es.written[map->prof_id] = true;
4139 list_add(&p->list_entry, chg);
4142 err_ice_get_prof:
4143 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4144 /* let caller clean up the change list */
4145 return status;
4149 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
4150 * @hw: pointer to the HW struct
4151 * @blk: hardware block
4152 * @vsig: VSIG from which to copy the list
4153 * @lst: output list
4155 * This routine makes a copy of the list of profiles in the specified VSIG.
4157 static enum ice_status
4158 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4159 struct list_head *lst)
4161 struct ice_vsig_prof *ent1, *ent2;
4162 u16 idx = vsig & ICE_VSIG_IDX_M;
4164 list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4165 list) {
4166 struct ice_vsig_prof *p;
4168 /* copy the entry to the caller's output list */
4169 p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
4170 GFP_KERNEL);
4171 if (!p)
4172 goto err_ice_get_profs_vsig;
4174 list_add_tail(&p->list, lst);
4177 return 0;
4179 err_ice_get_profs_vsig:
4180 list_for_each_entry_safe(ent1, ent2, lst, list) {
4181 list_del(&ent1->list);
4182 devm_kfree(ice_hw_to_dev(hw), ent1);
4185 return ICE_ERR_NO_MEMORY;
4189 * ice_add_prof_to_lst - add profile entry to a list
4190 * @hw: pointer to the HW struct
4191 * @blk: hardware block
4192 * @lst: the list to be added to
4193 * @hdl: profile handle of entry to add
4195 static enum ice_status
4196 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
4197 struct list_head *lst, u64 hdl)
4199 enum ice_status status = 0;
4200 struct ice_prof_map *map;
4201 struct ice_vsig_prof *p;
4202 u16 i;
4204 mutex_lock(&hw->blk[blk].es.prof_map_lock);
4205 map = ice_search_prof_id(hw, blk, hdl);
4206 if (!map) {
4207 status = ICE_ERR_DOES_NOT_EXIST;
4208 goto err_ice_add_prof_to_lst;
4211 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4212 if (!p) {
4213 status = ICE_ERR_NO_MEMORY;
4214 goto err_ice_add_prof_to_lst;
4217 p->profile_cookie = map->profile_cookie;
4218 p->prof_id = map->prof_id;
4219 p->tcam_count = map->ptg_cnt;
4221 for (i = 0; i < map->ptg_cnt; i++) {
4222 p->tcam[i].prof_id = map->prof_id;
4223 p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
4224 p->tcam[i].ptg = map->ptg[i];
4227 list_add(&p->list, lst);
4229 err_ice_add_prof_to_lst:
4230 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4231 return status;
4235 * ice_move_vsi - move VSI to another VSIG
4236 * @hw: pointer to the HW struct
4237 * @blk: hardware block
4238 * @vsi: the VSI to move
4239 * @vsig: the VSIG to move the VSI to
4240 * @chg: the change list
4242 static enum ice_status
4243 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
4244 struct list_head *chg)
4246 enum ice_status status;
4247 struct ice_chs_chg *p;
4248 u16 orig_vsig;
4250 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4251 if (!p)
4252 return ICE_ERR_NO_MEMORY;
4254 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
4255 if (!status)
4256 status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
4258 if (status) {
4259 devm_kfree(ice_hw_to_dev(hw), p);
4260 return status;
4263 p->type = ICE_VSI_MOVE;
4264 p->vsi = vsi;
4265 p->orig_vsig = orig_vsig;
4266 p->vsig = vsig;
4268 list_add(&p->list_entry, chg);
4270 return 0;
4274 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
4275 * @hw: pointer to the HW struct
4276 * @idx: the index of the TCAM entry to remove
4277 * @chg: the list of change structures to search
4279 static void
4280 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
4282 struct ice_chs_chg *pos, *tmp;
4284 list_for_each_entry_safe(tmp, pos, chg, list_entry)
4285 if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
4286 list_del(&tmp->list_entry);
4287 devm_kfree(ice_hw_to_dev(hw), tmp);
4292 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
4293 * @hw: pointer to the HW struct
4294 * @blk: hardware block
4295 * @enable: true to enable, false to disable
4296 * @vsig: the VSIG of the TCAM entry
4297 * @tcam: pointer to the TCAM info structure of the TCAM entry to enable or disable
4298 * @chg: the change list
4300 * This function enables or disables the given TCAM entry and appends the change to the change list
4302 static enum ice_status
4303 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
4304 u16 vsig, struct ice_tcam_inf *tcam,
4305 struct list_head *chg)
4307 enum ice_status status;
4308 struct ice_chs_chg *p;
4310 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4311 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
4312 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
4314 /* if disabling, free the TCAM */
4315 if (!enable) {
4316 status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
4318 /* if we have already created a change for this TCAM entry, then
4319 * we need to remove that entry, in order to prevent writing to
4320 * a TCAM entry that we will no longer own.
4322 ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
4323 tcam->tcam_idx = 0;
4324 tcam->in_use = 0;
4325 return status;
4328 /* for re-enabling, reallocate a TCAM */
4329 status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
4330 if (status)
4331 return status;
4333 /* add TCAM to change list */
4334 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4335 if (!p)
4336 return ICE_ERR_NO_MEMORY;
4338 status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
4339 tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
4340 nm_msk);
4341 if (status)
4342 goto err_ice_prof_tcam_ena_dis;
4344 tcam->in_use = 1;
4346 p->type = ICE_TCAM_ADD;
4347 p->add_tcam_idx = true;
4348 p->prof_id = tcam->prof_id;
4349 p->ptg = tcam->ptg;
4350 p->vsig = 0;
4351 p->tcam_idx = tcam->tcam_idx;
4353 /* log change */
4354 list_add(&p->list_entry, chg);
4356 return 0;
4358 err_ice_prof_tcam_ena_dis:
4359 devm_kfree(ice_hw_to_dev(hw), p);
4360 return status;
4364 * ice_adj_prof_priorities - adjust profile based on priorities
4365 * @hw: pointer to the HW struct
4366 * @blk: hardware block
4367 * @vsig: the VSIG for which to adjust profile priorities
4368 * @chg: the change list
4370 static enum ice_status
4371 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
4372 struct list_head *chg)
4374 DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
4375 struct ice_vsig_prof *t;
4376 enum ice_status status;
4377 u16 idx;
4379 bitmap_zero(ptgs_used, ICE_XLT1_CNT);
4380 idx = vsig & ICE_VSIG_IDX_M;
4382 /* Priority is based on the order in which the profiles are added. The
4383 * newest added profile has highest priority and the oldest added
4384 * profile has the lowest priority. Since the profile property list for
4385 * a VSIG is sorted from newest to oldest, this code traverses the list
4386 * in order and enables the first of each PTG that it finds (that is not
4387 * already enabled); it also disables any duplicate PTGs that it finds
4388 * in the older profiles (that are currently enabled).
4391 list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4392 list) {
4393 u16 i;
4395 for (i = 0; i < t->tcam_count; i++) {
4396 /* Scan the priorities from newest to oldest.
4397 * Make sure that the newest profiles take priority.
4399 if (test_bit(t->tcam[i].ptg, ptgs_used) &&
4400 t->tcam[i].in_use) {
4401 /* need to mark this PTG as never match, as it
4402 * was already in use and is therefore a duplicate
4403 * (and lower priority)
4405 status = ice_prof_tcam_ena_dis(hw, blk, false,
4406 vsig,
4407 &t->tcam[i],
4408 chg);
4409 if (status)
4410 return status;
4411 } else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
4412 !t->tcam[i].in_use) {
4413 /* need to enable this PTG, as it is not in use
4414 * and not yet enabled (highest priority)
4416 status = ice_prof_tcam_ena_dis(hw, blk, true,
4417 vsig,
4418 &t->tcam[i],
4419 chg);
4420 if (status)
4421 return status;
4424 /* keep track of used ptgs */
4425 set_bit(t->tcam[i].ptg, ptgs_used);
4429 return 0;
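/* Standalone illustration (hypothetical helper, not driver code) of the
 * priority rule applied by ice_adj_prof_priorities() above: with PTGs
 * listed from the newest profile to the oldest, only the first (newest)
 * occurrence of each PTG stays enabled; later occurrences are duplicates
 * and must be disabled.
 */
static void example_first_occurrence_wins(const u8 *ptgs, bool *enable, u16 cnt)
{
	DECLARE_BITMAP(seen, ICE_XLT1_CNT);
	u16 i;

	bitmap_zero(seen, ICE_XLT1_CNT);

	for (i = 0; i < cnt; i++) {
		/* newest-to-oldest order: the first hit keeps the PTG */
		enable[i] = !test_bit(ptgs[i], seen);
		set_bit(ptgs[i], seen);
	}
}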
4433 * ice_add_prof_id_vsig - add profile to VSIG
4434 * @hw: pointer to the HW struct
4435 * @blk: hardware block
4436 * @vsig: the VSIG to which this profile is to be added
4437 * @hdl: the profile handle indicating the profile to add
4438 * @rev: true to add entries to the end of the list
4439 * @chg: the change list
4441 static enum ice_status
4442 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
4443 bool rev, struct list_head *chg)
4445 /* Masks that ignore flags */
4446 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4447 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
4448 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
4449 enum ice_status status = 0;
4450 struct ice_prof_map *map;
4451 struct ice_vsig_prof *t;
4452 struct ice_chs_chg *p;
4453 u16 vsig_idx, i;
4455 /* Error out if this VSIG already has this profile */
4456 if (ice_has_prof_vsig(hw, blk, vsig, hdl))
4457 return ICE_ERR_ALREADY_EXISTS;
4459 /* new VSIG profile structure */
4460 t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
4461 if (!t)
4462 return ICE_ERR_NO_MEMORY;
4464 mutex_lock(&hw->blk[blk].es.prof_map_lock);
4465 /* Get the details on the profile specified by the handle ID */
4466 map = ice_search_prof_id(hw, blk, hdl);
4467 if (!map) {
4468 status = ICE_ERR_DOES_NOT_EXIST;
4469 goto err_ice_add_prof_id_vsig;
4472 t->profile_cookie = map->profile_cookie;
4473 t->prof_id = map->prof_id;
4474 t->tcam_count = map->ptg_cnt;
4476 /* create TCAM entries */
4477 for (i = 0; i < map->ptg_cnt; i++) {
4478 u16 tcam_idx;
4480 /* add TCAM to change list */
4481 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4482 if (!p) {
4483 status = ICE_ERR_NO_MEMORY;
4484 goto err_ice_add_prof_id_vsig;
4487 /* allocate the TCAM entry index */
4488 status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
4489 if (status) {
4490 devm_kfree(ice_hw_to_dev(hw), p);
4491 goto err_ice_add_prof_id_vsig;
4494 t->tcam[i].ptg = map->ptg[i];
4495 t->tcam[i].prof_id = map->prof_id;
4496 t->tcam[i].tcam_idx = tcam_idx;
4497 t->tcam[i].in_use = true;
4499 p->type = ICE_TCAM_ADD;
4500 p->add_tcam_idx = true;
4501 p->prof_id = t->tcam[i].prof_id;
4502 p->ptg = t->tcam[i].ptg;
4503 p->vsig = vsig;
4504 p->tcam_idx = t->tcam[i].tcam_idx;
4506 /* write the TCAM entry */
4507 status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
4508 t->tcam[i].prof_id,
4509 t->tcam[i].ptg, vsig, 0, 0,
4510 vl_msk, dc_msk, nm_msk);
4511 if (status) {
4512 devm_kfree(ice_hw_to_dev(hw), p);
4513 goto err_ice_add_prof_id_vsig;
4516 /* log change */
4517 list_add(&p->list_entry, chg);
4520 /* add profile to VSIG */
4521 vsig_idx = vsig & ICE_VSIG_IDX_M;
4522 if (rev)
4523 list_add_tail(&t->list,
4524 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
4525 else
4526 list_add(&t->list,
4527 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
4529 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4530 return status;
4532 err_ice_add_prof_id_vsig:
4533 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4534 /* let caller clean up the change list */
4535 devm_kfree(ice_hw_to_dev(hw), t);
4536 return status;
4540 * ice_create_prof_id_vsig - add a new VSIG with a single profile
4541 * @hw: pointer to the HW struct
4542 * @blk: hardware block
4543 * @vsi: the initial VSI that will be in VSIG
4544 * @hdl: the profile handle of the profile that will be added to the VSIG
4545 * @chg: the change list
4547 static enum ice_status
4548 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
4549 struct list_head *chg)
4551 enum ice_status status;
4552 struct ice_chs_chg *p;
4553 u16 new_vsig;
4555 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
4556 if (!p)
4557 return ICE_ERR_NO_MEMORY;
4559 new_vsig = ice_vsig_alloc(hw, blk);
4560 if (!new_vsig) {
4561 status = ICE_ERR_HW_TABLE;
4562 goto err_ice_create_prof_id_vsig;
4565 status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
4566 if (status)
4567 goto err_ice_create_prof_id_vsig;
4569 status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
4570 if (status)
4571 goto err_ice_create_prof_id_vsig;
4573 p->type = ICE_VSIG_ADD;
4574 p->vsi = vsi;
4575 p->orig_vsig = ICE_DEFAULT_VSIG;
4576 p->vsig = new_vsig;
4578 list_add(&p->list_entry, chg);
4580 return 0;
4582 err_ice_create_prof_id_vsig:
4583 /* let caller clean up the change list */
4584 devm_kfree(ice_hw_to_dev(hw), p);
4585 return status;
4589 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
4590 * @hw: pointer to the HW struct
4591 * @blk: hardware block
4592 * @vsi: the initial VSI that will be in VSIG
4593 * @lst: the list of profiles that will be added to the VSIG
4594 * @new_vsig: return of new VSIG
4595 * @chg: the change list
4597 static enum ice_status
4598 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
4599 struct list_head *lst, u16 *new_vsig,
4600 struct list_head *chg)
4602 struct ice_vsig_prof *t;
4603 enum ice_status status;
4604 u16 vsig;
4606 vsig = ice_vsig_alloc(hw, blk);
4607 if (!vsig)
4608 return ICE_ERR_HW_TABLE;
4610 status = ice_move_vsi(hw, blk, vsi, vsig, chg);
4611 if (status)
4612 return status;
4614 list_for_each_entry(t, lst, list) {
4615 /* add to the tail (rev == true) so the copied, newest-first order is preserved */
4616 status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
4617 true, chg);
4618 if (status)
4619 return status;
4622 *new_vsig = vsig;
4624 return 0;
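/* Minimal sketch (hypothetical, restating the copy loop of
 * ice_get_profs_vsig() above) of why the loop in ice_create_vsig_from_lst()
 * passes rev == true: when the source list is ordered newest-first, copying
 * with tail insertion preserves that order, whereas head insertion with
 * list_add() would reverse it and therefore reverse the profile priorities.
 */
static void example_tail_copy_keeps_order(struct ice_hw *hw,
					  struct list_head *newest_first,
					  struct list_head *dst)
{
	struct ice_vsig_prof *ent;

	list_for_each_entry(ent, newest_first, list) {
		struct ice_vsig_prof *cp;

		cp = devm_kmemdup(ice_hw_to_dev(hw), ent, sizeof(*cp),
				  GFP_KERNEL);
		if (!cp)
			return;

		/* tail insertion: the newest entry stays at the head of dst */
		list_add_tail(&cp->list, dst);
	}
}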
4628 * ice_find_prof_vsig - find a VSIG with a specific profile handle
4629 * @hw: pointer to the HW struct
4630 * @blk: hardware block
4631 * @hdl: the profile handle of the profile to search for
4632 * @vsig: returns the VSIG with the matching profile
4634 static bool
4635 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
4637 struct ice_vsig_prof *t;
4638 enum ice_status status;
4639 struct list_head lst;
4641 INIT_LIST_HEAD(&lst);
4643 t = kzalloc(sizeof(*t), GFP_KERNEL);
4644 if (!t)
4645 return false;
4647 t->profile_cookie = hdl;
4648 list_add(&t->list, &lst);
4650 status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
4652 list_del(&t->list);
4653 kfree(t);
4655 return !status;
4659 * ice_add_prof_id_flow - add profile flow
4660 * @hw: pointer to the HW struct
4661 * @blk: hardware block
4662 * @vsi: the VSI to enable with the profile specified by ID
4663 * @hdl: profile handle
4665 * Calling this function will update the hardware tables to enable the
4666 * profile indicated by the ID parameter for the specified VSI. Once the call
4667 * succeeds, the flow is enabled (see the usage sketch after this function).
4669 enum ice_status
4670 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
4672 struct ice_vsig_prof *tmp1, *del1;
4673 struct ice_chs_chg *tmp, *del;
4674 struct list_head union_lst;
4675 enum ice_status status;
4676 struct list_head chg;
4677 u16 vsig;
4679 INIT_LIST_HEAD(&union_lst);
4680 INIT_LIST_HEAD(&chg);
4682 /* Get profile */
4683 status = ice_get_prof(hw, blk, hdl, &chg);
4684 if (status)
4685 return status;
4687 /* determine if VSI is already part of a VSIG */
4688 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
4689 if (!status && vsig) {
4690 bool only_vsi;
4691 u16 or_vsig;
4692 u16 ref;
4694 /* found in VSIG */
4695 or_vsig = vsig;
4697 /* make sure that there is no overlap/conflict between the new
4698 * characteristics and the existing ones; we don't support that
4699 * scenario
4701 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
4702 status = ICE_ERR_ALREADY_EXISTS;
4703 goto err_ice_add_prof_id_flow;
4706 /* last VSI in the VSIG? */
4707 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
4708 if (status)
4709 goto err_ice_add_prof_id_flow;
4710 only_vsi = (ref == 1);
4712 /* create a union of the current profiles and the one being
4713 * added
4715 status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
4716 if (status)
4717 goto err_ice_add_prof_id_flow;
4719 status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
4720 if (status)
4721 goto err_ice_add_prof_id_flow;
4723 /* search for an existing VSIG with an exact characteristic match */
4724 status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
4725 if (!status) {
4726 /* move VSI to the VSIG that matches */
4727 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4728 if (status)
4729 goto err_ice_add_prof_id_flow;
4731 /* VSI has been moved out of or_vsig. If the or_vsig had
4732 * only that VSI it is now empty and can be removed.
4734 if (only_vsi) {
4735 status = ice_rem_vsig(hw, blk, or_vsig, &chg);
4736 if (status)
4737 goto err_ice_add_prof_id_flow;
4739 } else if (only_vsi) {
4740 /* If the original VSIG only contains one VSI, then it
4741 * will be the requesting VSI. In this case the VSI is
4742 * not sharing entries and we can simply add the new
4743 * profile to the VSIG.
4745 status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
4746 &chg);
4747 if (status)
4748 goto err_ice_add_prof_id_flow;
4750 /* Adjust priorities */
4751 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
4752 if (status)
4753 goto err_ice_add_prof_id_flow;
4754 } else {
4755 /* No match, so we need a new VSIG */
4756 status = ice_create_vsig_from_lst(hw, blk, vsi,
4757 &union_lst, &vsig,
4758 &chg);
4759 if (status)
4760 goto err_ice_add_prof_id_flow;
4762 /* Adjust priorities */
4763 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
4764 if (status)
4765 goto err_ice_add_prof_id_flow;
4767 } else {
4768 /* need to find or add a VSIG */
4769 /* search for an existing VSIG with an exact characteristic match */
4770 if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
4771 /* found an exact match */
4772 /* add or move VSI to the VSIG that matches */
4773 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4774 if (status)
4775 goto err_ice_add_prof_id_flow;
4776 } else {
4777 /* we did not find an exact match */
4778 /* we need to add a VSIG */
4779 status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
4780 &chg);
4781 if (status)
4782 goto err_ice_add_prof_id_flow;
4786 /* update hardware */
4787 if (!status)
4788 status = ice_upd_prof_hw(hw, blk, &chg);
4790 err_ice_add_prof_id_flow:
4791 list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4792 list_del(&del->list_entry);
4793 devm_kfree(ice_hw_to_dev(hw), del);
4796 list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
4797 list_del(&del1->list);
4798 devm_kfree(ice_hw_to_dev(hw), del1);
4801 return status;
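/* Hypothetical usage sketch (illustrative only, not part of the driver) for
 * the exported flow-enable path above.  The example_* name, the ICE_BLK_FD
 * block, the VSI index and the profile handle are assumptions made for the
 * example; ice_rem_prof_id_flow() below is the inverse operation.
 */
static enum ice_status example_enable_flow(struct ice_hw *hw)
{
	u16 example_vsi = 3;		/* example VSI index */
	u64 example_hdl = 0xABCDULL;	/* example profile tracking handle */

	/* moves the VSI into a VSIG that includes this profile and programs
	 * the required TCAM entries
	 */
	return ice_add_prof_id_flow(hw, ICE_BLK_FD, example_vsi, example_hdl);
}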
4805 * ice_rem_prof_from_list - remove a profile from list
4806 * @hw: pointer to the HW struct
4807 * @lst: list to remove the profile from
4808 * @hdl: the profile handle indicating the profile to remove
4810 static enum ice_status
4811 ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
4813 struct ice_vsig_prof *ent, *tmp;
4815 list_for_each_entry_safe(ent, tmp, lst, list)
4816 if (ent->profile_cookie == hdl) {
4817 list_del(&ent->list);
4818 devm_kfree(ice_hw_to_dev(hw), ent);
4819 return 0;
4822 return ICE_ERR_DOES_NOT_EXIST;
4826 * ice_rem_prof_id_flow - remove flow
4827 * @hw: pointer to the HW struct
4828 * @blk: hardware block
4829 * @vsi: the VSI from which to remove the profile specified by ID
4830 * @hdl: profile tracking handle
4832 * Calling this function will update the hardware tables to remove the
4833 * profile indicated by the ID parameter for the specified VSI. Once the call
4834 * succeeds, the flow is disabled (see the usage sketch after this function).
4836 enum ice_status
4837 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
4839 struct ice_vsig_prof *tmp1, *del1;
4840 struct ice_chs_chg *tmp, *del;
4841 struct list_head chg, copy;
4842 enum ice_status status;
4843 u16 vsig;
4845 INIT_LIST_HEAD(&copy);
4846 INIT_LIST_HEAD(&chg);
4848 /* determine if VSI is already part of a VSIG */
4849 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
4850 if (!status && vsig) {
4851 bool last_profile;
4852 bool only_vsi;
4853 u16 ref;
4855 /* found in VSIG */
4856 last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
4857 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
4858 if (status)
4859 goto err_ice_rem_prof_id_flow;
4860 only_vsi = (ref == 1);
4862 if (only_vsi) {
4863 /* If the original VSIG only contains one reference,
4864 * which will be the requesting VSI, then the VSI is not
4865 * sharing entries and we can simply remove the specific
4866 * characteristics from the VSIG.
4869 if (last_profile) {
4870 /* If there are no profiles left for this VSIG,
4871 * then simply remove the VSIG.
4873 status = ice_rem_vsig(hw, blk, vsig, &chg);
4874 if (status)
4875 goto err_ice_rem_prof_id_flow;
4876 } else {
4877 status = ice_rem_prof_id_vsig(hw, blk, vsig,
4878 hdl, &chg);
4879 if (status)
4880 goto err_ice_rem_prof_id_flow;
4882 /* Adjust priorities */
4883 status = ice_adj_prof_priorities(hw, blk, vsig,
4884 &chg);
4885 if (status)
4886 goto err_ice_rem_prof_id_flow;
4889 } else {
4890 /* Make a copy of the VSIG's list of profiles */
4891 status = ice_get_profs_vsig(hw, blk, vsig, &copy);
4892 if (status)
4893 goto err_ice_rem_prof_id_flow;
4895 /* Remove specified profile entry from the list */
4896 status = ice_rem_prof_from_list(hw, &copy, hdl);
4897 if (status)
4898 goto err_ice_rem_prof_id_flow;
4900 if (list_empty(&copy)) {
4901 status = ice_move_vsi(hw, blk, vsi,
4902 ICE_DEFAULT_VSIG, &chg);
4903 if (status)
4904 goto err_ice_rem_prof_id_flow;
4906 } else if (!ice_find_dup_props_vsig(hw, blk, &copy,
4907 &vsig)) {
4908 /* Found an existing VSIG whose profile list exactly matches the
4909 * remaining profiles, so move the VSI to that VSIG */
4915 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4916 if (status)
4917 goto err_ice_rem_prof_id_flow;
4918 } else {
4919 /* since no existing VSIG supports this
4920 * characteristic pattern, we need to create a
4921 * new VSIG and TCAM entries
4923 status = ice_create_vsig_from_lst(hw, blk, vsi,
4924 &copy, &vsig,
4925 &chg);
4926 if (status)
4927 goto err_ice_rem_prof_id_flow;
4929 /* Adjust priorities */
4930 status = ice_adj_prof_priorities(hw, blk, vsig,
4931 &chg);
4932 if (status)
4933 goto err_ice_rem_prof_id_flow;
4936 } else {
4937 status = ICE_ERR_DOES_NOT_EXIST;
4940 /* update hardware tables */
4941 if (!status)
4942 status = ice_upd_prof_hw(hw, blk, &chg);
4944 err_ice_rem_prof_id_flow:
4945 list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4946 list_del(&del->list_entry);
4947 devm_kfree(ice_hw_to_dev(hw), del);
4950 list_for_each_entry_safe(del1, tmp1, &copy, list) {
4951 list_del(&del1->list);
4952 devm_kfree(ice_hw_to_dev(hw), del1);
4955 return status;
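/* Hypothetical counterpart to the enable sketch above (illustrative only):
 * disabling the same flow removes the profile from the VSI's VSIG and, if
 * nothing else references them, releases the VSIG and its TCAM entries as
 * well.  The same assumed block, VSI index and handle are reused here.
 */
static enum ice_status example_disable_flow(struct ice_hw *hw)
{
	u16 example_vsi = 3;		/* example VSI index */
	u64 example_hdl = 0xABCDULL;	/* example profile tracking handle */

	return ice_rem_prof_id_flow(hw, ICE_BLK_FD, example_vsi, example_hdl);
}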