// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
                            struct qed_spq_entry *p_ent)
{
        /* qed_spq_get_entry() can either get an entry from the free_pool,
         * or, if no entries are left, allocate a new entry and add it to
         * the unlimited_pending list.
         */
        if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
                kfree(p_ent);
        else
                qed_spq_return_entry(p_hwfn, p_ent);
}

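/* Acquire an SPQ entry and initialize its ramrod header (CID, command and
 * protocol IDs) and the completion callback matching the requested
 * completion mode.
 */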
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
                        struct qed_spq_entry **pp_ent,
                        u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
{
        u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
        struct qed_spq_entry *p_ent = NULL;
        int rc;

        if (!pp_ent)
                return -ENOMEM;

        rc = qed_spq_get_entry(p_hwfn, pp_ent);
        if (rc)
                return rc;

        p_ent = *pp_ent;

        p_ent->elem.hdr.cid = cpu_to_le32(opaque_cid);
        p_ent->elem.hdr.cmd_id = cmd;
        p_ent->elem.hdr.protocol_id = protocol;

        p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
        p_ent->comp_mode = p_data->comp_mode;
        p_ent->comp_done.done = 0;

        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
                p_ent->comp_cb.cookie = &p_ent->comp_done;
                break;

        case QED_SPQ_MODE_BLOCK:
                if (!p_data->p_comp_data)
                        goto err;

                p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
                break;

        case QED_SPQ_MODE_CB:
                if (!p_data->p_comp_data)
                        p_ent->comp_cb.function = NULL;
                else
                        p_ent->comp_cb = *p_data->p_comp_data;
                break;

        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                goto err;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
                   opaque_cid, cmd, protocol,
                   (unsigned long)&p_ent->ramrod,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

        return 0;

err:
        qed_sp_destroy_request(p_hwfn, p_ent);

        return -EINVAL;
}

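/* Translate the driver's tunnel classification into the firmware's
 * tunnel_clss enumeration; unknown values fall back to MAC/VLAN.
 */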
static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
{
        switch (type) {
        case QED_TUNN_CLSS_MAC_VLAN:
                return TUNNEL_CLSS_MAC_VLAN;
        case QED_TUNN_CLSS_MAC_VNI:
                return TUNNEL_CLSS_MAC_VNI;
        case QED_TUNN_CLSS_INNER_MAC_VLAN:
                return TUNNEL_CLSS_INNER_MAC_VLAN;
        case QED_TUNN_CLSS_INNER_MAC_VNI:
                return TUNNEL_CLSS_INNER_MAC_VNI;
        case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
                return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
        default:
                return TUNNEL_CLSS_MAC_VLAN;
        }
}

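/* Refresh the cached enable state of each tunnel type. On PF start all
 * modes are taken from the request; otherwise only modes flagged for
 * update are copied.
 */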
static void
qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
                            struct qed_tunnel_info *p_src, bool b_pf_start)
{
        if (p_src->vxlan.b_update_mode || b_pf_start)
                p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

        if (p_src->l2_gre.b_update_mode || b_pf_start)
                p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

        if (p_src->ip_gre.b_update_mode || b_pf_start)
                p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

        if (p_src->l2_geneve.b_update_mode || b_pf_start)
                p_tun->l2_geneve.b_mode_enabled =
                    p_src->l2_geneve.b_mode_enabled;

        if (p_src->ip_geneve.b_update_mode || b_pf_start)
                p_tun->ip_geneve.b_mode_enabled =
                    p_src->ip_geneve.b_mode_enabled;
}

static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
                                  struct qed_tunnel_info *p_src)
{
        enum tunnel_clss type;

        p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
        p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

        type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
        p_tun->vxlan.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
        p_tun->l2_gre.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
        p_tun->ip_gre.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
        p_tun->l2_geneve.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
        p_tun->ip_geneve.tun_cls = type;
}

static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
                               struct qed_tunnel_info *p_src)
{
        p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
        p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

        if (p_src->geneve_port.b_update_port)
                p_tun->geneve_port.port = p_src->geneve_port.port;

        if (p_src->vxlan_port.b_update_port)
                p_tun->vxlan_port.port = p_src->vxlan_port.port;
}

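/* Helpers that copy the cached tunnel classification, and optionally the
 * UDP destination port, into the corresponding ramrod fields.
 */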
static void
__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
                              struct qed_tunn_update_type *tun_type)
{
        *p_tunn_cls = tun_type->tun_cls;
}

static void
qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
                            struct qed_tunn_update_type *tun_type,
                            u8 *p_update_port,
                            __le16 *p_port,
                            struct qed_tunn_update_udp_port *p_udp_port)
{
        __qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
        if (p_udp_port->b_update_port) {
                *p_update_port = 1;
                *p_port = cpu_to_le16(p_udp_port->port);
        }
}

static void
qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
                              struct qed_tunnel_info *p_src,
                              struct pf_update_tunnel_config *p_tunn_cfg)
{
        struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

        qed_set_pf_update_tunn_mode(p_tun, p_src, false);
        qed_set_tunn_cls_info(p_tun, p_src);
        qed_set_tunn_ports(p_tun, p_src);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                                    &p_tun->vxlan,
                                    &p_tunn_cfg->set_vxlan_udp_port_flg,
                                    &p_tunn_cfg->vxlan_udp_port,
                                    &p_tun->vxlan_port);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                                    &p_tun->l2_geneve,
                                    &p_tunn_cfg->set_geneve_udp_port_flg,
                                    &p_tunn_cfg->geneve_udp_port,
                                    &p_tun->geneve_port);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                                      &p_tun->ip_geneve);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                                      &p_tun->l2_gre);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                                      &p_tun->ip_gre);

        p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
        p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}

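/* Apply the cached GRE/VXLAN/GENEVE enable state to the hardware. */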
static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_tunnel_info *p_tun)
{
        qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
                           p_tun->ip_gre.b_mode_enabled);
        qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

        qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
                              p_tun->ip_geneve.b_mode_enabled);
}

static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_tunnel_info *p_tunn)
{
        if (p_tunn->vxlan_port.b_update_port)
                qed_set_vxlan_dest_port(p_hwfn, p_ptt,
                                        p_tunn->vxlan_port.port);

        if (p_tunn->geneve_port.b_update_port)
                qed_set_geneve_dest_port(p_hwfn, p_ptt,
                                         p_tunn->geneve_port.port);

        qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}

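/* Fill the tunnel section of the PF-start ramrod from the requested
 * configuration, updating the per-device tunnel cache along the way.
 */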
static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
                             struct qed_tunnel_info *p_src,
                             struct pf_start_tunnel_config *p_tunn_cfg)
{
        struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

        if (!p_src)
                return;

        qed_set_pf_update_tunn_mode(p_tun, p_src, true);
        qed_set_tunn_cls_info(p_tun, p_src);
        qed_set_tunn_ports(p_tun, p_src);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                                    &p_tun->vxlan,
                                    &p_tunn_cfg->set_vxlan_udp_port_flg,
                                    &p_tunn_cfg->vxlan_udp_port,
                                    &p_tun->vxlan_port);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                                    &p_tun->l2_geneve,
                                    &p_tunn_cfg->set_geneve_udp_port_flg,
                                    &p_tunn_cfg->geneve_udp_port,
                                    &p_tun->geneve_port);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                                      &p_tun->ip_geneve);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                                      &p_tun->l2_gre);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                                      &p_tun->ip_gre);
}

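/* Post the COMMON_RAMROD_PF_START ramrod: event/consolidation queue
 * addresses, multi-function mode, outer-tag and tunnel configuration,
 * SR-IOV VF range and the PF personality.
 */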
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
                    struct qed_ptt *p_ptt,
                    struct qed_tunnel_info *p_tunn,
                    bool allow_npar_tx_switch)
{
        struct outer_tag_config_struct *outer_tag_config;
        struct pf_start_ramrod_data *p_ramrod = NULL;
        u16 sb = qed_int_get_sp_sb_id(p_hwfn);
        u8 sb_index = p_hwfn->p_eq->eq_sb_index;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        u8 page_cnt, i;
        int rc;

        /* update initial eq producer */
        qed_eq_prod_update(p_hwfn,
                           qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_START,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.pf_start;

        p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
        p_ramrod->event_ring_sb_index = sb_index;
        p_ramrod->path_id = QED_PATH_ID(p_hwfn);
        p_ramrod->dont_log_ramrods = 0;
        p_ramrod->log_type_mask = cpu_to_le16(0xf);

        if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
                p_ramrod->mf_mode = MF_OVLAN;
        else
                p_ramrod->mf_mode = MF_NPAR;

        outer_tag_config = &p_ramrod->outer_tag_config;
        outer_tag_config->outer_tag.tci = cpu_to_le16(p_hwfn->hw_info.ovlan);

        if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) {
                outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021Q);
        } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
                outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021AD);
                outer_tag_config->enable_stag_pri_change = 1;
        }

        outer_tag_config->pri_map_valid = 1;
        for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
                outer_tag_config->inner_to_outer_pri_map[i] = i;

        /* enable_stag_pri_change should be set if port is in BD mode or,
         * UFP with Host Control mode.
         */
        if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) {
                if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
                        outer_tag_config->enable_stag_pri_change = 1;
                else
                        outer_tag_config->enable_stag_pri_change = 0;

                outer_tag_config->outer_tag.tci |=
                    cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
        }

        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
                       qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
        page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
        p_ramrod->event_ring_num_pages = page_cnt;
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
                       qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));

        qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);

        if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits))
                p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

        switch (p_hwfn->hw_info.personality) {
        case QED_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        case QED_PCI_FCOE:
                p_ramrod->personality = PERSONALITY_FCOE;
                break;
        case QED_PCI_ISCSI:
                p_ramrod->personality = PERSONALITY_ISCSI;
                break;
        case QED_PCI_ETH_ROCE:
        case QED_PCI_ETH_IWARP:
                p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown personality %d\n",
                          p_hwfn->hw_info.personality);
                p_ramrod->personality = PERSONALITY_ETH;
        }

        if (p_hwfn->cdev->p_iov_info) {
                struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

                p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
                p_ramrod->num_vfs = (u8) p_iov->total_vfs;
        }
        p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
                   sb, sb_index, outer_tag_config->outer_tag.tci);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        if (p_tunn)
                qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
                                          &p_hwfn->cdev->tunnel);

        return rc;
}

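/* Post a PF-update ramrod carrying the current DCBx results. */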
int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_CB;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
                                      &p_ent->ramrod.pf_update);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

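/* Post a PF-update ramrod toggling S-tag priority change for UFP. */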
int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) {
                DP_INFO(p_hwfn, "Invalid priority type %d\n",
                        p_hwfn->ufp_info.pri_type);
                return -EINVAL;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_CB;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
        if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
                p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
        else
                p_ent->ramrod.pf_update.enable_stag_pri_change = 0;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

/* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              struct qed_tunnel_info *p_tunn,
                              enum spq_mode comp_mode,
                              struct qed_spq_comp_cb *p_comp_data)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        if (IS_VF(p_hwfn->cdev))
                return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

        if (!p_tunn)
                return -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
                                      &p_ent->ramrod.pf_update.tunnel_config);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                return rc;

        qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel);

        return rc;
}

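/* Post a PF-stop ramrod and wait for its completion (EBLOCK mode). */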
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

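/* Post an empty ramrod, used as a firmware heartbeat check. */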
int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

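/* Post a PF-update ramrod refreshing the default S-tag (outer VLAN). */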
int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_CB;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
        p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
        if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
                p_ent->ramrod.pf_update.mf_vlan |=
                        cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));

        return qed_spq_post(p_hwfn, p_ent, NULL);
}