// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
			    struct qed_spq_entry *p_ent)
{
	/* qed_spq_get_entry() can either get an entry from the free_pool,
	 * or, if no entries are left, allocate a new entry and add it to
	 * the unlimited_pending list.
	 */
	if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
		kfree(p_ent);
	else
		qed_spq_return_entry(p_hwfn, p_ent);
}

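/* Acquire an SPQ entry, fill its ramrod header (CID, command and protocol
 * IDs) and set up the completion mode requested in @p_data. On failure the
 * entry is released via qed_sp_destroy_request().
 */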
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
			struct qed_spq_entry **pp_ent,
			u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
{
	u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
	struct qed_spq_entry *p_ent = NULL;
	int rc;

	if (!pp_ent)
		return -ENOMEM;

	rc = qed_spq_get_entry(p_hwfn, pp_ent);
	if (rc)
		return rc;

	p_ent = *pp_ent;

	p_ent->elem.hdr.cid = cpu_to_le32(opaque_cid);
	p_ent->elem.hdr.cmd_id = cmd;
	p_ent->elem.hdr.protocol_id = protocol;

	p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
	p_ent->comp_mode = p_data->comp_mode;
	p_ent->comp_done.done = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
		p_ent->comp_cb.cookie = &p_ent->comp_done;
		break;

	case QED_SPQ_MODE_BLOCK:
		if (!p_data->p_comp_data)
			goto err;

		p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
		break;

	case QED_SPQ_MODE_CB:
		if (!p_data->p_comp_data)
			p_ent->comp_cb.function = NULL;
		else
			p_ent->comp_cb = *p_data->p_comp_data;
		break;

	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		goto err;
	}

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SPQ,
		   "Initialized: CID %08x %s:[%02x] %s:%02x data_addr %llx comp_mode [%s]\n",
		   opaque_cid, qed_get_ramrod_cmd_id_str(protocol, cmd),
		   cmd, qed_get_protocol_type_str(protocol), protocol,
		   (unsigned long long)(uintptr_t)&p_ent->ramrod,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

	return 0;

err:
	qed_sp_destroy_request(p_hwfn, p_ent);

	return -EINVAL;
}

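/* Translate a driver tunnel classification type into the FW tunnel_clss
 * enum; unknown values fall back to TUNNEL_CLSS_MAC_VLAN.
 */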
static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
{
	switch (type) {
	case QED_TUNN_CLSS_MAC_VLAN:
		return TUNNEL_CLSS_MAC_VLAN;
	case QED_TUNN_CLSS_MAC_VNI:
		return TUNNEL_CLSS_MAC_VNI;
	case QED_TUNN_CLSS_INNER_MAC_VLAN:
		return TUNNEL_CLSS_INNER_MAC_VLAN;
	case QED_TUNN_CLSS_INNER_MAC_VNI:
		return TUNNEL_CLSS_INNER_MAC_VNI;
	case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
		return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
	default:
		return TUNNEL_CLSS_MAC_VLAN;
	}
}

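/* Copy the enabled state of each tunnel type from @p_src when an update is
 * requested for that type, or unconditionally on PF start.
 */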
static void
qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
			    struct qed_tunnel_info *p_src, bool b_pf_start)
{
	if (p_src->vxlan.b_update_mode || b_pf_start)
		p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

	if (p_src->l2_gre.b_update_mode || b_pf_start)
		p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

	if (p_src->ip_gre.b_update_mode || b_pf_start)
		p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

	if (p_src->l2_geneve.b_update_mode || b_pf_start)
		p_tun->l2_geneve.b_mode_enabled =
		    p_src->l2_geneve.b_mode_enabled;

	if (p_src->ip_geneve.b_update_mode || b_pf_start)
		p_tun->ip_geneve.b_mode_enabled =
		    p_src->ip_geneve.b_mode_enabled;
}

static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
				  struct qed_tunnel_info *p_src)
{
	int type;

	p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
	p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

	type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
	p_tun->vxlan.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
	p_tun->l2_gre.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
	p_tun->ip_gre.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
	p_tun->l2_geneve.tun_cls = type;
	type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
	p_tun->ip_geneve.tun_cls = type;
}

static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
			       struct qed_tunnel_info *p_src)
{
	p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
	p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

	if (p_src->geneve_port.b_update_port)
		p_tun->geneve_port.port = p_src->geneve_port.port;

	if (p_src->vxlan_port.b_update_port)
		p_tun->vxlan_port.port = p_src->vxlan_port.port;
}

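/* Helpers that copy the tunnel classification (and, when an update is
 * requested, the UDP destination port) into the ramrod tunnel config fields.
 */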
static void
__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
			      struct qed_tunn_update_type *tun_type)
{
	*p_tunn_cls = tun_type->tun_cls;
}

static void
qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
			    struct qed_tunn_update_type *tun_type,
			    u8 *p_update_port,
			    __le16 *p_port,
			    struct qed_tunn_update_udp_port *p_udp_port)
{
	__qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
	if (p_udp_port->b_update_port) {
		*p_update_port = 1;
		*p_port = cpu_to_le16(p_udp_port->port);
	}
}

static void
qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
			      struct qed_tunnel_info *p_src,
			      struct pf_update_tunnel_config *p_tunn_cfg)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

	qed_set_pf_update_tunn_mode(p_tun, p_src, false);
	qed_set_tunn_cls_info(p_tun, p_src);
	qed_set_tunn_ports(p_tun, p_src);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				    &p_tun->vxlan,
				    &p_tunn_cfg->set_vxlan_udp_port_flg,
				    &p_tunn_cfg->vxlan_udp_port,
				    &p_tun->vxlan_port);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				    &p_tun->l2_geneve,
				    &p_tunn_cfg->set_geneve_udp_port_flg,
				    &p_tunn_cfg->geneve_udp_port,
				    &p_tun->geneve_port);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
				      &p_tun->ip_geneve);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
				      &p_tun->l2_gre);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
				      &p_tun->ip_gre);

	p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
}

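/* Program the GRE, VXLAN and GENEVE enable bits in HW according to the
 * current tunnel mode.
 */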
static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_tunnel_info *p_tun)
{
	qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
			   p_tun->ip_gre.b_mode_enabled);
	qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

	qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
			      p_tun->ip_geneve.b_mode_enabled);
}

static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_tunnel_info *p_tunn)
{
	if (p_tunn->vxlan_port.b_update_port)
		qed_set_vxlan_dest_port(p_hwfn, p_ptt,
					p_tunn->vxlan_port.port);

	if (p_tunn->geneve_port.b_update_port)
		qed_set_geneve_dest_port(p_hwfn, p_ptt,
					 p_tunn->geneve_port.port);

	qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}

static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
			     struct qed_tunnel_info *p_src,
			     struct pf_start_tunnel_config *p_tunn_cfg)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

	if (!p_src)
		return;

	qed_set_pf_update_tunn_mode(p_tun, p_src, true);
	qed_set_tunn_cls_info(p_tun, p_src);
	qed_set_tunn_ports(p_tun, p_src);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				    &p_tun->vxlan,
				    &p_tunn_cfg->set_vxlan_udp_port_flg,
				    &p_tunn_cfg->vxlan_udp_port,
				    &p_tun->vxlan_port);

	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				    &p_tun->l2_geneve,
				    &p_tunn_cfg->set_geneve_udp_port_flg,
				    &p_tunn_cfg->geneve_udp_port,
				    &p_tun->geneve_port);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
				      &p_tun->ip_geneve);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
				      &p_tun->l2_gre);

	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
				      &p_tun->ip_gre);
}

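/* Post the COMMON_RAMROD_PF_START ramrod: event ring and consolidation
 * queue addresses, MF mode, outer-tag and tunnel configuration, PF
 * personality and SR-IOV VF range.
 */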
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_tunnel_info *p_tunn,
		    bool allow_npar_tx_switch)
{
	struct outer_tag_config_struct *outer_tag_config;
	struct pf_start_ramrod_data *p_ramrod = NULL;
	u16 sb = qed_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 page_cnt, i;
	int rc;

	/* update initial eq producer */
	qed_eq_prod_update(p_hwfn,
			   qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.pf_start;

	p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = QED_PATH_ID(p_hwfn);
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = cpu_to_le16(0xf);

	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
		p_ramrod->mf_mode = MF_OVLAN;
	else
		p_ramrod->mf_mode = MF_NPAR;

	outer_tag_config = &p_ramrod->outer_tag_config;
	outer_tag_config->outer_tag.tci = cpu_to_le16(p_hwfn->hw_info.ovlan);

	if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) {
		outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021Q);
	} else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
		outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021AD);
		outer_tag_config->enable_stag_pri_change = 1;
	}

	outer_tag_config->pri_map_valid = 1;
	for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
		outer_tag_config->inner_to_outer_pri_map[i] = i;

	/* enable_stag_pri_change should be set if port is in BD mode or,
	 * UFP with Host Control mode.
	 */
	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) {
		if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
			outer_tag_config->enable_stag_pri_change = 1;
		else
			outer_tag_config->enable_stag_pri_change = 0;

		outer_tag_config->outer_tag.tci |=
			cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
	}

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;

	/* Place consolidation queue address in ramrod */
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));
	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain);
	p_ramrod->consolid_q_num_pages = page_cnt;

	qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);

	if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_FCOE:
		p_ramrod->personality = PERSONALITY_FCOE;
		break;
	case QED_PCI_ISCSI:
	case QED_PCI_NVMETCP:
		p_ramrod->personality = PERSONALITY_TCP_ULP;
		break;
	case QED_PCI_ETH_ROCE:
	case QED_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown personality %d\n",
			  p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8)p_iov->total_vfs;
	}
	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
		   sb, sb_index, outer_tag_config->outer_tag.tci);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	if (p_tunn)
		qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
					  &p_hwfn->cdev->tunnel);

	return rc;
}

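/* Post a PF_UPDATE ramrod carrying the current DCBx results. */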
int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
				      &p_ent->ramrod.pf_update);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

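/* Post a PF_UPDATE ramrod that updates the S-tag priority-change setting
 * according to the UFP priority type.
 */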
int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) {
		DP_INFO(p_hwfn, "Invalid priority type %d\n",
			p_hwfn->ufp_info.pri_type);
		return -EINVAL;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
	if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
		p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
	else
		p_ent->ramrod.pf_update.enable_stag_pri_change = 0;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

/* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct qed_tunnel_info *p_tunn,
			      enum spq_mode comp_mode,
			      struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

	if (!p_tunn)
		return -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
				      &p_ent->ramrod.pf_update.tunnel_config);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel);

	return rc;
}

int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

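/* Post a PF_UPDATE ramrod that refreshes the MF outer VLAN (S-tag). */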
int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
	p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
		p_ent->ramrod.pf_update.mf_vlan |=
			cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));

	return qed_spq_post(p_hwfn, p_ent, NULL);
}