// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_iro_hsi.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);

static u16 qed_vf_from_entity_id(__le16 entity_id)
{
	return le16_to_cpu(entity_id) - MAX_NUM_PFS;
}

static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
	u8 legacy = 0;

	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		legacy |= QED_QCID_LEGACY_VF_RX_PROD;

	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		legacy |= QED_QCID_LEGACY_VF_CID;

	return legacy;
}
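
/* Note on the VF_START/VF_STOP helpers below: they follow the driver's
 * usual slowpath-queue pattern - fill a qed_sp_init_data descriptor
 * (CID, opaque FID, completion mode), acquire an SPQ entry through
 * qed_sp_init_request(), populate the ramrod payload and post it with
 * qed_spq_post().
 */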

static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 fp_minor;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_ETH_ROCE:
	case QED_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		qed_sp_destroy_request(p_hwfn, p_ent);
		return -EINVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR,
			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id,
			   bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				  b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n",
		       __func__, relative_vf_id);

	return vf;
}

static struct qed_queue_cid *
qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
{
	int i;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
			return p_queue->cids[i].p_cid;
	}

	return NULL;
}

enum qed_iov_validate_q_mode {
	QED_IOV_VALIDATE_Q_NA,
	QED_IOV_VALIDATE_Q_ENABLE,
	QED_IOV_VALIDATE_Q_DISABLE,
};

static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					u16 qid,
					enum qed_iov_validate_q_mode mode,
					bool b_is_tx)
{
	int i;

	if (mode == QED_IOV_VALIDATE_Q_NA)
		return true;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		struct qed_vf_queue_cid *p_qcid;

		p_qcid = &p_vf->vf_queues[qid].cids[i];

		if (!p_qcid->p_cid)
			continue;

		if (p_qcid->b_is_tx != b_is_tx)
			continue;

		return mode == QED_IOV_VALIDATE_Q_ENABLE;
	}

	/* In case we haven't found any valid cid, then its disabled */
	return mode == QED_IOV_VALIDATE_Q_DISABLE;
}

static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 rx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 tx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *p_vf, u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}

static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						false))
			return true;

	return false;
}

static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_txqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						true))
			return true;

	return false;
}
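
/* The bulletin board is a one-way PF-to-VF channel: the PF bumps the
 * version, recomputes a CRC32 over everything past the crc field and
 * DMAEs the buffer into VF memory.  A VF-side consumer would typically
 * accept a snapshot only if the CRC matches, e.g. (illustrative sketch,
 * not code from this file):
 *
 *	crc = crc32(0, (u8 *)p_bulletin + sizeof(p_bulletin->crc),
 *		    bulletin_size - sizeof(p_bulletin->crc));
 *	if (crc == p_bulletin->crc && p_bulletin->version != last_version)
 *		consume(p_bulletin);
 */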

static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
				    int vfid, struct qed_ptt *p_ptt)
{
	struct qed_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct qed_dmae_params params;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return -EINVAL;

	if (!p_vf->vf_bulletin)
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vfid = p_vf->abs_vf_id;
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				  &params);
}

static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
	}

	return 0;
}

static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "%s called without allocating mem first\n", __func__);
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;

		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	}
}

static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "%s for %d VFs\n", __func__, num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64)p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64)p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys);

	return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov)
		return -ENOMEM;

	p_hwfn->pf_iov_info = p_sriov;

	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				  qed_sriov_eqe_event);

	return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}
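
/* Worked example for the first_vf_in_pf math below (illustrative numbers
 * only): with ARI, a PF with abs_pf_id 2 and an SR-IOV VF offset of 16
 * yields first_vf_in_pf = 16 + 2 - 16 = 2, i.e. VFs start right after
 * the 16 PFs in the function space.  Without ARI the offset reported by
 * PCI is at least (256 - pf_id), so the '- 256' form is used instead.
 */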

int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (is_kdump_kernel())
		return 0;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info)
		return -ENOMEM;

	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine Vfs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on next device.
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by the latter
	 * to differentiate between the two.
	 */

	if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		cdev->p_iov_info->first_vf_in_pf = first;

		if (QED_PATH_ID(p_hwfn))
			cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		cdev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}

static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
				     int vfid, bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}
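
/* From AH onward the MSI-X/SB count handed to the MFW is effectively
 * per-PF rather than per-VF: the helper below scans all currently
 * enabled child VFs and only asks the MCP to raise the configured value
 * when the new VF needs more SBs than any of its siblings.
 */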

static int
qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
{
	u8 current_max = 0;
	int i;

	/* For AH onward, configuration is per-PF. Find maximum of all
	 * the currently enabled child VFs, and set the number to be that.
	 */
	if (!QED_IS_BB(p_hwfn->cdev)) {
		qed_for_each_vf(p_hwfn, i) {
			struct qed_vf_info *p_vf;

			p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
			if (!p_vf)
				continue;

			current_max = max_t(u8, current_max, p_vf->num_sbs);
		}
	}

	if (num_sbs > current_max)
		return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
					      abs_vf_id, num_sbs);

	return 0;
}

static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					   vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	return rc;
}

/**
 * qed_iov_config_perm_table() - Configure the permission zone table.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window for writing the registers.
 * @vf: VF info data.
 * @enable: The actual permission for this VF.
 *
 * In E4, queue zone permission table size is 320x9. There
 * are 320 VF queues for single engine device (256 for dual
 * engine device), and each entry has the following format:
 * {Valid, VF[7:0]}
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *p_block;
	struct cau_sb_entry sb_entry;
	int qid = 0;
	u32 val = 0;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	for (qid = 0; qid < num_rx_queues; qid++) {
		p_block = qed_get_igu_free_sb(p_hwfn, false);
		vf->igu_sbs[qid] = p_block->igu_sb_id;
		p_block->status &= ~QED_IGU_STATUS_FREE;
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

		qed_wr(p_hwfn, p_ptt,
		       IGU_REG_MAPPING_MEMORY +
		       sizeof(u32) * p_block->igu_sb_id, val);

		/* Configure igu sb in CAU which were marked valid */
		qed_init_cau_sb_entry(p_hwfn, &sb_entry,
				      p_hwfn->rel_pf_id, vf->abs_vf_id, 1);

		qed_dmae_host2grc(p_hwfn, p_ptt,
				  (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  p_block->igu_sb_id * sizeof(u64), 2, NULL);
	}

	vf->num_sbs = (u8)num_rx_queues;

	return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
	}

	vf->num_sbs = 0;
}

static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}
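
/* qed_iov_init_hw_for_vf() is called when the hypervisor enables a VF:
 * it validates the requested queue-zone ids against the PF/VF L2 queue
 * features, allocates IGU status blocks, records the Rx/Tx zone ids and
 * seeds the VF bulletin with the current link configuration.
 */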

static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_iov_vf_init_params *p_params)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params link_params;
	struct qed_mcp_link_state link_state;
	u8 num_of_vf_avaiable_chains = 0;
	struct qed_vf_info *vf = NULL;
	u16 qid, num_irqs;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return -EINVAL;
	}

	/* Perform sanity checking on the requested queue_id */
	for (i = 0; i < p_params->num_queues; i++) {
		u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
		u16 max_vf_qzone = min_vf_qzone +
				   FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;

		qid = p_params->req_rx_queue[i];
		if (qid < min_vf_qzone || qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
				  qid,
				  p_params->rel_vf_id,
				  min_vf_qzone, max_vf_qzone);
			return -EINVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
				  qid, p_params->rel_vf_id, max_vf_qzone);
			return -EINVAL;
		}

		/* If client *really* wants, Tx qid can be shared with PF */
		if (qid < min_vf_qzone)
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
				   p_params->rel_vf_id, qid, i);
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							     p_ptt,
							     vf, num_irqs);
	if (!num_of_vf_avaiable_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_avaiable_chains;
	vf->num_txqs = num_of_vf_avaiable_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct qed_vf_queue *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);
	}

	/* Update the link configuration in bulletin */
	memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
	       sizeof(link_params));
	memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
	memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
	       sizeof(link_caps));
	qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
			 &link_params, &link_state, &link_caps);

	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}

static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
		return -EINVAL;
	}

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	memset(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}
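
/* A PF<->VF message is a chain of TLVs laid out back to back in the
 * mailbox buffer and terminated by CHANNEL_TLV_LIST_END, e.g.
 * (illustrative layout, lengths depend on the actual request):
 *
 *	[ type=CHANNEL_TLV_ACQUIRE,  length=sizeof(struct vfpf_acquire_tlv) ]
 *	[ type=CHANNEL_TLV_LIST_END, length=sizeof(struct channel_list_end_tlv) ]
 *
 * qed_add_tlv() above appends one such entry and advances *offset past
 * it; qed_dp_tlv_list() below walks the chain for debug output.
 */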

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}

static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	/* Once PF copies the rc to the VF, the latter can continue
	 * and send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
	REG_WR(p_hwfn,
	       GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
				USTORM_VF_PF_CHANNEL_READY, eng_vf_id), 1);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);
}

static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
{
	switch (flag) {
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}

static u16
qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 struct qed_iov_vf_mbx *p_mbx,
				 u8 status,
				 u16 tlvs_mask, u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & BIT(i)))
			continue;

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & BIT(i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	return total_len;
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

static struct
qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return NULL;

	return &vf->p_vf_info;
}

static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
	if (!vf_info)
		return;

	/* Clear the VF mac */
	eth_zero_addr(vf_info->mac);

	vf_info->rx_accept_mode = 0;
	vf_info->tx_accept_mode = 0;
}

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i, j;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
		struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];

		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
			if (!p_queue->cids[j].p_cid)
				continue;

			qed_eth_queue_cid_release(p_hwfn,
						  p_queue->cids[j].p_cid);
			p_queue->cids[j].p_cid = NULL;
		}
	}

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}

/* Returns either 0, or log(size) */
static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

	if (val)
		return val + 11;
	return 0;
}

static void
qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *p_vf,
				 struct vf_pf_resc_request *p_req,
				 struct pf_vf_resc *p_resp)
{
	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
	u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
		     qed_db_addr_vf(0, DQ_DEMS_LEGACY);
	u32 bar_size;

	p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);

	/* If VF didn't bother asking for QIDs than don't bother limiting
	 * number of CIDs. The VF doesn't care about the number, and this
	 * has the likely result of causing an additional acquisition.
	 */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	/* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
	 * that would make sure doorbells for all CIDs fall within the bar.
	 * If it doesn't, make sure regview window is sufficient.
	 */
	if (p_vf->acquire.vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
		bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
		if (bar_size)
			bar_size = 1 << bar_size;

		if (p_hwfn->cdev->num_hwfns > 1)
			bar_size /= 2;
	} else {
		bar_size = PXP_VF_BAR0_DQ_LENGTH;
	}

	if (bar_size / db_size < 256)
		p_resp->num_cids = min_t(u8, p_resp->num_cids,
					 (u8)(bar_size / db_size));
}

static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *p_vf,
				      struct vf_pf_resc_request *p_req,
				      struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				(u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = i;
	}

	/* Filter related information */
	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
					p_req->num_mac_filters);
	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
					 p_req->num_vlan_filters);

	qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs,
			   p_resp->num_rxqs,
			   p_req->num_txqs,
			   p_resp->num_txqs,
			   p_req->num_sbs,
			   p_resp->num_sbs,
			   p_req->num_mac_filters,
			   p_resp->num_mac_filters,
			   p_req->num_vlan_filters,
			   p_resp->num_vlan_filters,
			   p_req->num_mc_filters,
			   p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}
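
/* Illustrative sizing for the CID limit applied in
 * qed_iov_vf_mbx_acquire_resc_cids() above (assumed numbers, not taken
 * from the code): with a legacy DEMS doorbell stride of 16 bytes, a 4KB
 * VF doorbell bar covers 4096 / 16 = 256 doorbells - exactly the '256'
 * threshold; a smaller bar clamps num_cids to bar_size / db_size.
 */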

static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
					 struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  offsetof(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  offsetof(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  offsetof(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}
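
/* ACQUIRE is the first mailbox exchange: the VF advertises its fastpath
 * HSI version, capabilities and resource request; the PF validates
 * compatibility, stores the request in vf->acquire, fills the pfdev/resc
 * reply (including the stats addresses prepared above) and, on success,
 * sends the VF_START ramrod and posts the first bulletin.
 */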

static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	int rc;

	memset(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		goto out;
	}

	/* Store the acquire message */
	memcpy(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	/* Share our ability to use multiple queue-ids only with VFs
	 * that request it.
	 */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	/* Share the sizes of the bars with VF */
	resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);

	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this field.
	 */
	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
					 req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						  &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = qed_sp_vf_start(p_hwfn, vf);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;
	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);

	vf->state = VF_ACQUIRED;

out:
	/* Prepare Response */
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}

static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
				  struct qed_vf_info *p_vf, bool val)
{
	struct qed_sp_vport_update_params params;
	int rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return 0;
	}

	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (!rc) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}

static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf)
{
	struct qed_filter_ucast filter;
	int rc = 0;
	int i;

	memset(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = QED_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = QED_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}

static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
				   struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;

	if ((events & BIT(VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}
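
/* The 'events' bitmap handled below mirrors bulletin valid_bitmap bits
 * such as MAC_ADDR_FORCED / VLAN_ADDR_FORCED: when the hypervisor forces
 * a MAC or pvid, the corresponding filters and vport settings are
 * (re)applied to the VF's vport, and the shadow (VF-requested)
 * configuration is restored once a forced feature is removed.
 */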

static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
					  struct qed_vf_info *p_vf, u64 events)
{
	struct qed_filter_ucast filter;
	int rc = 0;

	if (!p_vf->vport_instance)
		return -EINVAL;

	if ((events & BIT(MAC_ADDR_FORCED)) ||
	    p_vf->p_vf_info.is_trusted_configured) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_MAC;
		filter.opcode = QED_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}

		if (p_vf->p_vf_info.is_trusted_configured)
			p_vf->configured_features |=
				BIT(VFPF_BULLETIN_MAC_ADDR);
		else
			p_vf->configured_features |=
				BIT(MAC_ADDR_FORCED);
	}

	if (events & BIT(VLAN_ADDR_FORCED)) {
		struct qed_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
					      QED_FILTER_FLUSH;

		/* Send the ramrod */
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		memset(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ? 1
				      : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = qed_sp_vport_update(p_hwfn,
					 &vport_update,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
			struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
			struct qed_queue_cid *p_cid = NULL;

			/* There can be at most 1 Rx queue on qzone. Find it */
			p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
			if (!p_cid)
				continue;

			rc = qed_sp_eth_rx_queues_update(p_hwfn,
							 (void **)&p_cid,
							 1, 0, 1,
							 QED_SPQ_MODE_EBLOCK,
							 NULL);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed to send Rx update fo queue[0x%04x]\n",
					  p_cid->rel.queue_id);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}

static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_sp_vport_start_params params = { 0 };
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	int rc;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		qed_int_cau_conf_sb(p_hwfn, p_ptt,
				    start->sb_addr[sb_id],
				    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
	}

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;

	/* Non trusted VFs should enable control frame filtering */
	params.check_mac = !vf->p_vf_info.is_trusted_configured;

	rc = qed_sp_eth_vport_start(p_hwfn, &params);
	if (rc) {
		DP_ERR(p_hwfn,
		       "%s returned error %d\n", __func__, rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);

		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	u8 status = PFVF_STATUS_SUCCESS;
	int rc;

	vf->vport_instance--;
	vf->spoof_chk = false;

	if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
	    (qed_iov_validate_active_txq(p_hwfn, vf))) {
		vf->b_malicious = true;
		DP_NOTICE(p_hwfn,
			  "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
			  vf->abs_vf_id);
		status = PFVF_STATUS_MALICIOUS;
		goto out;
	}

	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
	if (rc) {
		DP_ERR(p_hwfn, "%s returned error %d\n",
		       __func__, rc);
		status = PFVF_STATUS_FAILURE;
	}

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *vf,
					  u8 status, bool b_legacy)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	struct vfpf_start_rxq_tlv *req;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
			    length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		req = &mbx->req_virt->start_rxq;
		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
				offsetof(struct mstorm_vf_zone,
					 non_trigger.eth_rx_queue_producers) +
				sizeof(struct eth_rx_prod_data) * req->rx_qid;
	}

	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}

static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
			     struct qed_vf_info *p_vf, bool b_is_tx)
{
	struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
	struct vfpf_qid_tlv *p_qid_tlv;

	/* Search for the qid if the VF published its going to provide it */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
		if (b_is_tx)
			return QED_IOV_LEGACY_QID_TX;

		return QED_IOV_LEGACY_QID_RX;
	}

	p_qid_tlv = (struct vfpf_qid_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
					     CHANNEL_TLV_QID);
	if (!p_qid_tlv) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%2x]: Failed to provide qid\n",
			   p_vf->relative_vf_id);

		return QED_IOV_QID_INVALID;
	}

	if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%02x]: Provided qid out-of-bounds %02x\n",
			   p_vf->relative_vf_id, p_qid_tlv->qid);
		return QED_IOV_QID_INVALID;
	}

	return p_qid_tlv->qid;
}
2086 static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn
*p_hwfn
,
2087 struct qed_ptt
*p_ptt
,
2088 struct qed_vf_info
*vf
)
2090 struct qed_queue_start_common_params params
;
2091 struct qed_queue_cid_vf_params vf_params
;
2092 struct qed_iov_vf_mbx
*mbx
= &vf
->vf_mbx
;
2093 u8 status
= PFVF_STATUS_NO_RESOURCE
;
2094 u8 qid_usage_idx
, vf_legacy
= 0;
2095 struct vfpf_start_rxq_tlv
*req
;
2096 struct qed_vf_queue
*p_queue
;
2097 struct qed_queue_cid
*p_cid
;
2098 struct qed_sb_info sb_dummy
;
2101 req
= &mbx
->req_virt
->start_rxq
;
2103 if (!qed_iov_validate_rxq(p_hwfn
, vf
, req
->rx_qid
,
2104 QED_IOV_VALIDATE_Q_DISABLE
) ||
2105 !qed_iov_validate_sb(p_hwfn
, vf
, req
->hw_sb
))
2108 qid_usage_idx
= qed_iov_vf_mbx_qid(p_hwfn
, vf
, false);
2109 if (qid_usage_idx
== QED_IOV_QID_INVALID
)
2112 p_queue
= &vf
->vf_queues
[req
->rx_qid
];
2113 if (p_queue
->cids
[qid_usage_idx
].p_cid
)
2116 vf_legacy
= qed_vf_calculate_legacy(vf
);
2118 /* Acquire a new queue-cid */
2119 memset(¶ms
, 0, sizeof(params
));
2120 params
.queue_id
= p_queue
->fw_rx_qid
;
2121 params
.vport_id
= vf
->vport_id
;
2122 params
.stats_id
= vf
->abs_vf_id
+ 0x10;
2123 /* Since IGU index is passed via sb_info, construct a dummy one */
2124 memset(&sb_dummy
, 0, sizeof(sb_dummy
));
2125 sb_dummy
.igu_sb_id
= req
->hw_sb
;
2126 params
.p_sb
= &sb_dummy
;
2127 params
.sb_idx
= req
->sb_index
;
2129 memset(&vf_params
, 0, sizeof(vf_params
));
2130 vf_params
.vfid
= vf
->relative_vf_id
;
2131 vf_params
.vf_qid
= (u8
)req
->rx_qid
;
2132 vf_params
.vf_legacy
= vf_legacy
;
2133 vf_params
.qid_usage_idx
= qid_usage_idx
;
2134 p_cid
= qed_eth_queue_to_cid(p_hwfn
, vf
->opaque_fid
,
2135 ¶ms
, true, &vf_params
);
2139 /* Legacy VFs have their Producers in a different location, which they
2140 * calculate on their own and clean the producer prior to this.
2142 if (!(vf_legacy
& QED_QCID_LEGACY_VF_RX_PROD
))
2143 qed_wr(p_hwfn
, p_ptt
, MSEM_REG_FAST_MEMORY
+
2144 SEM_FAST_REG_INT_RAM
+
2145 MSTORM_ETH_VF_PRODS_OFFSET(vf
->abs_vf_id
,
2148 rc
= qed_eth_rxq_start_ramrod(p_hwfn
, p_cid
,
2151 req
->cqe_pbl_addr
, req
->cqe_pbl_size
);
2153 status
= PFVF_STATUS_FAILURE
;
2154 qed_eth_queue_cid_release(p_hwfn
, p_cid
);
2156 p_queue
->cids
[qid_usage_idx
].p_cid
= p_cid
;
2157 p_queue
->cids
[qid_usage_idx
].b_is_tx
= false;
2158 status
= PFVF_STATUS_SUCCESS
;
2159 vf
->num_active_rxqs
++;
2163 qed_iov_vf_mbx_start_rxq_resp(p_hwfn
, p_ptt
, vf
, status
,
2165 QED_QCID_LEGACY_VF_RX_PROD
));
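/* Helpers for the UPDATE_TUNN_PARAM flow: translate the VF's requested
 * tunnel modes/classifications into the PF-wide qed_tunnel_info and mirror
 * the resulting configuration back in the response TLV.
 */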
static void
qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
			       struct qed_tunnel_info *p_tun,
			       u16 tunn_feature_mask)
{
	p_resp->tunn_feature_mask = tunn_feature_mask;
	p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
	p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
	p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
	p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
	p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled;
	p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
	p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
	p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
	p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
	p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
	p_resp->geneve_udp_port = p_tun->geneve_port.port;
	p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
}
static void
__qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
			      struct qed_tunn_update_type *p_tun,
			      enum qed_tunn_mode mask, u8 tun_cls)
{
	if (p_req->tun_mode_update_mask & BIT(mask)) {
		p_tun->b_update_mode = true;

		if (p_req->tunn_mode & BIT(mask))
			p_tun->b_mode_enabled = true;
	}

	p_tun->tun_cls = tun_cls;
}

static void
qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
			    struct qed_tunn_update_type *p_tun,
			    struct qed_tunn_update_udp_port *p_port,
			    enum qed_tunn_mode mask,
			    u8 tun_cls, u8 update_port, u16 port)
{
	if (update_port) {
		p_port->b_update_port = true;
		p_port->port = port;
	}

	__qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
}

static bool
qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
{
	bool b_update_requested = false;

	if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
	    p_req->update_geneve_port || p_req->update_vxlan_port)
		b_update_requested = true;

	return b_update_requested;
}
2230 static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type
*tun
, int *rc
)
2232 if (tun
->b_update_mode
&& !tun
->b_mode_enabled
) {
2233 tun
->b_update_mode
= false;
2239 qed_pf_validate_modify_tunn_config(struct qed_hwfn
*p_hwfn
,
2240 u16
*tun_features
, bool *update
,
2241 struct qed_tunnel_info
*tun_src
)
2243 struct qed_eth_cb_ops
*ops
= p_hwfn
->cdev
->protocol_ops
.eth
;
2244 struct qed_tunnel_info
*tun
= &p_hwfn
->cdev
->tunnel
;
2245 u16 bultn_vxlan_port
, bultn_geneve_port
;
2246 void *cookie
= p_hwfn
->cdev
->ops_cookie
;
2249 *tun_features
= p_hwfn
->cdev
->tunn_feature_mask
;
2250 bultn_vxlan_port
= tun
->vxlan_port
.port
;
2251 bultn_geneve_port
= tun
->geneve_port
.port
;
2252 qed_pf_validate_tunn_mode(&tun_src
->vxlan
, &rc
);
2253 qed_pf_validate_tunn_mode(&tun_src
->l2_geneve
, &rc
);
2254 qed_pf_validate_tunn_mode(&tun_src
->ip_geneve
, &rc
);
2255 qed_pf_validate_tunn_mode(&tun_src
->l2_gre
, &rc
);
2256 qed_pf_validate_tunn_mode(&tun_src
->ip_gre
, &rc
);
2258 if ((tun_src
->b_update_rx_cls
|| tun_src
->b_update_tx_cls
) &&
2259 (tun_src
->vxlan
.tun_cls
!= QED_TUNN_CLSS_MAC_VLAN
||
2260 tun_src
->l2_geneve
.tun_cls
!= QED_TUNN_CLSS_MAC_VLAN
||
2261 tun_src
->ip_geneve
.tun_cls
!= QED_TUNN_CLSS_MAC_VLAN
||
2262 tun_src
->l2_gre
.tun_cls
!= QED_TUNN_CLSS_MAC_VLAN
||
2263 tun_src
->ip_gre
.tun_cls
!= QED_TUNN_CLSS_MAC_VLAN
)) {
2264 tun_src
->b_update_rx_cls
= false;
2265 tun_src
->b_update_tx_cls
= false;
2269 if (tun_src
->vxlan_port
.b_update_port
) {
2270 if (tun_src
->vxlan_port
.port
== tun
->vxlan_port
.port
) {
2271 tun_src
->vxlan_port
.b_update_port
= false;
2274 bultn_vxlan_port
= tun_src
->vxlan_port
.port
;
2278 if (tun_src
->geneve_port
.b_update_port
) {
2279 if (tun_src
->geneve_port
.port
== tun
->geneve_port
.port
) {
2280 tun_src
->geneve_port
.b_update_port
= false;
2283 bultn_geneve_port
= tun_src
->geneve_port
.port
;
2287 qed_for_each_vf(p_hwfn
, i
) {
2288 qed_iov_bulletin_set_udp_ports(p_hwfn
, i
, bultn_vxlan_port
,
2292 qed_schedule_iov(p_hwfn
, QED_IOV_WQ_BULLETIN_UPDATE_FLAG
);
2293 ops
->ports_update(cookie
, bultn_vxlan_port
, bultn_geneve_port
);
2298 static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn
*p_hwfn
,
2299 struct qed_ptt
*p_ptt
,
2300 struct qed_vf_info
*p_vf
)
2302 struct qed_tunnel_info
*p_tun
= &p_hwfn
->cdev
->tunnel
;
2303 struct qed_iov_vf_mbx
*mbx
= &p_vf
->vf_mbx
;
2304 struct pfvf_update_tunn_param_tlv
*p_resp
;
2305 struct vfpf_update_tunn_param_tlv
*p_req
;
2306 u8 status
= PFVF_STATUS_SUCCESS
;
2307 bool b_update_required
= false;
2308 struct qed_tunnel_info tunn
;
2309 u16 tunn_feature_mask
= 0;
2312 mbx
->offset
= (u8
*)mbx
->reply_virt
;
2314 memset(&tunn
, 0, sizeof(tunn
));
2315 p_req
= &mbx
->req_virt
->tunn_param_update
;
2317 if (!qed_iov_pf_validate_tunn_param(p_req
)) {
2318 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
2319 "No tunnel update requested by VF\n");
2320 status
= PFVF_STATUS_FAILURE
;
2324 tunn
.b_update_rx_cls
= p_req
->update_tun_cls
;
2325 tunn
.b_update_tx_cls
= p_req
->update_tun_cls
;
2327 qed_iov_pf_update_tun_param(p_req
, &tunn
.vxlan
, &tunn
.vxlan_port
,
2328 QED_MODE_VXLAN_TUNN
, p_req
->vxlan_clss
,
2329 p_req
->update_vxlan_port
,
2331 qed_iov_pf_update_tun_param(p_req
, &tunn
.l2_geneve
, &tunn
.geneve_port
,
2332 QED_MODE_L2GENEVE_TUNN
,
2333 p_req
->l2geneve_clss
,
2334 p_req
->update_geneve_port
,
2335 p_req
->geneve_port
);
2336 __qed_iov_pf_update_tun_param(p_req
, &tunn
.ip_geneve
,
2337 QED_MODE_IPGENEVE_TUNN
,
2338 p_req
->ipgeneve_clss
);
2339 __qed_iov_pf_update_tun_param(p_req
, &tunn
.l2_gre
,
2340 QED_MODE_L2GRE_TUNN
, p_req
->l2gre_clss
);
2341 __qed_iov_pf_update_tun_param(p_req
, &tunn
.ip_gre
,
2342 QED_MODE_IPGRE_TUNN
, p_req
->ipgre_clss
);
2344 /* If the PF modifies the VF's request, it should still
2345 * return an error in case of a partial or modified
2346 * configuration, as opposed to the requested one.
2348 rc
= qed_pf_validate_modify_tunn_config(p_hwfn
, &tunn_feature_mask
,
2349 &b_update_required
, &tunn
);
2352 status
= PFVF_STATUS_FAILURE
;
2354 /* Check whether the QED client is willing to update anything */
2355 if (b_update_required
) {
2358 rc
= qed_sp_pf_update_tunn_cfg(p_hwfn
, p_ptt
, &tunn
,
2359 QED_SPQ_MODE_EBLOCK
, NULL
);
2361 status
= PFVF_STATUS_FAILURE
;
2363 geneve_port
= p_tun
->geneve_port
.port
;
2364 qed_for_each_vf(p_hwfn
, i
) {
2365 qed_iov_bulletin_set_udp_ports(p_hwfn
, i
,
2366 p_tun
->vxlan_port
.port
,
2372 p_resp
= qed_add_tlv(p_hwfn
, &mbx
->offset
,
2373 CHANNEL_TLV_UPDATE_TUNN_PARAM
, sizeof(*p_resp
));
2375 qed_iov_pf_update_tun_response(p_resp
, p_tun
, tunn_feature_mask
);
2376 qed_add_tlv(p_hwfn
, &mbx
->offset
, CHANNEL_TLV_LIST_END
,
2377 sizeof(struct channel_list_end_tlv
));
2379 qed_iov_send_response(p_hwfn
, p_ptt
, p_vf
, sizeof(*p_resp
), status
);
2382 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn
*p_hwfn
,
2383 struct qed_ptt
*p_ptt
,
2384 struct qed_vf_info
*p_vf
,
2387 struct qed_iov_vf_mbx
*mbx
= &p_vf
->vf_mbx
;
2388 struct pfvf_start_queue_resp_tlv
*p_tlv
;
2389 bool b_legacy
= false;
2392 mbx
->offset
= (u8
*)mbx
->reply_virt
;
2394 /* Taking a bigger struct instead of adding a TLV to the list was a
2395 * mistake, but one we're now stuck with, as some older
2396 * clients assume the size of the previous response.
2398 if (p_vf
->acquire
.vfdev_info
.eth_fp_hsi_minor
==
2399 ETH_HSI_VER_NO_PKT_LEN_TUNN
)
2403 length
= sizeof(*p_tlv
);
2405 length
= sizeof(struct pfvf_def_resp_tlv
);
2407 p_tlv
= qed_add_tlv(p_hwfn
, &mbx
->offset
, CHANNEL_TLV_START_TXQ
,
2409 qed_add_tlv(p_hwfn
, &mbx
->offset
, CHANNEL_TLV_LIST_END
,
2410 sizeof(struct channel_list_end_tlv
));
2412 /* Update the TLV with the response */
2413 if ((status
== PFVF_STATUS_SUCCESS
) && !b_legacy
)
2414 p_tlv
->offset
= qed_db_addr_vf(cid
, DQ_DEMS_LEGACY
);
2416 qed_iov_send_response(p_hwfn
, p_ptt
, p_vf
, length
, status
);
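/* Handle a VF's START_TXQ request: mirror of the Rx path - validate the
 * queue and status block, resolve the qid-usage index, acquire a queue-cid
 * and post the Tx queue start ramrod using the VF's PQ.
 */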
2419 static void qed_iov_vf_mbx_start_txq(struct qed_hwfn
*p_hwfn
,
2420 struct qed_ptt
*p_ptt
,
2421 struct qed_vf_info
*vf
)
2423 struct qed_queue_start_common_params params
;
2424 struct qed_queue_cid_vf_params vf_params
;
2425 struct qed_iov_vf_mbx
*mbx
= &vf
->vf_mbx
;
2426 u8 status
= PFVF_STATUS_NO_RESOURCE
;
2427 struct vfpf_start_txq_tlv
*req
;
2428 struct qed_vf_queue
*p_queue
;
2429 struct qed_queue_cid
*p_cid
;
2430 struct qed_sb_info sb_dummy
;
2431 u8 qid_usage_idx
, vf_legacy
;
2436 memset(¶ms
, 0, sizeof(params
));
2437 req
= &mbx
->req_virt
->start_txq
;
2439 if (!qed_iov_validate_txq(p_hwfn
, vf
, req
->tx_qid
,
2440 QED_IOV_VALIDATE_Q_NA
) ||
2441 !qed_iov_validate_sb(p_hwfn
, vf
, req
->hw_sb
))
2444 qid_usage_idx
= qed_iov_vf_mbx_qid(p_hwfn
, vf
, true);
2445 if (qid_usage_idx
== QED_IOV_QID_INVALID
)
2448 p_queue
= &vf
->vf_queues
[req
->tx_qid
];
2449 if (p_queue
->cids
[qid_usage_idx
].p_cid
)
2452 vf_legacy
= qed_vf_calculate_legacy(vf
);
2454 /* Acquire a new queue-cid */
2455 params
.queue_id
= p_queue
->fw_tx_qid
;
2456 params
.vport_id
= vf
->vport_id
;
2457 params
.stats_id
= vf
->abs_vf_id
+ 0x10;
2459 /* Since IGU index is passed via sb_info, construct a dummy one */
2460 memset(&sb_dummy
, 0, sizeof(sb_dummy
));
2461 sb_dummy
.igu_sb_id
= req
->hw_sb
;
2462 params
.p_sb
= &sb_dummy
;
2463 params
.sb_idx
= req
->sb_index
;
2465 memset(&vf_params
, 0, sizeof(vf_params
));
2466 vf_params
.vfid
= vf
->relative_vf_id
;
2467 vf_params
.vf_qid
= (u8
)req
->tx_qid
;
2468 vf_params
.vf_legacy
= vf_legacy
;
2469 vf_params
.qid_usage_idx
= qid_usage_idx
;
2471 p_cid
= qed_eth_queue_to_cid(p_hwfn
, vf
->opaque_fid
,
2472 ¶ms
, false, &vf_params
);
2476 pq
= qed_get_cm_pq_idx_vf(p_hwfn
, vf
->relative_vf_id
);
2477 rc
= qed_eth_txq_start_ramrod(p_hwfn
, p_cid
,
2478 req
->pbl_addr
, req
->pbl_size
, pq
);
2480 status
= PFVF_STATUS_FAILURE
;
2481 qed_eth_queue_cid_release(p_hwfn
, p_cid
);
2483 status
= PFVF_STATUS_SUCCESS
;
2484 p_queue
->cids
[qid_usage_idx
].p_cid
= p_cid
;
2485 p_queue
->cids
[qid_usage_idx
].b_is_tx
= true;
2490 qed_iov_vf_mbx_start_txq_resp(p_hwfn
, p_ptt
, vf
, cid
, status
);
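/* Stop a single VF Rx queue after checking that the given queue/qid pair
 * really holds an active Rx queue-cid owned by this VF.
 */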
2493 static int qed_iov_vf_stop_rxqs(struct qed_hwfn
*p_hwfn
,
2494 struct qed_vf_info
*vf
,
2496 u8 qid_usage_idx
, bool cqe_completion
)
2498 struct qed_vf_queue
*p_queue
;
2501 if (!qed_iov_validate_rxq(p_hwfn
, vf
, rxq_id
, QED_IOV_VALIDATE_Q_NA
)) {
2504 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2505 vf
->relative_vf_id
, rxq_id
, qid_usage_idx
);
2509 p_queue
= &vf
->vf_queues
[rxq_id
];
2511 /* We've validated the index and the existence of the active RXQ -
2512 * now we need to make sure that it's using the correct qid.
2514 if (!p_queue
->cids
[qid_usage_idx
].p_cid
||
2515 p_queue
->cids
[qid_usage_idx
].b_is_tx
) {
2516 struct qed_queue_cid
*p_cid
;
2518 p_cid
= qed_iov_get_vf_rx_queue_cid(p_queue
);
2521 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2523 rxq_id
, qid_usage_idx
, rxq_id
, p_cid
->qid_usage_idx
);
2527 /* Now that we know we have a valid Rx-queue - close it */
2528 rc
= qed_eth_rx_queue_stop(p_hwfn
,
2529 p_queue
->cids
[qid_usage_idx
].p_cid
,
2530 false, cqe_completion
);
2534 p_queue
->cids
[qid_usage_idx
].p_cid
= NULL
;
2535 vf
->num_active_rxqs
--;
2540 static int qed_iov_vf_stop_txqs(struct qed_hwfn
*p_hwfn
,
2541 struct qed_vf_info
*vf
,
2542 u16 txq_id
, u8 qid_usage_idx
)
2544 struct qed_vf_queue
*p_queue
;
2547 if (!qed_iov_validate_txq(p_hwfn
, vf
, txq_id
, QED_IOV_VALIDATE_Q_NA
))
2550 p_queue
= &vf
->vf_queues
[txq_id
];
2551 if (!p_queue
->cids
[qid_usage_idx
].p_cid
||
2552 !p_queue
->cids
[qid_usage_idx
].b_is_tx
)
2555 rc
= qed_eth_tx_queue_stop(p_hwfn
, p_queue
->cids
[qid_usage_idx
].p_cid
);
2559 p_queue
->cids
[qid_usage_idx
].p_cid
= NULL
;
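/* STOP_RXQS/STOP_TXQS mailbox handlers. Stopping multiple queues in one
 * request was never used by an official VF driver and is treated as
 * unsupported; only single-queue requests are honored.
 */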
2563 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn
*p_hwfn
,
2564 struct qed_ptt
*p_ptt
,
2565 struct qed_vf_info
*vf
)
2567 u16 length
= sizeof(struct pfvf_def_resp_tlv
);
2568 struct qed_iov_vf_mbx
*mbx
= &vf
->vf_mbx
;
2569 u8 status
= PFVF_STATUS_FAILURE
;
2570 struct vfpf_stop_rxqs_tlv
*req
;
2574 /* There has never been an official driver that used this interface
2575 * for stopping multiple queues, and it is now considered deprecated.
2576 * Validate this isn't used here.
2578 req
= &mbx
->req_virt
->stop_rxqs
;
2579 if (req
->num_rxqs
!= 1) {
2580 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
2581 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2582 vf
->relative_vf_id
);
2583 status
= PFVF_STATUS_NOT_SUPPORTED
;
2587 /* Find which qid-index is associated with the queue */
2588 qid_usage_idx
= qed_iov_vf_mbx_qid(p_hwfn
, vf
, false);
2589 if (qid_usage_idx
== QED_IOV_QID_INVALID
)
2592 rc
= qed_iov_vf_stop_rxqs(p_hwfn
, vf
, req
->rx_qid
,
2593 qid_usage_idx
, req
->cqe_completion
);
2595 status
= PFVF_STATUS_SUCCESS
;
2597 qed_iov_prepare_resp(p_hwfn
, p_ptt
, vf
, CHANNEL_TLV_STOP_RXQS
,
2601 static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn
*p_hwfn
,
2602 struct qed_ptt
*p_ptt
,
2603 struct qed_vf_info
*vf
)
2605 u16 length
= sizeof(struct pfvf_def_resp_tlv
);
2606 struct qed_iov_vf_mbx
*mbx
= &vf
->vf_mbx
;
2607 u8 status
= PFVF_STATUS_FAILURE
;
2608 struct vfpf_stop_txqs_tlv
*req
;
2612 /* There has never been an official driver that used this interface
2613 * for stopping multiple queues, and it is now considered deprecated.
2614 * Validate this isn't used here.
2616 req
= &mbx
->req_virt
->stop_txqs
;
2617 if (req
->num_txqs
!= 1) {
2618 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
2619 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2620 vf
->relative_vf_id
);
2621 status
= PFVF_STATUS_NOT_SUPPORTED
;
2625 /* Find which qid-index is associated with the queue */
2626 qid_usage_idx
= qed_iov_vf_mbx_qid(p_hwfn
, vf
, true);
2627 if (qid_usage_idx
== QED_IOV_QID_INVALID
)
2630 rc
= qed_iov_vf_stop_txqs(p_hwfn
, vf
, req
->tx_qid
, qid_usage_idx
);
2632 status
= PFVF_STATUS_SUCCESS
;
2635 qed_iov_prepare_resp(p_hwfn
, p_ptt
, vf
, CHANNEL_TLV_STOP_TXQS
,
2639 static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn
*p_hwfn
,
2640 struct qed_ptt
*p_ptt
,
2641 struct qed_vf_info
*vf
)
2643 struct qed_queue_cid
*handlers
[QED_MAX_VF_CHAINS_PER_PF
];
2644 u16 length
= sizeof(struct pfvf_def_resp_tlv
);
2645 struct qed_iov_vf_mbx
*mbx
= &vf
->vf_mbx
;
2646 struct vfpf_update_rxq_tlv
*req
;
2647 u8 status
= PFVF_STATUS_FAILURE
;
2648 u8 complete_event_flg
;
2649 u8 complete_cqe_flg
;
2654 req
= &mbx
->req_virt
->update_rxq
;
2655 complete_cqe_flg
= !!(req
->flags
& VFPF_RXQ_UPD_COMPLETE_CQE_FLAG
);
2656 complete_event_flg
= !!(req
->flags
& VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG
);
2658 qid_usage_idx
= qed_iov_vf_mbx_qid(p_hwfn
, vf
, false);
2659 if (qid_usage_idx
== QED_IOV_QID_INVALID
)
2662 /* There shouldn't exist a VF that uses queue-qids yet uses this
2663 * API with multiple Rx queues. Validate this.
2665 if ((vf
->acquire
.vfdev_info
.capabilities
&
2666 VFPF_ACQUIRE_CAP_QUEUE_QIDS
) && req
->num_rxqs
!= 1) {
2667 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
2668 "VF[%d] supports QIDs but sends multiple queues\n",
2669 vf
->relative_vf_id
);
2673 /* Validate inputs - for the legacy case this is still true since
2674 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2676 for (i
= req
->rx_qid
; i
< req
->rx_qid
+ req
->num_rxqs
; i
++) {
2677 if (!qed_iov_validate_rxq(p_hwfn
, vf
, i
,
2678 QED_IOV_VALIDATE_Q_NA
) ||
2679 !vf
->vf_queues
[i
].cids
[qid_usage_idx
].p_cid
||
2680 vf
->vf_queues
[i
].cids
[qid_usage_idx
].b_is_tx
) {
2681 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
2682 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2683 vf
->relative_vf_id
, req
->rx_qid
,
2689 /* Prepare the handlers */
2690 for (i
= 0; i
< req
->num_rxqs
; i
++) {
2691 u16 qid
= req
->rx_qid
+ i
;
2693 handlers
[i
] = vf
->vf_queues
[qid
].cids
[qid_usage_idx
].p_cid
;
2696 rc
= qed_sp_eth_rx_queues_update(p_hwfn
, (void **)&handlers
,
2700 QED_SPQ_MODE_EBLOCK
, NULL
);
2704 status
= PFVF_STATUS_SUCCESS
;
2706 qed_iov_prepare_resp(p_hwfn
, p_ptt
, vf
, CHANNEL_TLV_UPDATE_RXQ
,
2710 void *qed_iov_search_list_tlvs(struct qed_hwfn
*p_hwfn
,
2711 void *p_tlvs_list
, u16 req_type
)
2713 struct channel_tlv
*p_tlv
= (struct channel_tlv
*)p_tlvs_list
;
2717 if (!p_tlv
->length
) {
2718 DP_NOTICE(p_hwfn
, "Zero length TLV found\n");
2722 if (p_tlv
->type
== req_type
) {
2723 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
2724 "Extended tlv type %d, length %d found\n",
2725 p_tlv
->type
, p_tlv
->length
);
2729 len
+= p_tlv
->length
;
2730 p_tlv
= (struct channel_tlv
*)((u8
*)p_tlv
+ p_tlv
->length
);
2732 if ((len
+ p_tlv
->length
) > TLV_BUFFER_SIZE
) {
2733 DP_NOTICE(p_hwfn
, "TLVs has overrun the buffer size\n");
2736 } while (p_tlv
->type
!= CHANNEL_TLV_LIST_END
);
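/* The VPORT_UPDATE request is a list of optional TLVs. Each of the
 * qed_iov_vp_update_*() parsers below searches the request for its TLV,
 * copies the values into qed_sp_vport_update_params and sets the matching
 * bit in tlvs_mask so the response can report which TLVs were handled.
 */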
2742 qed_iov_vp_update_act_param(struct qed_hwfn
*p_hwfn
,
2743 struct qed_sp_vport_update_params
*p_data
,
2744 struct qed_iov_vf_mbx
*p_mbx
, u16
*tlvs_mask
)
2746 struct vfpf_vport_update_activate_tlv
*p_act_tlv
;
2747 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_ACTIVATE
;
2749 p_act_tlv
= (struct vfpf_vport_update_activate_tlv
*)
2750 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
, tlv
);
2754 p_data
->update_vport_active_rx_flg
= p_act_tlv
->update_rx
;
2755 p_data
->vport_active_rx_flg
= p_act_tlv
->active_rx
;
2756 p_data
->update_vport_active_tx_flg
= p_act_tlv
->update_tx
;
2757 p_data
->vport_active_tx_flg
= p_act_tlv
->active_tx
;
2758 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_ACTIVATE
;
2762 qed_iov_vp_update_vlan_param(struct qed_hwfn
*p_hwfn
,
2763 struct qed_sp_vport_update_params
*p_data
,
2764 struct qed_vf_info
*p_vf
,
2765 struct qed_iov_vf_mbx
*p_mbx
, u16
*tlvs_mask
)
2767 struct vfpf_vport_update_vlan_strip_tlv
*p_vlan_tlv
;
2768 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP
;
2770 p_vlan_tlv
= (struct vfpf_vport_update_vlan_strip_tlv
*)
2771 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
, tlv
);
2775 p_vf
->shadow_config
.inner_vlan_removal
= p_vlan_tlv
->remove_vlan
;
2777 /* Ignore the VF request if we're forcing a vlan */
2778 if (!(p_vf
->configured_features
& BIT(VLAN_ADDR_FORCED
))) {
2779 p_data
->update_inner_vlan_removal_flg
= 1;
2780 p_data
->inner_vlan_removal_flg
= p_vlan_tlv
->remove_vlan
;
2783 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP
;
2787 qed_iov_vp_update_tx_switch(struct qed_hwfn
*p_hwfn
,
2788 struct qed_sp_vport_update_params
*p_data
,
2789 struct qed_iov_vf_mbx
*p_mbx
, u16
*tlvs_mask
)
2791 struct vfpf_vport_update_tx_switch_tlv
*p_tx_switch_tlv
;
2792 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH
;
2794 p_tx_switch_tlv
= (struct vfpf_vport_update_tx_switch_tlv
*)
2795 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
,
2797 if (!p_tx_switch_tlv
)
2800 p_data
->update_tx_switching_flg
= 1;
2801 p_data
->tx_switching_flg
= p_tx_switch_tlv
->tx_switching
;
2802 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_TX_SWITCH
;
2806 qed_iov_vp_update_mcast_bin_param(struct qed_hwfn
*p_hwfn
,
2807 struct qed_sp_vport_update_params
*p_data
,
2808 struct qed_iov_vf_mbx
*p_mbx
, u16
*tlvs_mask
)
2810 struct vfpf_vport_update_mcast_bin_tlv
*p_mcast_tlv
;
2811 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_MCAST
;
2813 p_mcast_tlv
= (struct vfpf_vport_update_mcast_bin_tlv
*)
2814 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
, tlv
);
2818 p_data
->update_approx_mcast_flg
= 1;
2819 memcpy(p_data
->bins
, p_mcast_tlv
->bins
,
2820 sizeof(u32
) * ETH_MULTICAST_MAC_BINS_IN_REGS
);
2821 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_MCAST
;
2825 qed_iov_vp_update_accept_flag(struct qed_hwfn
*p_hwfn
,
2826 struct qed_sp_vport_update_params
*p_data
,
2827 struct qed_iov_vf_mbx
*p_mbx
, u16
*tlvs_mask
)
2829 struct qed_filter_accept_flags
*p_flags
= &p_data
->accept_flags
;
2830 struct vfpf_vport_update_accept_param_tlv
*p_accept_tlv
;
2831 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM
;
2833 p_accept_tlv
= (struct vfpf_vport_update_accept_param_tlv
*)
2834 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
, tlv
);
2838 p_flags
->update_rx_mode_config
= p_accept_tlv
->update_rx_mode
;
2839 p_flags
->rx_accept_filter
= p_accept_tlv
->rx_accept_filter
;
2840 p_flags
->update_tx_mode_config
= p_accept_tlv
->update_tx_mode
;
2841 p_flags
->tx_accept_filter
= p_accept_tlv
->tx_accept_filter
;
2842 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM
;
2846 qed_iov_vp_update_accept_any_vlan(struct qed_hwfn
*p_hwfn
,
2847 struct qed_sp_vport_update_params
*p_data
,
2848 struct qed_iov_vf_mbx
*p_mbx
, u16
*tlvs_mask
)
2850 struct vfpf_vport_update_accept_any_vlan_tlv
*p_accept_any_vlan
;
2851 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN
;
2853 p_accept_any_vlan
= (struct vfpf_vport_update_accept_any_vlan_tlv
*)
2854 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
,
2856 if (!p_accept_any_vlan
)
2859 p_data
->accept_any_vlan
= p_accept_any_vlan
->accept_any_vlan
;
2860 p_data
->update_accept_any_vlan_flg
=
2861 p_accept_any_vlan
->update_accept_any_vlan_flg
;
2862 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN
;
2866 qed_iov_vp_update_rss_param(struct qed_hwfn
*p_hwfn
,
2867 struct qed_vf_info
*vf
,
2868 struct qed_sp_vport_update_params
*p_data
,
2869 struct qed_rss_params
*p_rss
,
2870 struct qed_iov_vf_mbx
*p_mbx
,
2871 u16
*tlvs_mask
, u16
*tlvs_accepted
)
2873 struct vfpf_vport_update_rss_tlv
*p_rss_tlv
;
2874 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_RSS
;
2875 bool b_reject
= false;
2879 p_rss_tlv
= (struct vfpf_vport_update_rss_tlv
*)
2880 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
, tlv
);
2882 p_data
->rss_params
= NULL
;
2886 memset(p_rss
, 0, sizeof(struct qed_rss_params
));
2888 p_rss
->update_rss_config
= !!(p_rss_tlv
->update_rss_flags
&
2889 VFPF_UPDATE_RSS_CONFIG_FLAG
);
2890 p_rss
->update_rss_capabilities
= !!(p_rss_tlv
->update_rss_flags
&
2891 VFPF_UPDATE_RSS_CAPS_FLAG
);
2892 p_rss
->update_rss_ind_table
= !!(p_rss_tlv
->update_rss_flags
&
2893 VFPF_UPDATE_RSS_IND_TABLE_FLAG
);
2894 p_rss
->update_rss_key
= !!(p_rss_tlv
->update_rss_flags
&
2895 VFPF_UPDATE_RSS_KEY_FLAG
);
2897 p_rss
->rss_enable
= p_rss_tlv
->rss_enable
;
2898 p_rss
->rss_eng_id
= vf
->relative_vf_id
+ 1;
2899 p_rss
->rss_caps
= p_rss_tlv
->rss_caps
;
2900 p_rss
->rss_table_size_log
= p_rss_tlv
->rss_table_size_log
;
2901 memcpy(p_rss
->rss_key
, p_rss_tlv
->rss_key
, sizeof(p_rss
->rss_key
));
2903 table_size
= min_t(u16
, ARRAY_SIZE(p_rss
->rss_ind_table
),
2904 (1 << p_rss_tlv
->rss_table_size_log
));
2906 for (i
= 0; i
< table_size
; i
++) {
2907 struct qed_queue_cid
*p_cid
;
2909 q_idx
= p_rss_tlv
->rss_ind_table
[i
];
2910 if (!qed_iov_validate_rxq(p_hwfn
, vf
, q_idx
,
2911 QED_IOV_VALIDATE_Q_ENABLE
)) {
2914 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2915 vf
->relative_vf_id
, q_idx
);
2920 p_cid
= qed_iov_get_vf_rx_queue_cid(&vf
->vf_queues
[q_idx
]);
2921 p_rss
->rss_ind_table
[i
] = p_cid
;
2924 p_data
->rss_params
= p_rss
;
2926 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_RSS
;
2928 *tlvs_accepted
|= 1 << QED_IOV_VP_UPDATE_RSS
;
2932 qed_iov_vp_update_sge_tpa_param(struct qed_hwfn
*p_hwfn
,
2933 struct qed_vf_info
*vf
,
2934 struct qed_sp_vport_update_params
*p_data
,
2935 struct qed_sge_tpa_params
*p_sge_tpa
,
2936 struct qed_iov_vf_mbx
*p_mbx
, u16
*tlvs_mask
)
2938 struct vfpf_vport_update_sge_tpa_tlv
*p_sge_tpa_tlv
;
2939 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_SGE_TPA
;
2941 p_sge_tpa_tlv
= (struct vfpf_vport_update_sge_tpa_tlv
*)
2942 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
, tlv
);
2944 if (!p_sge_tpa_tlv
) {
2945 p_data
->sge_tpa_params
= NULL
;
2949 memset(p_sge_tpa
, 0, sizeof(struct qed_sge_tpa_params
));
2951 p_sge_tpa
->update_tpa_en_flg
=
2952 !!(p_sge_tpa_tlv
->update_sge_tpa_flags
& VFPF_UPDATE_TPA_EN_FLAG
);
2953 p_sge_tpa
->update_tpa_param_flg
=
2954 !!(p_sge_tpa_tlv
->update_sge_tpa_flags
&
2955 VFPF_UPDATE_TPA_PARAM_FLAG
);
2957 p_sge_tpa
->tpa_ipv4_en_flg
=
2958 !!(p_sge_tpa_tlv
->sge_tpa_flags
& VFPF_TPA_IPV4_EN_FLAG
);
2959 p_sge_tpa
->tpa_ipv6_en_flg
=
2960 !!(p_sge_tpa_tlv
->sge_tpa_flags
& VFPF_TPA_IPV6_EN_FLAG
);
2961 p_sge_tpa
->tpa_pkt_split_flg
=
2962 !!(p_sge_tpa_tlv
->sge_tpa_flags
& VFPF_TPA_PKT_SPLIT_FLAG
);
2963 p_sge_tpa
->tpa_hdr_data_split_flg
=
2964 !!(p_sge_tpa_tlv
->sge_tpa_flags
& VFPF_TPA_HDR_DATA_SPLIT_FLAG
);
2965 p_sge_tpa
->tpa_gro_consistent_flg
=
2966 !!(p_sge_tpa_tlv
->sge_tpa_flags
& VFPF_TPA_GRO_CONSIST_FLAG
);
2968 p_sge_tpa
->tpa_max_aggs_num
= p_sge_tpa_tlv
->tpa_max_aggs_num
;
2969 p_sge_tpa
->tpa_max_size
= p_sge_tpa_tlv
->tpa_max_size
;
2970 p_sge_tpa
->tpa_min_size_to_start
= p_sge_tpa_tlv
->tpa_min_size_to_start
;
2971 p_sge_tpa
->tpa_min_size_to_cont
= p_sge_tpa_tlv
->tpa_min_size_to_cont
;
2972 p_sge_tpa
->max_buffers_per_cqe
= p_sge_tpa_tlv
->max_buffers_per_cqe
;
2974 p_data
->sge_tpa_params
= p_sge_tpa
;
2976 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_SGE_TPA
;
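/* Sanitize the accept-mode related TLVs before applying a vport update:
 * for VFs that are not configured as trusted, the unmatched-unicast and
 * unmatched-multicast accept bits and accept-any-vlan are silently
 * stripped, while the requested values are still recorded in the public
 * VF info.
 */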
2979 static int qed_iov_pre_update_vport(struct qed_hwfn
*hwfn
,
2981 struct qed_sp_vport_update_params
*params
,
2984 u8 mask
= QED_ACCEPT_UCAST_UNMATCHED
| QED_ACCEPT_MCAST_UNMATCHED
;
2985 struct qed_filter_accept_flags
*flags
= ¶ms
->accept_flags
;
2986 struct qed_public_vf_info
*vf_info
;
2989 tlv_mask
= BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM
) |
2990 BIT(QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN
);
2992 /* Untrusted VFs can't even be trusted to know that fact.
2993 * Simply indicate everything is configured fine, and trace
2994 * configuration 'behind their back'.
2996 if (!(*tlvs
& tlv_mask
))
2999 vf_info
= qed_iov_get_public_vf_info(hwfn
, vfid
, true);
3001 if (flags
->update_rx_mode_config
) {
3002 vf_info
->rx_accept_mode
= flags
->rx_accept_filter
;
3003 if (!vf_info
->is_trusted_configured
)
3004 flags
->rx_accept_filter
&= ~mask
;
3007 if (flags
->update_tx_mode_config
) {
3008 vf_info
->tx_accept_mode
= flags
->tx_accept_filter
;
3009 if (!vf_info
->is_trusted_configured
)
3010 flags
->tx_accept_filter
&= ~mask
;
3013 if (params
->update_accept_any_vlan_flg
) {
3014 vf_info
->accept_any_vlan
= params
->accept_any_vlan
;
3016 if (vf_info
->forced_vlan
&& !vf_info
->is_trusted_configured
)
3017 params
->accept_any_vlan
= false;
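/* Main VPORT_UPDATE handler: collect all extended TLVs from the request,
 * let qed_iov_pre_update_vport() veto what an untrusted VF may change,
 * then issue a single qed_sp_vport_update() and report back which TLVs
 * were accepted.
 */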
3023 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn
*p_hwfn
,
3024 struct qed_ptt
*p_ptt
,
3025 struct qed_vf_info
*vf
)
3027 struct qed_rss_params
*p_rss_params
= NULL
;
3028 struct qed_sp_vport_update_params params
;
3029 struct qed_iov_vf_mbx
*mbx
= &vf
->vf_mbx
;
3030 struct qed_sge_tpa_params sge_tpa_params
;
3031 u16 tlvs_mask
= 0, tlvs_accepted
= 0;
3032 u8 status
= PFVF_STATUS_SUCCESS
;
3036 /* Validate that the VF has a vport instance before handling this request */
3037 if (!vf
->vport_instance
) {
3040 "No VPORT instance available for VF[%d], failing vport update\n",
3042 status
= PFVF_STATUS_FAILURE
;
3045 p_rss_params
= vzalloc(sizeof(*p_rss_params
));
3046 if (!p_rss_params
) {
3047 status
= PFVF_STATUS_FAILURE
;
3051 memset(¶ms
, 0, sizeof(params
));
3052 params
.opaque_fid
= vf
->opaque_fid
;
3053 params
.vport_id
= vf
->vport_id
;
3054 params
.rss_params
= NULL
;
3056 /* Search the extended TLVs list and update the values
3057 * from the VF in struct qed_sp_vport_update_params.
3059 qed_iov_vp_update_act_param(p_hwfn
, ¶ms
, mbx
, &tlvs_mask
);
3060 qed_iov_vp_update_vlan_param(p_hwfn
, ¶ms
, vf
, mbx
, &tlvs_mask
);
3061 qed_iov_vp_update_tx_switch(p_hwfn
, ¶ms
, mbx
, &tlvs_mask
);
3062 qed_iov_vp_update_mcast_bin_param(p_hwfn
, ¶ms
, mbx
, &tlvs_mask
);
3063 qed_iov_vp_update_accept_flag(p_hwfn
, ¶ms
, mbx
, &tlvs_mask
);
3064 qed_iov_vp_update_accept_any_vlan(p_hwfn
, ¶ms
, mbx
, &tlvs_mask
);
3065 qed_iov_vp_update_sge_tpa_param(p_hwfn
, vf
, ¶ms
,
3066 &sge_tpa_params
, mbx
, &tlvs_mask
);
3068 tlvs_accepted
= tlvs_mask
;
3070 /* Some of the extended TLVs need to be validated first; in that case,
3071 * they can update the mask without updating the accepted [so that the
3072 * PF can communicate to the VF that it has rejected the request].
3074 qed_iov_vp_update_rss_param(p_hwfn
, vf
, ¶ms
, p_rss_params
,
3075 mbx
, &tlvs_mask
, &tlvs_accepted
);
3077 if (qed_iov_pre_update_vport(p_hwfn
, vf
->relative_vf_id
,
3078 ¶ms
, &tlvs_accepted
)) {
3080 status
= PFVF_STATUS_NOT_SUPPORTED
;
3084 if (!tlvs_accepted
) {
3086 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3087 "Upper-layer prevents VF vport configuration\n");
3089 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3090 "No feature tlvs found for vport update\n");
3091 status
= PFVF_STATUS_NOT_SUPPORTED
;
3095 rc
= qed_sp_vport_update(p_hwfn
, ¶ms
, QED_SPQ_MODE_EBLOCK
, NULL
);
3098 status
= PFVF_STATUS_FAILURE
;
3101 vfree(p_rss_params
);
3102 length
= qed_iov_prep_vp_update_resp_tlvs(p_hwfn
, vf
, mbx
, status
,
3103 tlvs_mask
, tlvs_accepted
);
3104 qed_iov_send_response(p_hwfn
, p_ptt
, vf
, length
, status
);
3107 static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn
*p_hwfn
,
3108 struct qed_vf_info
*p_vf
,
3109 struct qed_filter_ucast
*p_params
)
3113 /* First remove entries and then add new ones */
3114 if (p_params
->opcode
== QED_FILTER_REMOVE
) {
3115 for (i
= 0; i
< QED_ETH_VF_NUM_VLAN_FILTERS
+ 1; i
++)
3116 if (p_vf
->shadow_config
.vlans
[i
].used
&&
3117 p_vf
->shadow_config
.vlans
[i
].vid
==
3119 p_vf
->shadow_config
.vlans
[i
].used
= false;
3122 if (i
== QED_ETH_VF_NUM_VLAN_FILTERS
+ 1) {
3125 "VF [%d] - Tries to remove a non-existing vlan\n",
3126 p_vf
->relative_vf_id
);
3129 } else if (p_params
->opcode
== QED_FILTER_REPLACE
||
3130 p_params
->opcode
== QED_FILTER_FLUSH
) {
3131 for (i
= 0; i
< QED_ETH_VF_NUM_VLAN_FILTERS
+ 1; i
++)
3132 p_vf
->shadow_config
.vlans
[i
].used
= false;
3135 /* In forced mode, we're willing to remove entries - but we don't add
 * new ones.
 */
3138 if (p_vf
->bulletin
.p_virt
->valid_bitmap
& BIT(VLAN_ADDR_FORCED
))
3141 if (p_params
->opcode
== QED_FILTER_ADD
||
3142 p_params
->opcode
== QED_FILTER_REPLACE
) {
3143 for (i
= 0; i
< QED_ETH_VF_NUM_VLAN_FILTERS
+ 1; i
++) {
3144 if (p_vf
->shadow_config
.vlans
[i
].used
)
3147 p_vf
->shadow_config
.vlans
[i
].used
= true;
3148 p_vf
->shadow_config
.vlans
[i
].vid
= p_params
->vlan
;
3152 if (i
== QED_ETH_VF_NUM_VLAN_FILTERS
+ 1) {
3155 "VF [%d] - Tries to configure more than %d vlan filters\n",
3156 p_vf
->relative_vf_id
,
3157 QED_ETH_VF_NUM_VLAN_FILTERS
+ 1);
3165 static int qed_iov_vf_update_mac_shadow(struct qed_hwfn
*p_hwfn
,
3166 struct qed_vf_info
*p_vf
,
3167 struct qed_filter_ucast
*p_params
)
3171 /* If we're in forced-mode, we don't allow any change */
3172 if (p_vf
->bulletin
.p_virt
->valid_bitmap
& BIT(MAC_ADDR_FORCED
))
3175 /* Don't keep track of shadow copy since we don't intend to restore. */
3176 if (p_vf
->p_vf_info
.is_trusted_configured
)
3179 /* First remove entries and then add new ones */
3180 if (p_params
->opcode
== QED_FILTER_REMOVE
) {
3181 for (i
= 0; i
< QED_ETH_VF_NUM_MAC_FILTERS
; i
++) {
3182 if (ether_addr_equal(p_vf
->shadow_config
.macs
[i
],
3184 eth_zero_addr(p_vf
->shadow_config
.macs
[i
]);
3189 if (i
== QED_ETH_VF_NUM_MAC_FILTERS
) {
3190 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3191 "MAC isn't configured\n");
3194 } else if (p_params
->opcode
== QED_FILTER_REPLACE
||
3195 p_params
->opcode
== QED_FILTER_FLUSH
) {
3196 for (i
= 0; i
< QED_ETH_VF_NUM_MAC_FILTERS
; i
++)
3197 eth_zero_addr(p_vf
->shadow_config
.macs
[i
]);
3200 /* List the new MAC address */
3201 if (p_params
->opcode
!= QED_FILTER_ADD
&&
3202 p_params
->opcode
!= QED_FILTER_REPLACE
)
3205 for (i
= 0; i
< QED_ETH_VF_NUM_MAC_FILTERS
; i
++) {
3206 if (is_zero_ether_addr(p_vf
->shadow_config
.macs
[i
])) {
3207 ether_addr_copy(p_vf
->shadow_config
.macs
[i
],
3209 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3210 "Added MAC at %d entry in shadow\n", i
);
3215 if (i
== QED_ETH_VF_NUM_MAC_FILTERS
) {
3216 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
, "No available place for MAC\n");
3224 qed_iov_vf_update_unicast_shadow(struct qed_hwfn
*p_hwfn
,
3225 struct qed_vf_info
*p_vf
,
3226 struct qed_filter_ucast
*p_params
)
3230 if (p_params
->type
== QED_FILTER_MAC
) {
3231 rc
= qed_iov_vf_update_mac_shadow(p_hwfn
, p_vf
, p_params
);
3236 if (p_params
->type
== QED_FILTER_VLAN
)
3237 rc
= qed_iov_vf_update_vlan_shadow(p_hwfn
, p_vf
, p_params
);
3242 static int qed_iov_chk_ucast(struct qed_hwfn
*hwfn
,
3243 int vfid
, struct qed_filter_ucast
*params
)
3245 struct qed_public_vf_info
*vf
;
3247 vf
= qed_iov_get_public_vf_info(hwfn
, vfid
, true);
3251 /* No real decision to make; Store the configured MAC */
3252 if (params
->type
== QED_FILTER_MAC
||
3253 params
->type
== QED_FILTER_MAC_VLAN
) {
3254 ether_addr_copy(vf
->mac
, params
->mac
);
3256 if (vf
->is_trusted_configured
) {
3257 qed_iov_bulletin_set_mac(hwfn
, vf
->mac
, vfid
);
3259 /* Update and post the bulletin again */
3260 qed_schedule_iov(hwfn
, QED_IOV_WQ_BULLETIN_UPDATE_FLAG
);
3267 static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn
*p_hwfn
,
3268 struct qed_ptt
*p_ptt
,
3269 struct qed_vf_info
*vf
)
3271 struct qed_bulletin_content
*p_bulletin
= vf
->bulletin
.p_virt
;
3272 struct qed_iov_vf_mbx
*mbx
= &vf
->vf_mbx
;
3273 struct vfpf_ucast_filter_tlv
*req
;
3274 u8 status
= PFVF_STATUS_SUCCESS
;
3275 struct qed_filter_ucast params
;
3278 /* Prepare the unicast filter params */
3279 memset(¶ms
, 0, sizeof(struct qed_filter_ucast
));
3280 req
= &mbx
->req_virt
->ucast_filter
;
3281 params
.opcode
= (enum qed_filter_opcode
)req
->opcode
;
3282 params
.type
= (enum qed_filter_ucast_type
)req
->type
;
3284 params
.is_rx_filter
= 1;
3285 params
.is_tx_filter
= 1;
3286 params
.vport_to_remove_from
= vf
->vport_id
;
3287 params
.vport_to_add_to
= vf
->vport_id
;
3288 memcpy(params
.mac
, req
->mac
, ETH_ALEN
);
3289 params
.vlan
= req
->vlan
;
3293 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %pM, vlan 0x%04x\n",
3294 vf
->abs_vf_id
, params
.opcode
, params
.type
,
3295 params
.is_rx_filter
? "RX" : "",
3296 params
.is_tx_filter
? "TX" : "",
3297 params
.vport_to_add_to
,
3298 params
.mac
, params
.vlan
);
3300 if (!vf
->vport_instance
) {
3303 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
3305 status
= PFVF_STATUS_FAILURE
;
3309 /* Update shadow copy of the VF configuration */
3310 if (qed_iov_vf_update_unicast_shadow(p_hwfn
, vf
, ¶ms
)) {
3311 status
= PFVF_STATUS_FAILURE
;
3315 /* Determine if the unicast filtering is acceptable to the PF */
3316 if ((p_bulletin
->valid_bitmap
& BIT(VLAN_ADDR_FORCED
)) &&
3317 (params
.type
== QED_FILTER_VLAN
||
3318 params
.type
== QED_FILTER_MAC_VLAN
)) {
3319 /* Once VLAN is forced or PVID is set, do not allow
3320 * to add/replace any further VLANs.
3322 if (params
.opcode
== QED_FILTER_ADD
||
3323 params
.opcode
== QED_FILTER_REPLACE
)
3324 status
= PFVF_STATUS_FORCED
;
3328 if ((p_bulletin
->valid_bitmap
& BIT(MAC_ADDR_FORCED
)) &&
3329 (params
.type
== QED_FILTER_MAC
||
3330 params
.type
== QED_FILTER_MAC_VLAN
)) {
3331 if (!ether_addr_equal(p_bulletin
->mac
, params
.mac
) ||
3332 (params
.opcode
!= QED_FILTER_ADD
&&
3333 params
.opcode
!= QED_FILTER_REPLACE
))
3334 status
= PFVF_STATUS_FORCED
;
3338 rc
= qed_iov_chk_ucast(p_hwfn
, vf
->relative_vf_id
, ¶ms
);
3340 status
= PFVF_STATUS_FAILURE
;
3344 rc
= qed_sp_eth_filter_ucast(p_hwfn
, vf
->opaque_fid
, ¶ms
,
3345 QED_SPQ_MODE_CB
, NULL
);
3347 status
= PFVF_STATUS_FAILURE
;
3350 qed_iov_prepare_resp(p_hwfn
, p_ptt
, vf
, CHANNEL_TLV_UCAST_FILTER
,
3351 sizeof(struct pfvf_def_resp_tlv
), status
);
3354 static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn
*p_hwfn
,
3355 struct qed_ptt
*p_ptt
,
3356 struct qed_vf_info
*vf
)
3361 for (i
= 0; i
< vf
->num_sbs
; i
++)
3362 qed_int_igu_init_pure_rt_single(p_hwfn
, p_ptt
,
3364 vf
->opaque_fid
, false);
3366 qed_iov_prepare_resp(p_hwfn
, p_ptt
, vf
, CHANNEL_TLV_INT_CLEANUP
,
3367 sizeof(struct pfvf_def_resp_tlv
),
3368 PFVF_STATUS_SUCCESS
);
3371 static void qed_iov_vf_mbx_close(struct qed_hwfn
*p_hwfn
,
3372 struct qed_ptt
*p_ptt
, struct qed_vf_info
*vf
)
3374 u16 length
= sizeof(struct pfvf_def_resp_tlv
);
3375 u8 status
= PFVF_STATUS_SUCCESS
;
3377 /* Disable Interrupts for VF */
3378 qed_iov_vf_igu_set_int(p_hwfn
, p_ptt
, vf
, 0);
3380 /* Reset Permission table */
3381 qed_iov_config_perm_table(p_hwfn
, p_ptt
, vf
, 0);
3383 qed_iov_prepare_resp(p_hwfn
, p_ptt
, vf
, CHANNEL_TLV_CLOSE
,
3387 static void qed_iov_vf_mbx_release(struct qed_hwfn
*p_hwfn
,
3388 struct qed_ptt
*p_ptt
,
3389 struct qed_vf_info
*p_vf
)
3391 u16 length
= sizeof(struct pfvf_def_resp_tlv
);
3392 u8 status
= PFVF_STATUS_SUCCESS
;
3395 qed_iov_vf_cleanup(p_hwfn
, p_vf
);
3397 if (p_vf
->state
!= VF_STOPPED
&& p_vf
->state
!= VF_FREE
) {
3398 /* Stopping the VF */
3399 rc
= qed_sp_vf_stop(p_hwfn
, p_vf
->concrete_fid
,
3403 DP_ERR(p_hwfn
, "qed_sp_vf_stop returned error %d\n",
3405 status
= PFVF_STATUS_FAILURE
;
3408 p_vf
->state
= VF_STOPPED
;
3411 qed_iov_prepare_resp(p_hwfn
, p_ptt
, p_vf
, CHANNEL_TLV_RELEASE
,
3415 static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn
*p_hwfn
,
3416 struct qed_ptt
*p_ptt
,
3417 struct qed_vf_info
*p_vf
)
3419 struct qed_iov_vf_mbx
*mbx
= &p_vf
->vf_mbx
;
3420 struct pfvf_read_coal_resp_tlv
*p_resp
;
3421 struct vfpf_read_coal_req_tlv
*req
;
3422 u8 status
= PFVF_STATUS_FAILURE
;
3423 struct qed_vf_queue
*p_queue
;
3424 struct qed_queue_cid
*p_cid
;
3425 u16 coal
= 0, qid
, i
;
3429 mbx
->offset
= (u8
*)mbx
->reply_virt
;
3430 req
= &mbx
->req_virt
->read_coal_req
;
3433 b_is_rx
= req
->is_rx
? true : false;
3436 if (!qed_iov_validate_rxq(p_hwfn
, p_vf
, qid
,
3437 QED_IOV_VALIDATE_Q_ENABLE
)) {
3438 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3439 "VF[%d]: Invalid Rx queue_id = %d\n",
3440 p_vf
->abs_vf_id
, qid
);
3444 p_cid
= qed_iov_get_vf_rx_queue_cid(&p_vf
->vf_queues
[qid
]);
3445 rc
= qed_get_rxq_coalesce(p_hwfn
, p_ptt
, p_cid
, &coal
);
3449 if (!qed_iov_validate_txq(p_hwfn
, p_vf
, qid
,
3450 QED_IOV_VALIDATE_Q_ENABLE
)) {
3451 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3452 "VF[%d]: Invalid Tx queue_id = %d\n",
3453 p_vf
->abs_vf_id
, qid
);
3456 for (i
= 0; i
< MAX_QUEUES_PER_QZONE
; i
++) {
3457 p_queue
= &p_vf
->vf_queues
[qid
];
3458 if ((!p_queue
->cids
[i
].p_cid
) ||
3459 (!p_queue
->cids
[i
].b_is_tx
))
3462 p_cid
= p_queue
->cids
[i
].p_cid
;
3464 rc
= qed_get_txq_coalesce(p_hwfn
, p_ptt
, p_cid
, &coal
);
3471 status
= PFVF_STATUS_SUCCESS
;
3474 p_resp
= qed_add_tlv(p_hwfn
, &mbx
->offset
, CHANNEL_TLV_COALESCE_READ
,
3476 p_resp
->coal
= coal
;
3478 qed_add_tlv(p_hwfn
, &mbx
->offset
, CHANNEL_TLV_LIST_END
,
3479 sizeof(struct channel_list_end_tlv
));
3481 qed_iov_send_response(p_hwfn
, p_ptt
, p_vf
, sizeof(*p_resp
), status
);
3484 static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn
*p_hwfn
,
3485 struct qed_ptt
*p_ptt
,
3486 struct qed_vf_info
*vf
)
3488 struct qed_iov_vf_mbx
*mbx
= &vf
->vf_mbx
;
3489 struct vfpf_update_coalesce
*req
;
3490 u8 status
= PFVF_STATUS_FAILURE
;
3491 struct qed_queue_cid
*p_cid
;
3492 u16 rx_coal
, tx_coal
;
3496 req
= &mbx
->req_virt
->update_coalesce
;
3498 rx_coal
= req
->rx_coal
;
3499 tx_coal
= req
->tx_coal
;
3502 if (!qed_iov_validate_rxq(p_hwfn
, vf
, qid
,
3503 QED_IOV_VALIDATE_Q_ENABLE
) && rx_coal
) {
3504 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3505 "VF[%d]: Invalid Rx queue_id = %d\n",
3506 vf
->abs_vf_id
, qid
);
3510 if (!qed_iov_validate_txq(p_hwfn
, vf
, qid
,
3511 QED_IOV_VALIDATE_Q_ENABLE
) && tx_coal
) {
3512 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3513 "VF[%d]: Invalid Tx queue_id = %d\n",
3514 vf
->abs_vf_id
, qid
);
3520 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3521 vf
->abs_vf_id
, rx_coal
, tx_coal
, qid
);
3524 p_cid
= qed_iov_get_vf_rx_queue_cid(&vf
->vf_queues
[qid
]);
3526 rc
= qed_set_rxq_coalesce(p_hwfn
, p_ptt
, rx_coal
, p_cid
);
3530 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3531 vf
->abs_vf_id
, vf
->vf_queues
[qid
].fw_rx_qid
);
3534 vf
->rx_coal
= rx_coal
;
3538 struct qed_vf_queue
*p_queue
= &vf
->vf_queues
[qid
];
3540 for (i
= 0; i
< MAX_QUEUES_PER_QZONE
; i
++) {
3541 if (!p_queue
->cids
[i
].p_cid
)
3544 if (!p_queue
->cids
[i
].b_is_tx
)
3547 rc
= qed_set_txq_coalesce(p_hwfn
, p_ptt
, tx_coal
,
3548 p_queue
->cids
[i
].p_cid
);
3553 "VF[%d]: Unable to set tx queue coalesce\n",
3558 vf
->tx_coal
= tx_coal
;
3561 status
= PFVF_STATUS_SUCCESS
;
3563 qed_iov_prepare_resp(p_hwfn
, p_ptt
, vf
, CHANNEL_TLV_COALESCE_UPDATE
,
3564 sizeof(struct pfvf_def_resp_tlv
), status
);
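/* FLR cleanup: pretend to the VF's concrete FID and poll the DORQ usage
 * counter until the VF's doorbell usage drains, or give up after a bounded
 * number of attempts.
 */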
3568 qed_iov_vf_flr_poll_dorq(struct qed_hwfn
*p_hwfn
,
3569 struct qed_vf_info
*p_vf
, struct qed_ptt
*p_ptt
)
3574 qed_fid_pretend(p_hwfn
, p_ptt
, (u16
)p_vf
->concrete_fid
);
3576 for (cnt
= 0; cnt
< 50; cnt
++) {
3577 val
= qed_rd(p_hwfn
, p_ptt
, DORQ_REG_VF_USAGE_CNT
);
3582 qed_fid_pretend(p_hwfn
, p_ptt
, (u16
)p_hwfn
->hw_info
.concrete_fid
);
3586 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3587 p_vf
->abs_vf_id
, val
);
3594 #define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS)
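/* Poll the PBF block during FLR: snapshot the per-VOQ producer/consumer
 * counters for every port/TC pair, then wait until each consumer has
 * advanced past the producer value recorded at the start of the poll.
 */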
3597 qed_iov_vf_flr_poll_pbf(struct qed_hwfn
*p_hwfn
,
3598 struct qed_vf_info
*p_vf
, struct qed_ptt
*p_ptt
)
3600 u32 prod
, cons
[MAX_NUM_EXT_VOQS
], distance
[MAX_NUM_EXT_VOQS
], tmp
;
3601 u8 max_phys_tcs_per_port
= p_hwfn
->qm_info
.max_phys_tcs_per_port
;
3602 u8 max_ports_per_engine
= p_hwfn
->cdev
->num_ports_in_engine
;
3603 u32 prod_voq0_addr
= PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0
;
3604 u32 cons_voq0_addr
= PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0
;
3605 u8 port_id
, tc
, tc_id
= 0, voq
= 0;
3608 memset(cons
, 0, MAX_NUM_EXT_VOQS
* sizeof(u32
));
3609 memset(distance
, 0, MAX_NUM_EXT_VOQS
* sizeof(u32
));
3611 /* Read initial consumers & producers */
3612 for (port_id
= 0; port_id
< max_ports_per_engine
; port_id
++) {
3613 /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
3614 for (tc
= 0; tc
< max_phys_tcs_per_port
+ 1; tc
++) {
3615 tc_id
= (tc
< max_phys_tcs_per_port
) ? tc
: PURE_LB_TC
;
3616 voq
= VOQ(port_id
, tc_id
, max_phys_tcs_per_port
);
3617 cons
[voq
] = qed_rd(p_hwfn
, p_ptt
,
3618 cons_voq0_addr
+ voq
* 0x40);
3619 prod
= qed_rd(p_hwfn
, p_ptt
,
3620 prod_voq0_addr
+ voq
* 0x40);
3621 distance
[voq
] = prod
- cons
[voq
];
3625 /* Wait for consumers to pass the producers */
3628 for (cnt
= 0; cnt
< 50; cnt
++) {
3629 for (; port_id
< max_ports_per_engine
; port_id
++) {
3630 /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
3631 for (; tc
< max_phys_tcs_per_port
+ 1; tc
++) {
3632 tc_id
= (tc
< max_phys_tcs_per_port
) ?
3635 tc_id
, max_phys_tcs_per_port
);
3636 tmp
= qed_rd(p_hwfn
, p_ptt
,
3637 cons_voq0_addr
+ voq
* 0x40);
3638 if (distance
[voq
] > tmp
- cons
[voq
])
3642 if (tc
== max_phys_tcs_per_port
+ 1)
3648 if (port_id
== max_ports_per_engine
)
3655 DP_ERR(p_hwfn
, "VF[%d]: pbf poll failed on VOQ%d\n",
3656 p_vf
->abs_vf_id
, (int)voq
);
3658 DP_ERR(p_hwfn
, "VOQ %d has port_id as %d and tc_id as %d]\n",
3659 (int)voq
, (int)port_id
, (int)tc_id
);
3667 static int qed_iov_vf_flr_poll(struct qed_hwfn
*p_hwfn
,
3668 struct qed_vf_info
*p_vf
, struct qed_ptt
*p_ptt
)
3672 rc
= qed_iov_vf_flr_poll_dorq(p_hwfn
, p_vf
, p_ptt
);
3676 rc
= qed_iov_vf_flr_poll_pbf(p_hwfn
, p_vf
, p_ptt
);
3684 qed_iov_execute_vf_flr_cleanup(struct qed_hwfn
*p_hwfn
,
3685 struct qed_ptt
*p_ptt
,
3686 u16 rel_vf_id
, u32
*ack_vfs
)
3688 struct qed_vf_info
*p_vf
;
3691 p_vf
= qed_iov_get_vf_info(p_hwfn
, rel_vf_id
, false);
3695 if (p_hwfn
->pf_iov_info
->pending_flr
[rel_vf_id
/ 64] &
3696 (1ULL << (rel_vf_id
% 64))) {
3697 u16 vfid
= p_vf
->abs_vf_id
;
3699 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3700 "VF[%d] - Handling FLR\n", vfid
);
3702 qed_iov_vf_cleanup(p_hwfn
, p_vf
);
3704 /* If VF isn't active, no need for anything but SW */
3708 rc
= qed_iov_vf_flr_poll(p_hwfn
, p_vf
, p_ptt
);
3712 rc
= qed_final_cleanup(p_hwfn
, p_ptt
, vfid
, true);
3714 DP_ERR(p_hwfn
, "Failed handle FLR of VF[%d]\n", vfid
);
3718 /* Workaround to make VF-PF channel ready, as FW
3719 * doesn't do that as a part of FLR.
3722 GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM
,
3723 USTORM_VF_PF_CHANNEL_READY
, vfid
), 1);
3725 /* VF_STOPPED has to be set only after final cleanup
3726 * but prior to re-enabling the VF.
3728 p_vf
->state
= VF_STOPPED
;
3730 rc
= qed_iov_enable_vf_access(p_hwfn
, p_ptt
, p_vf
);
3732 DP_ERR(p_hwfn
, "Failed to re-enable VF[%d] acces\n",
3737 /* Mark VF for ack and clean pending state */
3738 if (p_vf
->state
== VF_RESET
)
3739 p_vf
->state
= VF_STOPPED
;
3740 ack_vfs
[vfid
/ 32] |= BIT((vfid
% 32));
3741 p_hwfn
->pf_iov_info
->pending_flr
[rel_vf_id
/ 64] &=
3742 ~(1ULL << (rel_vf_id
% 64));
3743 p_vf
->vf_mbx
.b_pending_msg
= false;
3750 qed_iov_vf_flr_cleanup(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
)
3752 u32 ack_vfs
[VF_MAX_STATIC
/ 32];
3756 memset(ack_vfs
, 0, sizeof(u32
) * (VF_MAX_STATIC
/ 32));
3758 /* Since BRB <-> PRS interface can't be tested as part of the flr
3759 * polling due to HW limitations, simply sleep a bit. And since
3760 * there's no need to wait per-vf, do it before looping.
3764 for (i
= 0; i
< p_hwfn
->cdev
->p_iov_info
->total_vfs
; i
++)
3765 qed_iov_execute_vf_flr_cleanup(p_hwfn
, p_ptt
, i
, ack_vfs
);
3767 rc
= qed_mcp_ack_vf_flr(p_hwfn
, p_ptt
, ack_vfs
);
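/* Translate the MFW's disabled-VF bitmap into per-VF pending-FLR state:
 * each VF found in the bitmap is moved to VF_RESET and its bit is set in
 * pending_flr for the cleanup flow to pick up later.
 */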
3771 bool qed_iov_mark_vf_flr(struct qed_hwfn
*p_hwfn
, u32
*p_disabled_vfs
)
3776 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
, "Marking FLR-ed VFs\n");
3777 for (i
= 0; i
< (VF_MAX_STATIC
/ 32); i
++)
3778 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3779 "[%08x,...,%08x]: %08x\n",
3780 i
* 32, (i
+ 1) * 32 - 1, p_disabled_vfs
[i
]);
3782 if (!p_hwfn
->cdev
->p_iov_info
) {
3783 DP_NOTICE(p_hwfn
, "VF flr but no IOV\n");
3788 for (i
= 0; i
< p_hwfn
->cdev
->p_iov_info
->total_vfs
; i
++) {
3789 struct qed_vf_info
*p_vf
;
3792 p_vf
= qed_iov_get_vf_info(p_hwfn
, i
, false);
3796 vfid
= p_vf
->abs_vf_id
;
3797 if (BIT((vfid
% 32)) & p_disabled_vfs
[vfid
/ 32]) {
3798 u64
*p_flr
= p_hwfn
->pf_iov_info
->pending_flr
;
3799 u16 rel_vf_id
= p_vf
->relative_vf_id
;
3801 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3802 "VF[%d] [rel %d] got FLR-ed\n",
3805 p_vf
->state
= VF_RESET
;
3807 /* No need to lock here, since pending_flr should
3808 * only change here and before ACKing the MFW. Since
3809 * the MFW will not trigger an additional attention for
3810 * VF FLR until we ACK, we're safe.
3812 p_flr
[rel_vf_id
/ 64] |= 1ULL << (rel_vf_id
% 64);
3820 static int qed_iov_get_link(struct qed_hwfn
*p_hwfn
,
3822 struct qed_mcp_link_params
*p_params
,
3823 struct qed_mcp_link_state
*p_link
,
3824 struct qed_mcp_link_capabilities
*p_caps
)
3826 struct qed_vf_info
*p_vf
= qed_iov_get_vf_info(p_hwfn
,
3829 struct qed_bulletin_content
*p_bulletin
;
3834 p_bulletin
= p_vf
->bulletin
.p_virt
;
3837 __qed_vf_get_link_params(p_hwfn
, p_params
, p_bulletin
);
3839 __qed_vf_get_link_state(p_hwfn
, p_link
, p_bulletin
);
3841 __qed_vf_get_link_caps(p_hwfn
, p_caps
, p_bulletin
);
3846 qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn
*p_hwfn
,
3847 struct qed_ptt
*p_ptt
,
3848 struct qed_vf_info
*p_vf
)
3850 struct qed_bulletin_content
*p_bulletin
= p_vf
->bulletin
.p_virt
;
3851 struct qed_iov_vf_mbx
*mbx
= &p_vf
->vf_mbx
;
3852 struct vfpf_bulletin_update_mac_tlv
*p_req
;
3853 u8 status
= PFVF_STATUS_SUCCESS
;
3856 if (!p_vf
->p_vf_info
.is_trusted_configured
) {
3859 "Blocking bulletin update request from untrusted VF[%d]\n",
3861 status
= PFVF_STATUS_NOT_SUPPORTED
;
3866 p_req
= &mbx
->req_virt
->bulletin_update_mac
;
3867 ether_addr_copy(p_bulletin
->mac
, p_req
->mac
);
3868 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3869 "Updated bulletin of VF[%d] with requested MAC[%pM]\n",
3870 p_vf
->abs_vf_id
, p_req
->mac
);
3873 qed_iov_prepare_resp(p_hwfn
, p_ptt
, p_vf
,
3874 CHANNEL_TLV_BULLETIN_UPDATE_MAC
,
3875 sizeof(struct pfvf_def_resp_tlv
), status
);
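/* Dispatch a pending mailbox request: known TLVs from well-behaved VFs are
 * routed to their handlers, VFs already marked malicious get a
 * PFVF_STATUS_MALICIOUS reply, and unknown TLVs are answered with
 * PFVF_STATUS_NOT_SUPPORTED when a valid reply address is available.
 */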
3879 static void qed_iov_process_mbx_req(struct qed_hwfn
*p_hwfn
,
3880 struct qed_ptt
*p_ptt
, int vfid
)
3882 struct qed_iov_vf_mbx
*mbx
;
3883 struct qed_vf_info
*p_vf
;
3885 p_vf
= qed_iov_get_vf_info(p_hwfn
, (u16
)vfid
, true);
3889 mbx
= &p_vf
->vf_mbx
;
3891 /* qed_iov_process_mbx_request */
3892 if (!mbx
->b_pending_msg
) {
3894 "VF[%02x]: Trying to process mailbox message when none is pending\n",
3898 mbx
->b_pending_msg
= false;
3900 mbx
->first_tlv
= mbx
->req_virt
->first_tlv
;
3902 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3903 "VF[%02x]: Processing mailbox message [type %04x]\n",
3904 p_vf
->abs_vf_id
, mbx
->first_tlv
.tl
.type
);
3906 /* check if tlv type is known */
3907 if (qed_iov_tlv_supported(mbx
->first_tlv
.tl
.type
) &&
3908 !p_vf
->b_malicious
) {
3909 switch (mbx
->first_tlv
.tl
.type
) {
3910 case CHANNEL_TLV_ACQUIRE
:
3911 qed_iov_vf_mbx_acquire(p_hwfn
, p_ptt
, p_vf
);
3913 case CHANNEL_TLV_VPORT_START
:
3914 qed_iov_vf_mbx_start_vport(p_hwfn
, p_ptt
, p_vf
);
3916 case CHANNEL_TLV_VPORT_TEARDOWN
:
3917 qed_iov_vf_mbx_stop_vport(p_hwfn
, p_ptt
, p_vf
);
3919 case CHANNEL_TLV_START_RXQ
:
3920 qed_iov_vf_mbx_start_rxq(p_hwfn
, p_ptt
, p_vf
);
3922 case CHANNEL_TLV_START_TXQ
:
3923 qed_iov_vf_mbx_start_txq(p_hwfn
, p_ptt
, p_vf
);
3925 case CHANNEL_TLV_STOP_RXQS
:
3926 qed_iov_vf_mbx_stop_rxqs(p_hwfn
, p_ptt
, p_vf
);
3928 case CHANNEL_TLV_STOP_TXQS
:
3929 qed_iov_vf_mbx_stop_txqs(p_hwfn
, p_ptt
, p_vf
);
3931 case CHANNEL_TLV_UPDATE_RXQ
:
3932 qed_iov_vf_mbx_update_rxqs(p_hwfn
, p_ptt
, p_vf
);
3934 case CHANNEL_TLV_VPORT_UPDATE
:
3935 qed_iov_vf_mbx_vport_update(p_hwfn
, p_ptt
, p_vf
);
3937 case CHANNEL_TLV_UCAST_FILTER
:
3938 qed_iov_vf_mbx_ucast_filter(p_hwfn
, p_ptt
, p_vf
);
3940 case CHANNEL_TLV_CLOSE
:
3941 qed_iov_vf_mbx_close(p_hwfn
, p_ptt
, p_vf
);
3943 case CHANNEL_TLV_INT_CLEANUP
:
3944 qed_iov_vf_mbx_int_cleanup(p_hwfn
, p_ptt
, p_vf
);
3946 case CHANNEL_TLV_RELEASE
:
3947 qed_iov_vf_mbx_release(p_hwfn
, p_ptt
, p_vf
);
3949 case CHANNEL_TLV_UPDATE_TUNN_PARAM
:
3950 qed_iov_vf_mbx_update_tunn_param(p_hwfn
, p_ptt
, p_vf
);
3952 case CHANNEL_TLV_COALESCE_UPDATE
:
3953 qed_iov_vf_pf_set_coalesce(p_hwfn
, p_ptt
, p_vf
);
3955 case CHANNEL_TLV_COALESCE_READ
:
3956 qed_iov_vf_pf_get_coalesce(p_hwfn
, p_ptt
, p_vf
);
3958 case CHANNEL_TLV_BULLETIN_UPDATE_MAC
:
3959 qed_iov_vf_pf_bulletin_update_mac(p_hwfn
, p_ptt
, p_vf
);
3962 } else if (qed_iov_tlv_supported(mbx
->first_tlv
.tl
.type
)) {
3963 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3964 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3965 p_vf
->abs_vf_id
, mbx
->first_tlv
.tl
.type
);
3967 qed_iov_prepare_resp(p_hwfn
, p_ptt
, p_vf
,
3968 mbx
->first_tlv
.tl
.type
,
3969 sizeof(struct pfvf_def_resp_tlv
),
3970 PFVF_STATUS_MALICIOUS
);
3972 /* unknown TLV - this may belong to a VF driver from the future
3973 * - a version written after this PF driver was written, which
3974 * supports features unknown as of yet. Too bad since we don't
3975 * support them. Or this may be because someone wrote a crappy
3976 * VF driver and is sending garbage over the channel.
3979 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
3981 mbx
->first_tlv
.tl
.type
,
3982 mbx
->first_tlv
.tl
.length
,
3983 mbx
->first_tlv
.padding
, mbx
->first_tlv
.reply_address
);
3985 /* Try replying in case the reply address matches the acquisition's
 * reply address.
 */
3988 if (p_vf
->acquire
.first_tlv
.reply_address
&&
3989 (mbx
->first_tlv
.reply_address
==
3990 p_vf
->acquire
.first_tlv
.reply_address
)) {
3991 qed_iov_prepare_resp(p_hwfn
, p_ptt
, p_vf
,
3992 mbx
->first_tlv
.tl
.type
,
3993 sizeof(struct pfvf_def_resp_tlv
),
3994 PFVF_STATUS_NOT_SUPPORTED
);
3998 "VF[%02x]: Can't respond to TLV - no valid reply address\n",
static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
{
	int i;

	memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);

	qed_for_each_vf(p_hwfn, i) {
		struct qed_vf_info *p_vf;

		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
		if (p_vf->vf_mbx.b_pending_msg)
			events[i / 64] |= 1ULL << (i % 64);
	}
}
4019 static struct qed_vf_info
*qed_sriov_get_vf_from_absid(struct qed_hwfn
*p_hwfn
,
4022 u8 min
= (u8
)p_hwfn
->cdev
->p_iov_info
->first_vf_in_pf
;
4024 if (!_qed_iov_pf_sanity_check(p_hwfn
, (int)abs_vfid
- min
, false)) {
4027 "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
4032 return &p_hwfn
->pf_iov_info
->vfs_array
[(u8
)abs_vfid
- min
];
4035 static int qed_sriov_vfpf_msg(struct qed_hwfn
*p_hwfn
,
4036 u16 abs_vfid
, struct regpair
*vf_msg
)
4038 struct qed_vf_info
*p_vf
= qed_sriov_get_vf_from_absid(p_hwfn
,
4044 /* List the physical address of the request so that the handler
4045 * can later copy the message from it.
4047 p_vf
->vf_mbx
.pending_req
= HILO_64(vf_msg
->hi
, vf_msg
->lo
);
4049 /* Mark the event and schedule the workqueue */
4050 p_vf
->vf_mbx
.b_pending_msg
= true;
4051 qed_schedule_iov(p_hwfn
, QED_IOV_WQ_MSG_FLAG
);
4056 void qed_sriov_vfpf_malicious(struct qed_hwfn
*p_hwfn
,
4057 struct fw_err_data
*p_data
)
4059 struct qed_vf_info
*p_vf
;
4061 p_vf
= qed_sriov_get_vf_from_absid(p_hwfn
, qed_vf_from_entity_id
4062 (p_data
->entity_id
));
4066 if (!p_vf
->b_malicious
) {
4068 "VF [%d] - Malicious behavior [%02x]\n",
4069 p_vf
->abs_vf_id
, p_data
->err_id
);
4071 p_vf
->b_malicious
= true;
4074 "VF [%d] - Malicious behavior [%02x]\n",
4075 p_vf
->abs_vf_id
, p_data
->err_id
);
4079 int qed_sriov_eqe_event(struct qed_hwfn
*p_hwfn
, u8 opcode
, __le16 echo
,
4080 union event_ring_data
*data
, u8 fw_return_code
)
4083 case COMMON_EVENT_VF_PF_CHANNEL
:
4084 return qed_sriov_vfpf_msg(p_hwfn
, le16_to_cpu(echo
),
4085 &data
->vf_pf_channel
.msg_addr
);
4087 DP_INFO(p_hwfn
->cdev
, "Unknown sriov eqe event 0x%02x\n",
4093 u16
qed_iov_get_next_active_vf(struct qed_hwfn
*p_hwfn
, u16 rel_vf_id
)
4095 struct qed_hw_sriov_info
*p_iov
= p_hwfn
->cdev
->p_iov_info
;
4101 for (i
= rel_vf_id
; i
< p_iov
->total_vfs
; i
++)
4102 if (qed_iov_is_valid_vfid(p_hwfn
, rel_vf_id
, true, false))
4109 static int qed_iov_copy_vf_msg(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*ptt
,
4112 struct qed_dmae_params params
;
4113 struct qed_vf_info
*vf_info
;
4115 vf_info
= qed_iov_get_vf_info(p_hwfn
, (u16
)vfid
, true);
4119 memset(¶ms
, 0, sizeof(params
));
4120 SET_FIELD(params
.flags
, QED_DMAE_PARAMS_SRC_VF_VALID
, 0x1);
4121 SET_FIELD(params
.flags
, QED_DMAE_PARAMS_COMPLETION_DST
, 0x1);
4122 params
.src_vfid
= vf_info
->abs_vf_id
;
4124 if (qed_dmae_host2host(p_hwfn
, ptt
,
4125 vf_info
->vf_mbx
.pending_req
,
4126 vf_info
->vf_mbx
.req_phys
,
4127 sizeof(union vfpf_tlvs
) / 4, ¶ms
)) {
4128 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
4129 "Failed to copy message from VF 0x%02x\n", vfid
);
4137 static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn
*p_hwfn
,
4140 struct qed_vf_info
*vf_info
;
4143 vf_info
= qed_iov_get_vf_info(p_hwfn
, (u16
)vfid
, true);
4145 DP_NOTICE(p_hwfn
->cdev
,
4146 "Can not set forced MAC, invalid vfid [%d]\n", vfid
);
4150 if (vf_info
->b_malicious
) {
4151 DP_NOTICE(p_hwfn
->cdev
,
4152 "Can't set forced MAC to malicious VF [%d]\n", vfid
);
4156 if (vf_info
->p_vf_info
.is_trusted_configured
) {
4157 feature
= BIT(VFPF_BULLETIN_MAC_ADDR
);
4158 /* Trust mode will disable Forced MAC */
4159 vf_info
->bulletin
.p_virt
->valid_bitmap
&=
4160 ~BIT(MAC_ADDR_FORCED
);
4162 feature
= BIT(MAC_ADDR_FORCED
);
4163 /* Forced MAC will disable MAC_ADDR */
4164 vf_info
->bulletin
.p_virt
->valid_bitmap
&=
4165 ~BIT(VFPF_BULLETIN_MAC_ADDR
);
4168 memcpy(vf_info
->bulletin
.p_virt
->mac
, mac
, ETH_ALEN
);
4170 vf_info
->bulletin
.p_virt
->valid_bitmap
|= feature
;
4172 qed_iov_configure_vport_forced(p_hwfn
, vf_info
, feature
);
static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n",
			  vfid);
		return -EINVAL;
	}

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n",
			  vfid);
		return -EINVAL;
	}

	if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Can not set MAC, Forced MAC is configured\n");
		return -EINVAL;
	}

	feature = BIT(VFPF_BULLETIN_MAC_ADDR);
	ether_addr_copy(vf_info->bulletin.p_virt->mac, mac);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	if (vf_info->p_vf_info.is_trusted_configured)
		qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);

	return 0;
}

static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
					     u16 pvid, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced vlan, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can't set forced vlan to malicious VF [%d]\n", vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
				    int vfid, u16 vxlan_port, u16 geneve_port)
{
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set udp ports, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Can not set udp ports to malicious VF [%d]\n",
			   vfid);
		return;
	}

	vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
	vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
}

static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}

static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}

static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
{
	struct qed_vf_info *vf;
	int rc = -EINVAL;

	if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		goto out;

	if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = 0;
		goto out;
	}

	rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}

static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap &
	      BIT(VFPF_BULLETIN_MAC_ADDR)))
		return NULL;

	return p_vf->bulletin.p_virt->mac;
}

static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
					   u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
		return NULL;

	return p_vf->bulletin.p_virt->mac;
}

static u16
qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}

static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, int vfid, int val)
{
	struct qed_vf_info *vf;
	u8 abs_vp_id = 0;
	u16 rl_id;
	int rc;

	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return -EINVAL;

	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc)
		return rc;

	rl_id = abs_vp_id;	/* The "rl_id" is set as the "vport_id" */
	return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val,
				  QM_RL_TYPE_NORMAL);
}

static int
qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
{
	struct qed_vf_info *vf;
	u8 vport_id;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set min rate\n");
			return -EINVAL;
		}
	}

	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
	if (!vf)
		return -EINVAL;

	vport_id = vf->vport_id;

	return qed_configure_vport_wfq(cdev, vport_id, rate);
}

static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_wfq_data *vf_vp_wfq;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}

/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 * @hwfn: hardware function pointer
 * @flag: IOV flag for VF/PF
 */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(flag, &hwfn->iov_task_flags);
	/* Memory barrier after setting atomic bit */
	smp_mb__after_atomic();
	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}

void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
	    queue_delayed_work(cdev->hwfns[i].iov_wq,
			       &cdev->hwfns[i].iov_task, 0);
}

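/* Disable flow: flush the per-hwfn IOV workqueues, mark all VFs as disabled,
 * detach the PCI VFs, and then release per-VF HW resources once each VF has
 * actually stopped (polled below). The HW teardown is skipped while a
 * recovery is in progress.
 */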
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	int i, j;

	for_each_hwfn(cdev, i)
	    if (cdev->hwfns[i].iov_wq)
		flush_workqueue(cdev->hwfns[i].iov_wq);

	/* Mark VFs for disablement */
	qed_iov_set_vfs_to_disable(cdev, true);

	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
		pci_disable_sriov(cdev->pdev);

	if (cdev->recov_in_prog) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Skip SRIOV disable operations in the device since a recovery is in progress\n");
		goto out;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		/* Failure to acquire the ptt in 100g creates an odd error
		 * where the first engine has already released IOV.
		 */
		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			return -EBUSY;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		qed_clean_wfq_db(hwfn, ptt);

		qed_for_each_vf(hwfn, j) {
			int k;

			if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
				continue;

			/* Wait until VF is disabled before releasing */
			for (k = 0; k < 100; k++) {
				if (!qed_iov_is_vf_stopped(hwfn, j))
					msleep(20);
				else
					break;
			}

			if (k < 100)
				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
							  ptt, (u16)j);
			else
				DP_ERR(hwfn,
				       "Timeout waiting for VF's FLR to end\n");
		}

		qed_ptt_release(hwfn, ptt);
	}
out:
	qed_iov_set_vfs_to_disable(cdev, false);

	return 0;
}

static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
					u16 vfid,
					struct qed_iov_vf_init_params *params)
{
	u16 base, i;

	/* Since we have an equal resource distribution per-VF, and we assume
	 * PF has acquired the QED_PF_L2_QUE first queues, we start setting
	 * sequentially from there.
	 */
	base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;

	params->rel_vf_id = vfid;
	for (i = 0; i < params->num_queues; i++) {
		params->req_rx_queue[i] = base + i;
		params->req_tx_queue[i] = base + i;
	}
}

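/* Enable flow: give each VF an equal, contiguous queue range (capped at 16
 * queues per VF), initialize the HW for every valid VF, and only then enable
 * the PCIe SR-IOV capability and switch the eswitch to VEB mode.
 */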
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
	struct qed_iov_vf_init_params params;
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, j, rc = 0;

	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
		DP_NOTICE(cdev, "Can start at most %d VFs\n",
			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
		return -EINVAL;
	}

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		hwfn = &cdev->hwfns[j];
		ptt = qed_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
					  FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
					  16);

		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			rc = -EBUSY;
			goto err;
		}

		for (i = 0; i < num; i++) {
			if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qed_sriov_enable_qid_config(hwfn, i, &params);
			rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
			if (rc) {
				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
				qed_ptt_release(hwfn, ptt);
				goto err;
			}
		}

		qed_ptt_release(hwfn, ptt);
	}

	/* Enable SRIOV PCIe functions */
	rc = pci_enable_sriov(cdev->pdev, num);
	if (rc) {
		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
		goto err;
	}

	hwfn = QED_LEADING_HWFN(cdev);
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_ERR(hwfn, "Failed to acquire ptt\n");
		rc = -EBUSY;
		goto err;
	}

	rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
	if (rc)
		DP_INFO(cdev, "Failed to update eswitch mode\n");
	qed_ptt_release(hwfn, ptt);

	return num;

err:
	qed_sriov_disable(cdev, false);
	return rc;
}

static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
{
	if (!IS_QED_SRIOV(cdev)) {
		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
		return -EOPNOTSUPP;
	}

	if (num_vfs_param)
		return qed_sriov_enable(cdev, num_vfs_param);
	else
		return qed_sriov_disable(cdev, true);
}

static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF MAC; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the MAC, and schedule the IOV task */
		if (vf_info->is_trusted_configured)
			ether_addr_copy(vf_info->mac, mac);
		else
			ether_addr_copy(vf_info->forced_mac, mac);

		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF MAC; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced vlan, and schedule the IOV task */
		vf_info->forced_vlan = vid;
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

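/* Report a VF's current configuration (MAC, VLAN, spoof-check, rates, link
 * state, trust) back to the core in ifla_vf_info form; this backs the
 * .get_config callback in the ops table at the bottom of this file.
 */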
static int qed_get_vf_config(struct qed_dev *cdev,
			     int vf_id, struct ifla_vf_info *ivi)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_public_vf_info *vf_info;
	struct qed_mcp_link_state link;
	u32 tx_rate;
	int ret;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);

	ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
	if (ret)
		return ret;

	/* Fill information about VF */
	ivi->vf = vf_id;

	if (is_valid_ether_addr(vf_info->forced_mac))
		ether_addr_copy(ivi->mac, vf_info->forced_mac);
	else
		ether_addr_copy(ivi->mac, vf_info->mac);

	ivi->vlan = vf_info->forced_vlan;
	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
	ivi->linkstate = vf_info->link_state;
	tx_rate = vf_info->tx_rate;
	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
	ivi->trusted = vf_info->is_trusted_request;

	return 0;
}

void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
	struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	int i;

	if (!hwfn->pf_iov_info)
		return;

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
		if (!vf_info)
			continue;

		/* Only hwfn0 is actually interested in the link speed.
		 * But since only it would receive an MFW indication of link,
		 * need to take configuration from it - otherwise things like
		 * rate limiting for hwfn1 VF would not work.
		 */
		memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
		       sizeof(params));
		memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
		memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
		       sizeof(caps));

		/* Modify link according to the VF's configured link state */
		switch (vf_info->link_state) {
		case IFLA_VF_LINK_STATE_DISABLE:
			link.link_up = false;
			break;
		case IFLA_VF_LINK_STATE_ENABLE:
			link.link_up = true;
			/* Set speed according to maximum supported by HW.
			 * that is 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (hwfn->cdev->num_hwfns > 1) ?
				     100000 : 40000;
			break;
		default:
			/* In auto mode pass PF link image to VF */
			break;
		}

		if (link.link_up && vf_info->tx_rate) {
			struct qed_ptt *ptt;
			int rate;

			rate = min_t(int, vf_info->tx_rate, link.speed);

			ptt = qed_ptt_acquire(hwfn);
			if (!ptt) {
				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
				return;
			}

			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
				vf_info->tx_rate = rate;
				link.speed = rate;
			}

			qed_ptt_release(hwfn, ptt);
		}

		qed_iov_set_link(hwfn, i, &params, &link, &caps);
	}

	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}

static int qed_set_vf_link_state(struct qed_dev *cdev,
				 int vf_id, int link_state)
{
	int i;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	/* Handle configuration of link state */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
		if (!vf)
			continue;

		if (vf->link_state == link_state)
			continue;

		vf->link_state = link_state;
		qed_inform_vf_link_state(&cdev->hwfns[i]);
	}

	return 0;
}

static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
{
	int i, rc = -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
		if (rc)
			break;
	}

	return rc;
}

static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set tx rate\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);

		vf->tx_rate = rate;

		qed_inform_vf_link_state(p_hwfn);
	}

	return 0;
}

static int qed_set_vf_rate(struct qed_dev *cdev,
			   int vfid, u32 min_rate, u32 max_rate)
{
	int rc_min = 0, rc_max = 0;

	if (max_rate)
		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);

	if (min_rate)
		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);

	if (rc_max | rc_min)
		return -EINVAL;

	return 0;
}

static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
			DP_NOTICE(hwfn,
				  "SR-IOV sanity check failed, can't set trust\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(hwfn, vfid, true);

		if (vf->is_trusted_request == trust)
			return 0;
		vf->is_trusted_request = trust;

		qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
	}

	return 0;
}

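/* Workqueue side of the VF-PF channel: collect the pending-message bitmap,
 * copy each pending request into the PF's per-VF buffer and process it.
 */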
static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
	u64 events[QED_VF_ARRAY_LENGTH];
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Can't acquire PTT; re-scheduling\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
		return;
	}

	qed_iov_pf_get_pending_events(hwfn, events);

	DP_VERBOSE(hwfn, QED_MSG_IOV,
		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
		   events[0], events[1], events[2]);

	qed_for_each_vf(hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
			continue;

		qed_iov_process_mbx_req(hwfn, ptt, i);
	}

	qed_ptt_release(hwfn, ptt);
}

static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn,
				       u8 *mac,
				       struct qed_public_vf_info *info)
{
	if (info->is_trusted_configured) {
		if (is_valid_ether_addr(info->mac) &&
		    (!mac || !ether_addr_equal(mac, info->mac)))
			return true;
	} else {
		if (is_valid_ether_addr(info->forced_mac) &&
		    (!mac || !ether_addr_equal(mac, info->forced_mac)))
			return true;
	}

	return false;
}

static void qed_set_bulletin_mac(struct qed_hwfn *hwfn,
				 struct qed_public_vf_info *info,
				 int vfid)
{
	if (info->is_trusted_configured)
		qed_iov_bulletin_set_mac(hwfn, info->mac, vfid);
	else
		qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid);
}

static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
{
	int i;

	qed_for_each_vf(hwfn, i) {
		struct qed_public_vf_info *info;
		bool update = false;
		u8 *mac;

		info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (!info)
			continue;

		/* Update data on bulletin board */
		if (info->is_trusted_configured)
			mac = qed_iov_bulletin_get_mac(hwfn, i);
		else
			mac = qed_iov_bulletin_get_forced_mac(hwfn, i);

		if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);

			/* Update bulletin board with MAC */
			qed_set_bulletin_mac(hwfn, info, i);
			update = true;
		}

		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
		    info->forced_vlan) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
				   info->forced_vlan,
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
			qed_iov_bulletin_set_forced_vlan(hwfn,
							 info->forced_vlan, i);
			update = true;
		}

		if (update)
			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}

static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
{
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		return;
	}

	qed_for_each_vf(hwfn, i)
	    qed_iov_post_vf_bulletin(hwfn, i, ptt);

	qed_ptt_release(hwfn, ptt);
}

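/* When the trust state of a VF flips, its MAC bookkeeping has to follow:
 * turning trust on converts a forced MAC into a regular one, while turning
 * it off re-seeds the shadow MAC configuration and clears the bulletin MAC.
 */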
static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
{
	struct qed_public_vf_info *vf_info;
	struct qed_vf_info *vf;
	u8 *force_mac;
	int i;

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
	vf = qed_iov_get_vf_info(hwfn, vf_id, true);

	if (!vf_info || !vf)
		return;

	/* Force MAC converted to generic MAC in case of VF trust on */
	if (vf_info->is_trusted_configured &&
	    (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) {
		force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id);
		if (!force_mac)
			return;

		/* Clear existing shadow copy of MAC to have a clean
		 * slate.
		 */
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (ether_addr_equal(vf->shadow_config.macs[i],
					     vf_info->mac)) {
				eth_zero_addr(vf->shadow_config.macs[i]);
				DP_VERBOSE(hwfn, QED_MSG_IOV,
					   "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
					   vf_info->mac, vf_id);
				break;
			}
		}

		ether_addr_copy(vf_info->mac, force_mac);
		eth_zero_addr(vf_info->forced_mac);
		vf->bulletin.p_virt->valid_bitmap &=
			~BIT(MAC_ADDR_FORCED);
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}

	/* Update shadow copy with VF MAC when trust mode is turned off */
	if (!vf_info->is_trusted_configured) {
		u8 empty_mac[ETH_ALEN];

		eth_zero_addr(empty_mac);
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (ether_addr_equal(vf->shadow_config.macs[i],
					     empty_mac)) {
				ether_addr_copy(vf->shadow_config.macs[i],
						vf_info->mac);
				DP_VERBOSE(hwfn, QED_MSG_IOV,
					   "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n",
					   vf_info->mac, vf_id);
				break;
			}
		}
		/* Clear bulletin when trust mode is turned off,
		 * to have a clean slate for next (normal) operations.
		 */
		qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id);
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}

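/* Apply a pending trust-state change: update the MAC handling above and
 * reconfigure the VF's vport (rx/tx accept filters, control-frame check,
 * accept-any-vlan) to match the new trusted/untrusted state.
 */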
static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
{
	struct qed_sp_vport_update_params params;
	struct qed_filter_accept_flags *flags;
	struct qed_public_vf_info *vf_info;
	struct qed_vf_info *vf;
	u8 mask;
	int i;

	mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
	flags = &params.accept_flags;

	qed_for_each_vf(hwfn, i) {
		/* Need to make sure current requested configuration didn't
		 * flip so that we'll end up configuring something that's not
		 * needed.
		 */
		vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (vf_info->is_trusted_configured ==
		    vf_info->is_trusted_request)
			continue;
		vf_info->is_trusted_configured = vf_info->is_trusted_request;

		/* Handle forced MAC mode */
		qed_update_mac_for_vf_trust_change(hwfn, i);

		/* Validate that the VF has a configured vport */
		vf = qed_iov_get_vf_info(hwfn, i, true);
		if (!vf || !vf->vport_instance)
			continue;

		memset(&params, 0, sizeof(params));
		params.opaque_fid = vf->opaque_fid;
		params.vport_id = vf->vport_id;

		params.update_ctl_frame_check = 1;
		params.mac_chk_en = !vf_info->is_trusted_configured;
		params.update_accept_any_vlan_flg = 0;

		if (vf_info->accept_any_vlan && vf_info->forced_vlan) {
			params.update_accept_any_vlan_flg = 1;
			params.accept_any_vlan = vf_info->accept_any_vlan;
		}

		if (vf_info->rx_accept_mode & mask) {
			flags->update_rx_mode_config = 1;
			flags->rx_accept_filter = vf_info->rx_accept_mode;
		}

		if (vf_info->tx_accept_mode & mask) {
			flags->update_tx_mode_config = 1;
			flags->tx_accept_filter = vf_info->tx_accept_mode;
		}

		/* Remove if needed; Otherwise this would set the mask */
		if (!vf_info->is_trusted_configured) {
			flags->rx_accept_filter &= ~mask;
			flags->tx_accept_filter &= ~mask;
			params.accept_any_vlan = false;
		}

		if (flags->update_rx_mode_config ||
		    flags->update_tx_mode_config ||
		    params.update_ctl_frame_check ||
		    params.update_accept_any_vlan_flg) {
			DP_VERBOSE(hwfn, QED_MSG_IOV,
				   "vport update config for %s VF[abs 0x%x rel 0x%x]\n",
				   vf_info->is_trusted_configured ? "trusted" : "untrusted",
				   vf->abs_vf_id, vf->relative_vf_id);
			qed_sp_vport_update(hwfn, &params,
					    QED_SPQ_MODE_EBLOCK, NULL);
		}
	}
}

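/* The PF's IOV work item: services every flag set via qed_schedule_iov() -
 * FLR cleanup, pending VF messages, unicast filter updates, bulletin posting
 * and trust changes - and re-schedules the FLR flag if a PTT cannot be
 * acquired.
 */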
static void qed_iov_pf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	int rc;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		if (!ptt) {
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
			return;
		}

		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
		if (rc)
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);

		qed_ptt_release(hwfn, ptt);
	}

	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
		qed_handle_vf_msg(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_pf_set_vf_unicast(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_bulletin_post(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
		qed_iov_handle_trust_change(hwfn);
}

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
	int i;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].iov_wq)
			continue;

		if (schedule_first) {
			qed_schedule_iov(&cdev->hwfns[i],
					 QED_IOV_WQ_STOP_WQ_FLAG);
			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
		}

		destroy_workqueue(cdev->hwfns[i].iov_wq);
	}
}

int qed_iov_wq_start(struct qed_dev *cdev)
{
	char name[NAME_SIZE];
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* PFs need a dedicated workqueue only if they support IOV.
		 * VFs always require one.
		 */
		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
			continue;

		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

		p_hwfn->iov_wq = create_singlethread_workqueue(name);
		if (!p_hwfn->iov_wq) {
			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
			return -ENOMEM;
		}

		if (IS_PF(cdev))
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
		else
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
	}

	return 0;
}

const struct qed_iov_hv_ops qed_iov_ops_pass = {
	.configure = &qed_sriov_configure,
	.set_mac = &qed_sriov_pf_set_mac,
	.set_vlan = &qed_sriov_pf_set_vlan,
	.get_config = &qed_get_vf_config,
	.set_link_state = &qed_set_vf_link_state,
	.set_spoof = &qed_spoof_configure,
	.set_rate = &qed_set_vf_rate,
	.set_trust = &qed_set_vf_trust,
};