/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			       u8 opcode,
			       __le16 echo,
			       union event_ring_data *data, u8 fw_return_code);
static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);

static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
	u8 legacy = 0;

	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		legacy |= QED_QCID_LEGACY_VF_RX_PROD;

	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		legacy |= QED_QCID_LEGACY_VF_CID;

	return legacy;
}
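
/* Post the VF_START ramrod for a VF. The fastpath HSI minor version
 * requested by the VF is clamped down to the PF's supported minor, except
 * for the legacy no-pkt-len/tunn value which is passed through as-is.
 */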
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 fp_minor;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		qed_sp_destroy_request(p_hwfn, p_ent);
		return -EINVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR,
			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
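
/* Post the VF_STOP ramrod, undoing qed_sp_vf_start() for the given VF. */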
static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id,
			   bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				  b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

static struct qed_queue_cid *
qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
{
	int i;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
			return p_queue->cids[i].p_cid;
	}

	return NULL;
}

enum qed_iov_validate_q_mode {
	QED_IOV_VALIDATE_Q_NA,
	QED_IOV_VALIDATE_Q_ENABLE,
	QED_IOV_VALIDATE_Q_DISABLE,
};

static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					u16 qid,
					enum qed_iov_validate_q_mode mode,
					bool b_is_tx)
{
	int i;

	if (mode == QED_IOV_VALIDATE_Q_NA)
		return true;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		struct qed_vf_queue_cid *p_qcid;

		p_qcid = &p_vf->vf_queues[qid].cids[i];

		if (!p_qcid->p_cid)
			continue;

		if (p_qcid->b_is_tx != b_is_tx)
			continue;

		return mode == QED_IOV_VALIDATE_Q_ENABLE;
	}

	/* In case we haven't found any valid cid, then it's disabled */
	return mode == QED_IOV_VALIDATE_Q_DISABLE;
}

static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 rx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 tx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *p_vf, u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}

static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						false))
			return true;

	return false;
}

static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_txqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						true))
			return true;

	return false;
}
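
/* Copy the PF-side bulletin board of a VF into the VF-provided buffer via
 * DMAE. The version is bumped and a CRC is computed over everything past
 * the crc field itself, so the VF can detect torn or stale copies.
 */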
static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
				    int vfid, struct qed_ptt *p_ptt)
{
	struct qed_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct qed_dmae_params params;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return -EINVAL;

	if (!p_vf->vf_bulletin)
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vfid = p_vf->abs_vf_id;
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				  &params);
}

static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
	}

	return 0;
}

static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;

		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	}
}
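
/* Allocate the DMA-coherent areas shared with VFs - the per-VF mailbox
 * request and reply buffers, and the per-VF bulletin boards.
 */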
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov)
		return -ENOMEM;

	p_hwfn->pf_iov_info = p_sriov;

	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				  qed_sriov_eqe_event);

	return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}
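
/* Learn the device's SR-IOV capability from PCI config space and derive
 * first_vf_in_pf from the VF offset, distinguishing ARI from non-ARI
 * layouts by the magnitude of the offset.
 */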
int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info)
		return -ENOMEM;

	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine VFs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on next device.
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by later
	 * to differentiate between the two.
	 */

	if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		cdev->p_iov_info->first_vf_in_pf = first;

		if (QED_PATH_ID(p_hwfn))
			cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		cdev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}

static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
				     int vfid, bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

static int
qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
{
	u8 current_max = 0;
	int i;

	/* For AH onward, configuration is per-PF. Find maximum of all
	 * the currently enabled child VFs, and set the number to be that.
	 */
	if (!QED_IS_BB(p_hwfn->cdev)) {
		qed_for_each_vf(p_hwfn, i) {
			struct qed_vf_info *p_vf;

			p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
			if (!p_vf)
				continue;

			current_max = max_t(u8, current_max, p_vf->num_sbs);
		}
	}

	if (num_sbs > current_max)
		return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
					      abs_vf_id, num_sbs);

	return 0;
}
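
/* Enable a VF's access to the device: clear pglue errors, reset its IGU
 * state, guarantee sufficient MSI-X vectors and run the VF init phase
 * while pretending to the VF's concrete FID.
 */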
static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc = 0;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					   vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}

/**
 * @brief qed_iov_config_perm_table - configure the permission
 *      zone table.
 *      In E4, queue zone permission table size is 320x9. There
 *      are 320 VF queues for single engine device (256 for dual
 *      engine device), and each entry has the following format:
 *      {Valid, VF[7:0]}
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
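
/* Carve free IGU status blocks out of the PF's IOV pool for this VF, map
 * them in the IGU and configure the matching CAU entries.
 */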
static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *p_block;
	struct cau_sb_entry sb_entry;
	int qid = 0;
	u32 val = 0;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	for (qid = 0; qid < num_rx_queues; qid++) {
		p_block = qed_get_igu_free_sb(p_hwfn, false);
		vf->igu_sbs[qid] = p_block->igu_sb_id;
		p_block->status &= ~QED_IGU_STATUS_FREE;
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

		qed_wr(p_hwfn, p_ptt,
		       IGU_REG_MAPPING_MEMORY +
		       sizeof(u32) * p_block->igu_sb_id, val);

		/* Configure igu sb in CAU which were marked valid */
		qed_init_cau_sb_entry(p_hwfn, &sb_entry,
				      p_hwfn->rel_pf_id, vf->abs_vf_id, 1);

		qed_dmae_host2grc(p_hwfn, p_ptt,
				  (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  p_block->igu_sb_id * sizeof(u64), 2, NULL);
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
	}

	vf->num_sbs = 0;
}

static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}

static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_iov_vf_init_params *p_params)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params link_params;
	struct qed_mcp_link_state link_state;
	u8 num_of_vf_avaiable_chains = 0;
	struct qed_vf_info *vf = NULL;
	u16 qid, num_irqs;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return -EINVAL;
	}

	/* Perform sanity checking on the requested queue_id */
	for (i = 0; i < p_params->num_queues; i++) {
		u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
		u16 max_vf_qzone = min_vf_qzone +
		    FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;

		qid = p_params->req_rx_queue[i];
		if (qid < min_vf_qzone || qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
				  qid,
				  p_params->rel_vf_id,
				  min_vf_qzone, max_vf_qzone);
			return -EINVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
				  qid, p_params->rel_vf_id, max_vf_qzone);
			return -EINVAL;
		}

		/* If client *really* wants, Tx qid can be shared with PF */
		if (qid < min_vf_qzone)
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
				   p_params->rel_vf_id, qid, i);
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							     p_ptt,
							     vf, num_irqs);
	if (!num_of_vf_avaiable_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_avaiable_chains;
	vf->num_txqs = num_of_vf_avaiable_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct qed_vf_queue *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);
	}

	/* Update the link configuration in bulletin */
	memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
	       sizeof(link_params));
	memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
	memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
	       sizeof(link_caps));
	qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
			 &link_params, &link_state, &link_caps);

	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}

static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	memset(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_sbs = 0;
	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	vf->state = VF_STOPPED;

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}
static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	/* Once PF copies the rc to the VF, the latter can continue
	 * and send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);
}

static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
{
	switch (flag) {
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}

static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_iov_vf_mbx *p_mbx,
					    u8 status,
					    u16 tlvs_mask, u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & BIT(i)))
			continue;

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & BIT(i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	return total_len;
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

static struct
qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return NULL;

	return &vf->p_vf_info;
}

static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
	if (!vf_info)
		return;

	/* Clear the VF mac */
	eth_zero_addr(vf_info->mac);

	vf_info->rx_accept_mode = 0;
	vf_info->tx_accept_mode = 0;
}

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i, j;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
		struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];

		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
			if (!p_queue->cids[j].p_cid)
				continue;

			qed_eth_queue_cid_release(p_hwfn,
						  p_queue->cids[j].p_cid);
			p_queue->cids[j].p_cid = NULL;
		}
	}

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}

/* Returns either 0, or log(size) */
static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

	if (val)
		return val + 11;
	return 0;
}

static void
qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *p_vf,
				 struct vf_pf_resc_request *p_req,
				 struct pf_vf_resc *p_resp)
{
	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
	u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
		     qed_db_addr_vf(0, DQ_DEMS_LEGACY);
	u32 bar_size;

	p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);

	/* If VF didn't bother asking for QIDs then don't bother limiting
	 * number of CIDs. The VF doesn't care about the number, and this
	 * has the likely result of causing an additional acquisition.
	 */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	/* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
	 * that would make sure doorbells for all CIDs fall within the bar.
	 * If it doesn't, make sure regview window is sufficient.
	 */
	if (p_vf->acquire.vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
		bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
		if (bar_size)
			bar_size = 1 << bar_size;

		if (p_hwfn->cdev->num_hwfns > 1)
			bar_size /= 2;
	} else {
		bar_size = PXP_VF_BAR0_DQ_LENGTH;
	}

	if (bar_size / db_size < 256)
		p_resp->num_cids = min_t(u8, p_resp->num_cids,
					 (u8)(bar_size / db_size));
}
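
/* Fill the resources granted to the VF in the ACQUIRE response and verify
 * they cover the VF's request; legacy Windows VFs get a success status even
 * on shortage, since they cannot correctly handle the failure.
 */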
static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *p_vf,
				      struct vf_pf_resc_request *p_req,
				      struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				(u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = i;
	}

	/* Filter related information */
	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
					p_req->num_mac_filters);
	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
					 p_req->num_vlan_filters);

	qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs,
			   p_resp->num_rxqs,
			   p_req->num_txqs,
			   p_resp->num_txqs,
			   p_req->num_sbs,
			   p_resp->num_sbs,
			   p_req->num_mac_filters,
			   p_resp->num_mac_filters,
			   p_req->num_vlan_filters,
			   p_resp->num_vlan_filters,
			   p_req->num_mc_filters,
			   p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}

static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
					 struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  offsetof(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  offsetof(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  offsetof(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}
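
/* Handle the VF's ACQUIRE mailbox message - validate HSI compatibility,
 * store the request, fill in PF/device information and granted resources,
 * and start the VF in FW.
 */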
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	int rc;

	memset(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		goto out;
	}

	/* Store the acquire message */
	memcpy(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB_E4;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	/* Share our ability to use multiple queue-ids only with VFs
	 * that request it.
	 */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	/* Share the sizes of the bars with VF */
	resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);

	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this field.
	 */
	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
					 req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						  &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = qed_sp_vf_start(p_hwfn, vf);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;
	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);
	vf->state = VF_ACQUIRED;

	/* Prepare Response */
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}

static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
				  struct qed_vf_info *p_vf, bool val)
{
	struct qed_sp_vport_update_params params;
	int rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return 0;
	}

	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (!rc) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}

static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf)
{
	struct qed_filter_ucast filter;
	int rc = 0;
	int i;

	memset(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = QED_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = QED_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}

static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
				   struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;

	if ((events & BIT(VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}
static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
					  struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;
	struct qed_filter_ucast filter;

	if (!p_vf->vport_instance)
		return -EINVAL;

	if ((events & BIT(MAC_ADDR_FORCED)) ||
	    p_vf->p_vf_info.is_trusted_configured) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_MAC;
		filter.opcode = QED_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}
		if (p_vf->p_vf_info.is_trusted_configured)
			p_vf->configured_features |=
				BIT(VFPF_BULLETIN_MAC_ADDR);
		else
			p_vf->configured_features |=
				BIT(MAC_ADDR_FORCED);
	}

	if (events & BIT(VLAN_ADDR_FORCED)) {
		struct qed_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
					      QED_FILTER_FLUSH;

		/* Send the ramrod */
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		memset(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ? 1
				      : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = qed_sp_vport_update(p_hwfn,
					 &vport_update,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
			struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
			struct qed_queue_cid *p_cid = NULL;

			/* There can be at most 1 Rx queue on qzone. Find it */
			p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
			if (!p_cid)
				continue;

			rc = qed_sp_eth_rx_queues_update(p_hwfn,
							 (void **)&p_cid,
							 1, 0, 1,
							 QED_SPQ_MODE_EBLOCK,
							 NULL);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed to send Rx update for queue[0x%04x]\n",
					  p_cid->rel.queue_id);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}

static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_sp_vport_start_params params = { 0 };
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	int rc;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		qed_int_cau_conf_sb(p_hwfn, p_ptt,
				    start->sb_addr[sb_id],
				    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
	}

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;

	/* Non trusted VFs should enable control frame filtering */
	params.check_mac = !vf->p_vf_info.is_trusted_configured;

	rc = qed_sp_eth_vport_start(p_hwfn, &params);
	if (rc) {
		DP_ERR(p_hwfn,
		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);

		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	u8 status = PFVF_STATUS_SUCCESS;
	int rc;

	vf->vport_instance--;
	vf->spoof_chk = false;

	if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
	    (qed_iov_validate_active_txq(p_hwfn, vf))) {
		vf->b_malicious = true;
		DP_NOTICE(p_hwfn,
			  "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
			  vf->abs_vf_id);
		status = PFVF_STATUS_MALICIOUS;
		goto out;
	}

	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
	if (rc) {
		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
		       rc);
		status = PFVF_STATUS_FAILURE;
	}

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *vf,
					  u8 status, bool b_legacy)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	struct vfpf_start_rxq_tlv *req;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
			    length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		req = &mbx->req_virt->start_rxq;
		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
				offsetof(struct mstorm_vf_zone,
					 non_trigger.eth_rx_queue_producers) +
				sizeof(struct eth_rx_prod_data) * req->rx_qid;
	}

	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
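
/* Return the queue-cid index the VF asked to use. VFs that didn't negotiate
 * VFPF_ACQUIRE_CAP_QUEUE_QIDS get the fixed legacy Rx/Tx index instead.
 */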
static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
			     struct qed_vf_info *p_vf, bool b_is_tx)
{
	struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
	struct vfpf_qid_tlv *p_qid_tlv;

	/* Search for the qid if the VF published it's going to provide it */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
		if (b_is_tx)
			return QED_IOV_LEGACY_QID_TX;
		else
			return QED_IOV_LEGACY_QID_RX;
	}

	p_qid_tlv = (struct vfpf_qid_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
					     CHANNEL_TLV_QID);
	if (!p_qid_tlv) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%2x]: Failed to provide qid\n",
			   p_vf->relative_vf_id);

		return QED_IOV_QID_INVALID;
	}

	if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%02x]: Provided qid out-of-bounds %02x\n",
			   p_vf->relative_vf_id, p_qid_tlv->qid);
		return QED_IOV_QID_INVALID;
	}

	return p_qid_tlv->qid;
}
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	struct qed_queue_start_common_params params;
	struct qed_queue_cid_vf_params vf_params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	u8 qid_usage_idx, vf_legacy = 0;
	struct vfpf_start_rxq_tlv *req;
	struct qed_vf_queue *p_queue;
	struct qed_queue_cid *p_cid;
	struct qed_sb_info sb_dummy;
	int rc;

	req = &mbx->req_virt->start_rxq;

	if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
				  QED_IOV_VALIDATE_Q_DISABLE) ||
	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	p_queue = &vf->vf_queues[req->rx_qid];
	if (p_queue->cids[qid_usage_idx].p_cid)
		goto out;

	vf_legacy = qed_vf_calculate_legacy(vf);

	/* Acquire a new queue-cid */
	memset(&params, 0, sizeof(params));
	params.queue_id = p_queue->fw_rx_qid;
	params.vport_id = vf->vport_id;
	params.stats_id = vf->abs_vf_id + 0x10;
	/* Since IGU index is passed via sb_info, construct a dummy one */
	memset(&sb_dummy, 0, sizeof(sb_dummy));
	sb_dummy.igu_sb_id = req->hw_sb;
	params.p_sb = &sb_dummy;
	params.sb_idx = req->sb_index;

	memset(&vf_params, 0, sizeof(vf_params));
	vf_params.vfid = vf->relative_vf_id;
	vf_params.vf_qid = (u8)req->rx_qid;
	vf_params.vf_legacy = vf_legacy;
	vf_params.qid_usage_idx = qid_usage_idx;
	p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
				     &params, true, &vf_params);
	if (!p_cid)
		goto out;

	/* Legacy VFs have their Producers in a different location, which they
	 * calculate on their own and clean the producer prior to this.
	 */
	if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
		REG_WR(p_hwfn,
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
		       0);

	rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
				      req->bd_max_bytes,
				      req->rxq_addr,
				      req->cqe_pbl_addr, req->cqe_pbl_size);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	} else {
		p_queue->cids[qid_usage_idx].p_cid = p_cid;
		p_queue->cids[qid_usage_idx].b_is_tx = false;
		status = PFVF_STATUS_SUCCESS;
		vf->num_active_rxqs++;
	}

out:
	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
				      !!(vf_legacy &
					 QED_QCID_LEGACY_VF_RX_PROD));
}
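
/* Tunnel configuration: the VF asks for mode/class/UDP-port changes in a
 * single TLV; the PF sanitizes the request against its own tunnel state,
 * applies what it can, and always replies with the resulting PF-wide
 * configuration so the VF can resynchronize.
 */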
static void
qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
			       struct qed_tunnel_info *p_tun,
			       u16 tunn_feature_mask)
{
	p_resp->tunn_feature_mask = tunn_feature_mask;
	p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
	p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
	p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
	p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
	p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
	p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
	p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
	p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
	p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
	p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
	p_resp->geneve_udp_port = p_tun->geneve_port.port;
	p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
}

static void
__qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
			      struct qed_tunn_update_type *p_tun,
			      enum qed_tunn_mode mask, u8 tun_cls)
{
	if (p_req->tun_mode_update_mask & BIT(mask)) {
		p_tun->b_update_mode = true;

		if (p_req->tunn_mode & BIT(mask))
			p_tun->b_mode_enabled = true;
	}

	p_tun->tun_cls = tun_cls;
}

static void
qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
			    struct qed_tunn_update_type *p_tun,
			    struct qed_tunn_update_udp_port *p_port,
			    enum qed_tunn_mode mask,
			    u8 tun_cls, u8 update_port, u16 port)
{
	if (update_port) {
		p_port->b_update_port = true;
		p_port->port = port;
	}

	__qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
}

static bool
qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
{
	bool b_update_requested = false;

	if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
	    p_req->update_geneve_port || p_req->update_vxlan_port)
		b_update_requested = true;

	return b_update_requested;
}

static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
{
	if (tun->b_update_mode && !tun->b_mode_enabled) {
		tun->b_update_mode = false;
		*rc = -EINVAL;
	}
}

static int
qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
				   u16 *tun_features, bool *update,
				   struct qed_tunnel_info *tun_src)
{
	struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
	struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
	u16 bultn_vxlan_port, bultn_geneve_port;
	void *cookie = p_hwfn->cdev->ops_cookie;
	int i, rc = 0;

	*tun_features = p_hwfn->cdev->tunn_feature_mask;
	bultn_vxlan_port = tun->vxlan_port.port;
	bultn_geneve_port = tun->geneve_port.port;
	qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
	qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
	qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
	qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
	qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);

	if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
	    (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
		tun_src->b_update_rx_cls = false;
		tun_src->b_update_tx_cls = false;
		rc = -EINVAL;
	}

	if (tun_src->vxlan_port.b_update_port) {
		if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
			tun_src->vxlan_port.b_update_port = false;
		} else {
			*update = true;
			bultn_vxlan_port = tun_src->vxlan_port.port;
		}
	}

	if (tun_src->geneve_port.b_update_port) {
		if (tun_src->geneve_port.port == tun->geneve_port.port) {
			tun_src->geneve_port.b_update_port = false;
		} else {
			*update = true;
			bultn_geneve_port = tun_src->geneve_port.port;
		}
	}

	qed_for_each_vf(p_hwfn, i) {
		qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
					       bultn_geneve_port);
	}

	qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);

	return rc;
}
static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
					     struct qed_ptt *p_ptt,
					     struct qed_vf_info *p_vf)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_update_tunn_param_tlv *p_resp;
	struct vfpf_update_tunn_param_tlv *p_req;
	u8 status = PFVF_STATUS_SUCCESS;
	bool b_update_required = false;
	struct qed_tunnel_info tunn;
	u16 tunn_feature_mask = 0;
	int i, rc = 0;

	mbx->offset = (u8 *)mbx->reply_virt;

	memset(&tunn, 0, sizeof(tunn));
	p_req = &mbx->req_virt->tunn_param_update;

	if (!qed_iov_pf_validate_tunn_param(p_req)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No tunnel update requested by VF\n");
		status = PFVF_STATUS_FAILURE;
		goto send_resp;
	}

	tunn.b_update_rx_cls = p_req->update_tun_cls;
	tunn.b_update_tx_cls = p_req->update_tun_cls;

	qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
				    QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
				    p_req->update_vxlan_port,
				    p_req->vxlan_port);
	qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
				    QED_MODE_L2GENEVE_TUNN,
				    p_req->l2geneve_clss,
				    p_req->update_geneve_port,
				    p_req->geneve_port);
	__qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
				      QED_MODE_IPGENEVE_TUNN,
				      p_req->ipgeneve_clss);
	__qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
				      QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
	__qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
				      QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);

	/* If PF modifies VF's req then it should
	 * still return an error in case of partial configuration
	 * or modified configuration as opposed to requested one.
	 */
	rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
						&b_update_required, &tunn);
	if (rc)
		status = PFVF_STATUS_FAILURE;

	/* Check whether the QED client is willing to update anything */
	if (b_update_required) {
		u16 geneve_port;

		rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
					       QED_SPQ_MODE_EBLOCK, NULL);
		if (rc)
			status = PFVF_STATUS_FAILURE;

		geneve_port = p_tun->geneve_port.port;
		qed_for_each_vf(p_hwfn, i) {
			qed_iov_bulletin_set_udp_ports(p_hwfn, i,
						       p_tun->vxlan_port.port,
						       geneve_port);
		}
	}

send_resp:
	p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
			     CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));

	qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
}
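
/* The Tx-queue response returns the VF doorbell offset for the new CID.
 * Legacy VFs (ETH_HSI_VER_NO_PKT_LEN_TUNN) expect the shorter default
 * response format instead, as with the Rx-queue response above.
 */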
static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *p_vf,
					  u32 cid, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	bool b_legacy = false;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		b_legacy = true;

	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
			    length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
		p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);

	qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	struct qed_queue_start_common_params params;
	struct qed_queue_cid_vf_params vf_params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	struct vfpf_start_txq_tlv *req;
	struct qed_vf_queue *p_queue;
	struct qed_queue_cid *p_cid;
	struct qed_sb_info sb_dummy;
	u8 qid_usage_idx, vf_legacy;
	u32 cid = 0;
	int rc;
	u16 pq;

	memset(&params, 0, sizeof(params));
	req = &mbx->req_virt->start_txq;

	if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
				  QED_IOV_VALIDATE_Q_NA) ||
	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	p_queue = &vf->vf_queues[req->tx_qid];
	if (p_queue->cids[qid_usage_idx].p_cid)
		goto out;

	vf_legacy = qed_vf_calculate_legacy(vf);

	/* Acquire a new queue-cid */
	params.queue_id = p_queue->fw_tx_qid;
	params.vport_id = vf->vport_id;
	params.stats_id = vf->abs_vf_id + 0x10;

	/* Since IGU index is passed via sb_info, construct a dummy one */
	memset(&sb_dummy, 0, sizeof(sb_dummy));
	sb_dummy.igu_sb_id = req->hw_sb;
	params.p_sb = &sb_dummy;
	params.sb_idx = req->sb_index;

	memset(&vf_params, 0, sizeof(vf_params));
	vf_params.vfid = vf->relative_vf_id;
	vf_params.vf_qid = (u8)req->tx_qid;
	vf_params.vf_legacy = vf_legacy;
	vf_params.qid_usage_idx = qid_usage_idx;

	p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
				     &params, false, &vf_params);
	if (!p_cid)
		goto out;

	pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
				      req->pbl_addr, req->pbl_size, pq);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	} else {
		status = PFVF_STATUS_SUCCESS;
		p_queue->cids[qid_usage_idx].p_cid = p_cid;
		p_queue->cids[qid_usage_idx].b_is_tx = true;
		cid = p_cid->cid;
	}

out:
	qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
}
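
/* Queue-stop helpers. These return an errno so the mailbox wrappers
 * further below can map failures onto a PFVF status; a stop request must
 * reference the same qid_usage_idx the queue was originally started with.
 */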
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				u16 rxq_id,
				u8 qid_usage_idx, bool cqe_completion)
{
	struct qed_vf_queue *p_queue;
	int rc = 0;

	if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
			   vf->relative_vf_id, rxq_id, qid_usage_idx);
		return -EINVAL;
	}

	p_queue = &vf->vf_queues[rxq_id];

	/* We've validated the index and the existence of the active RXQ -
	 * now we need to make sure that it's using the correct qid.
	 */
	if (!p_queue->cids[qid_usage_idx].p_cid ||
	    p_queue->cids[qid_usage_idx].b_is_tx) {
		struct qed_queue_cid *p_cid;

		p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
			   vf->relative_vf_id,
			   rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
		return -EINVAL;
	}

	/* Now that we know we have a valid Rx-queue - close it */
	rc = qed_eth_rx_queue_stop(p_hwfn,
				   p_queue->cids[qid_usage_idx].p_cid,
				   false, cqe_completion);
	if (rc)
		return rc;

	p_queue->cids[qid_usage_idx].p_cid = NULL;
	vf->num_active_rxqs--;

	return 0;
}

static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				u16 txq_id, u8 qid_usage_idx)
{
	struct qed_vf_queue *p_queue;
	int rc = 0;

	if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
		return -EINVAL;

	p_queue = &vf->vf_queues[txq_id];
	if (!p_queue->cids[qid_usage_idx].p_cid ||
	    !p_queue->cids[qid_usage_idx].b_is_tx)
		return -EINVAL;

	rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
	if (rc)
		return rc;

	p_queue->cids[qid_usage_idx].p_cid = NULL;

	return 0;
}
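
/* Mailbox wrappers for queue-stop. The multi-queue form of these channel
 * messages is deprecated - no official driver ever used it - so anything
 * other than a single-queue request is rejected as unsupported.
 */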
static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_FAILURE;
	struct vfpf_stop_rxqs_tlv *req;
	u8 qid_usage_idx;
	int rc;

	/* There has never been an official driver that used this interface
	 * for stopping multiple queues, and it is now considered deprecated.
	 * Validate this isn't used here.
	 */
	req = &mbx->req_virt->stop_rxqs;
	if (req->num_rxqs != 1) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Odd; VF[%d] tried stopping multiple Rx queues\n",
			   vf->relative_vf_id);
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	/* Find which qid-index is associated with the queue */
	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
				  qid_usage_idx, req->cqe_completion);
	if (!rc)
		status = PFVF_STATUS_SUCCESS;
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
			     length, status);
}

static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_FAILURE;
	struct vfpf_stop_txqs_tlv *req;
	u8 qid_usage_idx;
	int rc;

	/* There has never been an official driver that used this interface
	 * for stopping multiple queues, and it is now considered deprecated.
	 * Validate this isn't used here.
	 */
	req = &mbx->req_virt->stop_txqs;
	if (req->num_txqs != 1) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Odd; VF[%d] tried stopping multiple Tx queues\n",
			   vf->relative_vf_id);
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	/* Find which qid-index is associated with the queue */
	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
	if (!rc)
		status = PFVF_STATUS_SUCCESS;

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
			     length, status);
}

static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_update_rxq_tlv *req;
	u8 status = PFVF_STATUS_FAILURE;
	u8 complete_event_flg;
	u8 complete_cqe_flg;
	u8 qid_usage_idx;
	int rc;
	u8 i;

	req = &mbx->req_virt->update_rxq;
	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);

	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	/* There shouldn't exist a VF that uses queue-qids yet uses this
	 * API with multiple Rx queues. Validate this.
	 */
	if ((vf->acquire.vfdev_info.capabilities &
	     VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] supports QIDs but sends multiple queues\n",
			   vf->relative_vf_id);
		goto out;
	}

	/* Validate inputs - for the legacy case this is still true since
	 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
	 */
	for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
		if (!qed_iov_validate_rxq(p_hwfn, vf, i,
					  QED_IOV_VALIDATE_Q_NA) ||
		    !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
		    vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
				   vf->relative_vf_id, req->rx_qid,
				   req->num_rxqs);
			goto out;
		}
	}

	/* Prepare the handlers */
	for (i = 0; i < req->num_rxqs; i++) {
		u16 qid = req->rx_qid + i;

		handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
	}

	rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
					 req->num_rxqs,
					 complete_cqe_flg,
					 complete_event_flg,
					 QED_SPQ_MODE_EBLOCK, NULL);
	if (rc)
		goto out;

	status = PFVF_STATUS_SUCCESS;
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
			     length, status);
}
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type)
{
	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
	int len = 0;

	do {
		if (!p_tlv->length) {
			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
			return NULL;
		}

		if (p_tlv->type == req_type) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Extended tlv type %d, length %d found\n",
				   p_tlv->type, p_tlv->length);
			return p_tlv;
		}

		len += p_tlv->length;
		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);

		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
			DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
			return NULL;
		}
	} while (p_tlv->type != CHANNEL_TLV_LIST_END);

	return NULL;
}
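
/* Roughly, a vport-update request is laid out as a chain of TLVs:
 *
 *	[first_tlv][extended tlv]...[extended tlv][CHANNEL_TLV_LIST_END]
 *
 * Each qed_iov_vp_update_*() helper below looks up one extended TLV via
 * qed_iov_search_list_tlvs() and folds it into the ramrod parameters,
 * setting a bit in tlvs_mask so the response can echo what was handled.
 */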
static void
qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_activate_tlv *p_act_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;

	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_act_tlv)
		return;

	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
}

static void
qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
			     struct qed_sp_vport_update_params *p_data,
			     struct qed_vf_info *p_vf,
			     struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;

	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
		     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_vlan_tlv)
		return;

	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;

	/* Ignore the VF request if we're forcing a vlan */
	if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
		p_data->update_inner_vlan_removal_flg = 1;
		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
	}

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
}

static void
qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;

	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
			  qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						   tlv);
	if (!p_tx_switch_tlv)
		return;

	p_data->update_tx_switching_flg = 1;
	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
}

static void
qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;

	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
		      qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_mcast_tlv)
		return;

	p_data->update_approx_mcast_flg = 1;
	memcpy(p_data->bins, p_mcast_tlv->bins,
	       sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
}

static void
qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_update_params *p_data,
			      struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;

	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
		       qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_accept_tlv)
		return;

	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
}

static void
qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;

	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
			    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
						     tlv);
	if (!p_accept_any_vlan)
		return;

	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
	p_data->update_accept_any_vlan_flg =
	    p_accept_any_vlan->update_accept_any_vlan_flg;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
}

static void
qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
			    struct qed_vf_info *vf,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_rss_params *p_rss,
			    struct qed_iov_vf_mbx *p_mbx,
			    u16 *tlvs_mask, u16 *tlvs_accepted)
{
	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
	bool b_reject = false;
	u16 table_size;
	u16 i, q_idx;

	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_rss_tlv) {
		p_data->rss_params = NULL;
		return;
	}

	memset(p_rss, 0, sizeof(struct qed_rss_params));

	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
				      VFPF_UPDATE_RSS_CONFIG_FLAG);
	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
					    VFPF_UPDATE_RSS_CAPS_FLAG);
	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
					 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
				   VFPF_UPDATE_RSS_KEY_FLAG);

	p_rss->rss_enable = p_rss_tlv->rss_enable;
	p_rss->rss_eng_id = vf->relative_vf_id + 1;
	p_rss->rss_caps = p_rss_tlv->rss_caps;
	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));

	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
			   (1 << p_rss_tlv->rss_table_size_log));

	for (i = 0; i < table_size; i++) {
		struct qed_queue_cid *p_cid;

		q_idx = p_rss_tlv->rss_ind_table[i];
		if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
					  QED_IOV_VALIDATE_Q_ENABLE)) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d]: Omitting RSS due to wrong queue %04x\n",
				   vf->relative_vf_id, q_idx);
			b_reject = true;
			goto out;
		}

		p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
		p_rss->rss_ind_table[i] = p_cid;
	}

	p_data->rss_params = p_rss;
out:
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
	if (!b_reject)
		*tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
}

static void
qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				struct qed_sp_vport_update_params *p_data,
				struct qed_sge_tpa_params *p_sge_tpa,
				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
			qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	if (!p_sge_tpa_tlv) {
		p_data->sge_tpa_params = NULL;
		return;
	}

	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));

	p_sge_tpa->update_tpa_en_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
	p_sge_tpa->update_tpa_param_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
	       VFPF_UPDATE_TPA_PARAM_FLAG);

	p_sge_tpa->tpa_ipv4_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
	p_sge_tpa->tpa_ipv6_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
	p_sge_tpa->tpa_pkt_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
	p_sge_tpa->tpa_hdr_data_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
	p_sge_tpa->tpa_gro_consistent_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);

	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;

	p_data->sge_tpa_params = p_sge_tpa;

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
}
static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
				    u8 vfid,
				    struct qed_sp_vport_update_params *params,
				    u16 *tlvs)
{
	u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
	struct qed_filter_accept_flags *flags = &params->accept_flags;
	struct qed_public_vf_info *vf_info;

	/* Untrusted VFs can't even be trusted to know that fact.
	 * Simply indicate everything is configured fine, and trace
	 * configuration 'behind their back'.
	 */
	if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
		return 0;

	vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);

	if (flags->update_rx_mode_config) {
		vf_info->rx_accept_mode = flags->rx_accept_filter;
		if (!vf_info->is_trusted_configured)
			flags->rx_accept_filter &= ~mask;
	}

	if (flags->update_tx_mode_config) {
		vf_info->tx_accept_mode = flags->tx_accept_filter;
		if (!vf_info->is_trusted_configured)
			flags->tx_accept_filter &= ~mask;
	}

	return 0;
}
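
/* Main vport-update dispatcher: all extended TLVs are collected into a
 * single qed_sp_vport_update() invocation. tlvs_mask records every TLV
 * found, while tlvs_accepted drops those failing validation (currently
 * only RSS), so the response tells the VF exactly what was honored.
 */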
static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_rss_params *p_rss_params = NULL;
	struct qed_sp_vport_update_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct qed_sge_tpa_params sge_tpa_params;
	u16 tlvs_mask = 0, tlvs_accepted = 0;
	u8 status = PFVF_STATUS_SUCCESS;
	u16 length;
	int rc;

	/* Validate that the VF can send such a request */
	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing vport update\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	p_rss_params = vzalloc(sizeof(*p_rss_params));
	if (p_rss_params == NULL) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	memset(&params, 0, sizeof(params));
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.rss_params = NULL;

	/* Search for extended tlvs list and update values
	 * from VF in struct qed_sp_vport_update_params.
	 */
	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
					&sge_tpa_params, mbx, &tlvs_mask);

	tlvs_accepted = tlvs_mask;

	/* Some of the extended TLVs need to be validated first; In that case,
	 * they can update the mask without updating the accepted [so that
	 * PF could communicate to VF it has rejected request].
	 */
	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
				    mbx, &tlvs_mask, &tlvs_accepted);

	if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
				     &params, &tlvs_accepted)) {
		tlvs_accepted = 0;
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	if (!tlvs_accepted) {
		if (tlvs_mask)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Upper-layer prevents VF vport configuration\n");
		else
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "No feature tlvs found for vport update\n");
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	vfree(p_rss_params);
	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
						  tlvs_mask, tlvs_accepted);
	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
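
/* Shadow configuration: the PF keeps its own copy of the unicast MACs
 * and VLANs a VF configures, both to enforce forced-MAC/PVID policy and
 * to allow re-applying the VF's filters later on.
 */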
static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
					 struct qed_vf_info *p_vf,
					 struct qed_filter_ucast *p_params)
{
	int i;

	/* First remove entries and then add new ones */
	if (p_params->opcode == QED_FILTER_REMOVE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			if (p_vf->shadow_config.vlans[i].used &&
			    p_vf->shadow_config.vlans[i].vid ==
			    p_params->vlan) {
				p_vf->shadow_config.vlans[i].used = false;
				break;
			}
		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to remove a non-existing vlan\n",
				   p_vf->relative_vf_id);
			return -EINVAL;
		}
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			p_vf->shadow_config.vlans[i].used = false;
	}

	/* In forced mode, we're willing to remove entries - but we don't add
	 * new ones.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
		return 0;

	if (p_params->opcode == QED_FILTER_ADD ||
	    p_params->opcode == QED_FILTER_REPLACE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
			if (p_vf->shadow_config.vlans[i].used)
				continue;

			p_vf->shadow_config.vlans[i].used = true;
			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
			break;
		}

		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to configure more than %d vlan filters\n",
				   p_vf->relative_vf_id,
				   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
			return -EINVAL;
		}
	}

	return 0;
}

static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					struct qed_filter_ucast *p_params)
{
	int i;

	/* If we're in forced-mode, we don't allow any change */
	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
		return 0;

	/* Don't keep track of shadow copy since we don't intend to restore. */
	if (p_vf->p_vf_info.is_trusted_configured)
		return 0;

	/* First remove entries and then add new ones */
	if (p_params->opcode == QED_FILTER_REMOVE) {
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (ether_addr_equal(p_vf->shadow_config.macs[i],
					     p_params->mac)) {
				eth_zero_addr(p_vf->shadow_config.macs[i]);
				break;
			}
		}

		if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "MAC isn't configured\n");
			return -EINVAL;
		}
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
			eth_zero_addr(p_vf->shadow_config.macs[i]);
	}

	/* List the new MAC address */
	if (p_params->opcode != QED_FILTER_ADD &&
	    p_params->opcode != QED_FILTER_REPLACE)
		return 0;

	for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
		if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
			ether_addr_copy(p_vf->shadow_config.macs[i],
					p_params->mac);
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Added MAC at %d entry in shadow\n", i);
			break;
		}
	}

	if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
		return -EINVAL;
	}

	return 0;
}

static int
qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 struct qed_filter_ucast *p_params)
{
	int rc = 0;

	if (p_params->type == QED_FILTER_MAC) {
		rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
		if (rc)
			return rc;
	}

	if (p_params->type == QED_FILTER_VLAN)
		rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);

	return rc;
}
static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
			     int vfid, struct qed_filter_ucast *params)
{
	struct qed_public_vf_info *vf;

	vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
	if (!vf)
		return -EINVAL;

	/* No real decision to make; Store the configured MAC */
	if (params->type == QED_FILTER_MAC ||
	    params->type == QED_FILTER_MAC_VLAN) {
		ether_addr_copy(vf->mac, params->mac);

		if (vf->is_trusted_configured) {
			qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid);

			/* Update and post bulletin again */
			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		}
	}

	return 0;
}
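
/* A unicast filter request passes through three gates before the ramrod
 * is sent: the shadow-config update, the forced-MAC/PVID policy held in
 * the bulletin, and the upper-layer qed_iov_chk_ucast() decision.
 */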
static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_ucast_filter_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_filter_ucast params;
	int rc;

	/* Prepare the unicast filter params */
	memset(&params, 0, sizeof(struct qed_filter_ucast));
	req = &mbx->req_virt->ucast_filter;
	params.opcode = (enum qed_filter_opcode)req->opcode;
	params.type = (enum qed_filter_ucast_type)req->type;

	params.is_rx_filter = 1;
	params.is_tx_filter = 1;
	params.vport_to_remove_from = vf->vport_id;
	params.vport_to_add_to = vf->vport_id;
	memcpy(params.mac, req->mac, ETH_ALEN);
	params.vlan = req->vlan;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
		   vf->abs_vf_id, params.opcode, params.type,
		   params.is_rx_filter ? "RX" : "",
		   params.is_tx_filter ? "TX" : "",
		   params.vport_to_add_to,
		   params.mac[0], params.mac[1],
		   params.mac[2], params.mac[3],
		   params.mac[4], params.mac[5], params.vlan);

	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Update shadow copy of the VF configuration */
	if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Determine if the unicast filtering is acceptable by PF */
	if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
	    (params.type == QED_FILTER_VLAN ||
	     params.type == QED_FILTER_MAC_VLAN)) {
		/* Once VLAN is forced or PVID is set, do not allow
		 * to add/replace any further VLANs.
		 */
		if (params.opcode == QED_FILTER_ADD ||
		    params.opcode == QED_FILTER_REPLACE)
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
	    (params.type == QED_FILTER_MAC ||
	     params.type == QED_FILTER_MAC_VLAN)) {
		if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
		    (params.opcode != QED_FILTER_ADD &&
		     params.opcode != QED_FILTER_REPLACE))
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
				     QED_SPQ_MODE_CB, NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	int i;

	/* Reset the SBs */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, false);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
			     sizeof(struct pfvf_def_resp_tlv),
			     PFVF_STATUS_SUCCESS);
}

static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;

	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
			     length, status);
}

static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *p_vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;
	int rc = 0;

	qed_iov_vf_cleanup(p_hwfn, p_vf);

	if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
		/* Stopping the VF */
		rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
				    p_vf->opaque_fid);

		if (rc) {
			DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
			       rc);
			status = PFVF_STATUS_FAILURE;
		}

		p_vf->state = VF_STOPPED;
	}

	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
			     length, status);
}
static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *p_vf)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_read_coal_resp_tlv *p_resp;
	struct vfpf_read_coal_req_tlv *req;
	u8 status = PFVF_STATUS_FAILURE;
	struct qed_vf_queue *p_queue;
	struct qed_queue_cid *p_cid;
	u16 coal = 0, qid, i;
	bool b_is_rx;
	int rc = 0;

	mbx->offset = (u8 *)mbx->reply_virt;
	req = &mbx->req_virt->read_coal_req;

	qid = req->qid;
	b_is_rx = req->is_rx ? true : false;

	if (b_is_rx) {
		if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid,
					  QED_IOV_VALIDATE_Q_ENABLE)) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d]: Invalid Rx queue_id = %d\n",
				   p_vf->abs_vf_id, qid);
			goto send_resp;
		}

		p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
		rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
		if (rc)
			goto send_resp;
	} else {
		if (!qed_iov_validate_txq(p_hwfn, p_vf, qid,
					  QED_IOV_VALIDATE_Q_ENABLE)) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d]: Invalid Tx queue_id = %d\n",
				   p_vf->abs_vf_id, qid);
			goto send_resp;
		}
		for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
			p_queue = &p_vf->vf_queues[qid];
			if ((!p_queue->cids[i].p_cid) ||
			    (!p_queue->cids[i].b_is_tx))
				continue;

			p_cid = p_queue->cids[i].p_cid;

			rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
			if (rc)
				goto send_resp;
			break;
		}
	}

	status = PFVF_STATUS_SUCCESS;

send_resp:
	p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ,
			     sizeof(*p_resp));
	p_resp->coal = coal;

	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
}

static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_update_coalesce *req;
	u8 status = PFVF_STATUS_FAILURE;
	struct qed_queue_cid *p_cid;
	u16 rx_coal, tx_coal;
	int rc = 0, i;
	u16 qid;

	req = &mbx->req_virt->update_coalesce;

	rx_coal = req->rx_coal;
	tx_coal = req->tx_coal;
	qid = req->qid;

	if (!qed_iov_validate_rxq(p_hwfn, vf, qid,
				  QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d]: Invalid Rx queue_id = %d\n",
			   vf->abs_vf_id, qid);
		goto out;
	}

	if (!qed_iov_validate_txq(p_hwfn, vf, qid,
				  QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d]: Invalid Tx queue_id = %d\n",
			   vf->abs_vf_id, qid);
		goto out;
	}

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
		   vf->abs_vf_id, rx_coal, tx_coal, qid);

	if (rx_coal) {
		p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);

		rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
		if (rc) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%d]: Unable to set rx queue = %d coalesce\n",
				   vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
			goto out;
		}
		vf->rx_coal = rx_coal;
	}

	if (tx_coal) {
		struct qed_vf_queue *p_queue = &vf->vf_queues[qid];

		for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
			if (!p_queue->cids[i].p_cid)
				continue;

			if (!p_queue->cids[i].b_is_tx)
				continue;

			rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
						  p_queue->cids[i].p_cid);
			if (rc) {
				DP_VERBOSE(p_hwfn,
					   QED_MSG_IOV,
					   "VF[%d]: Unable to set tx queue coalesce\n",
					   vf->abs_vf_id);
				goto out;
			}
		}
		vf->tx_coal = tx_coal;
	}

	status = PFVF_STATUS_SUCCESS;
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
			     sizeof(struct pfvf_def_resp_tlv), status);
}
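
/* VF FLR flow: before acking the management FW, the PF polls HW blocks
 * that may still hold VF resources - the DORQ usage counter must drain
 * to zero, and each PBF VOQ consumer must advance past the producer
 * snapshot taken up-front - before the final cleanup ramrod is sent.
 */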
static int
qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	int cnt;
	u32 val;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);

	for (cnt = 0; cnt < 50; cnt++) {
		val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
		if (!val)
			break;
		msleep(20);
	}
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	if (cnt == 50) {
		DP_ERR(p_hwfn,
		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
		       p_vf->abs_vf_id, val);
		return -EBUSY;
	}

	return 0;
}

static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
	int i, cnt;

	/* Read initial consumers & producers */
	for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
		u32 prod;

		cons[i] = qed_rd(p_hwfn, p_ptt,
				 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				 i * 0x40);
		prod = qed_rd(p_hwfn, p_ptt,
			      PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
			      i * 0x40);
		distance[i] = prod - cons[i];
	}

	/* Wait for consumers to pass the producers */
	i = 0;
	for (cnt = 0; cnt < 50; cnt++) {
		for (; i < MAX_NUM_VOQS_E4; i++) {
			u32 tmp;

			tmp = qed_rd(p_hwfn, p_ptt,
				     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				     i * 0x40);
			if (distance[i] > tmp - cons[i])
				break;
		}

		if (i == MAX_NUM_VOQS_E4)
			break;

		msleep(20);
	}

	if (cnt == 50) {
		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
		       p_vf->abs_vf_id, i);
		return -EBUSY;
	}

	return 0;
}

static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	int rc;

	rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	return 0;
}
static int
qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u16 rel_vf_id, u32 *ack_vfs)
{
	struct qed_vf_info *p_vf;
	int rc = 0;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!p_vf)
		return 0;

	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
	    (1ULL << (rel_vf_id % 64))) {
		u16 vfid = p_vf->abs_vf_id;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Handling FLR\n", vfid);

		qed_iov_vf_cleanup(p_hwfn, p_vf);

		/* If VF isn't active, no need for anything but SW */
		if (!p_vf->b_init)
			goto cleanup;

		rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
		if (rc)
			goto cleanup;

		rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
		if (rc) {
			DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
			return rc;
		}

		/* Workaround to make VF-PF channel ready, as FW
		 * doesn't do that as a part of FLR.
		 */
		REG_WR(p_hwfn,
		       GTT_BAR0_MAP_REG_USDM_RAM +
		       USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);

		/* VF_STOPPED has to be set only after final cleanup
		 * but prior to re-enabling the VF.
		 */
		p_vf->state = VF_STOPPED;

		rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
		if (rc) {
			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
			       vfid);
			return rc;
		}
cleanup:
		/* Mark VF for ack and clean pending state */
		if (p_vf->state == VF_RESET)
			p_vf->state = VF_STOPPED;
		ack_vfs[vfid / 32] |= BIT((vfid % 32));
		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
		p_vf->vf_mbx.b_pending_msg = false;
	}

	return rc;
}

int
qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ack_vfs[VF_MAX_STATIC / 32];
	int rc = 0;
	u16 i;

	memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

	/* Since BRB <-> PRS interface can't be tested as part of the flr
	 * polling due to HW limitations, simply sleep a bit. And since
	 * there's no need to wait per-vf, do it before looping.
	 */
	msleep(100);

	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
		qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);

	rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}

bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "[%08x,...,%08x]: %08x\n",
			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);

	if (!p_hwfn->cdev->p_iov_info) {
		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
		return false;
	}

	/* Mark VFs */
	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_vf_info *p_vf;
		u8 vfid;

		p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
		if (!p_vf)
			continue;

		vfid = p_vf->abs_vf_id;
		if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
			u16 rel_vf_id = p_vf->relative_vf_id;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] [rel %d] got FLR-ed\n",
				   vfid, rel_vf_id);

			p_vf->state = VF_RESET;

			/* No need to lock here, since pending_flr should
			 * only change here and before ACKing MFw. Since
			 * MFW will not trigger an additional attention for
			 * VF flr until ACKs, we're safe.
			 */
			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
			found = true;
		}
	}

	return found;
}
static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *p_params,
			     struct qed_mcp_link_state *p_link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;

	if (p_params)
		__qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
	if (p_link)
		__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
	if (p_caps)
		__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
}

static int
qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf)
{
	struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt;
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct vfpf_bulletin_update_mac_tlv *p_req;
	u8 status = PFVF_STATUS_SUCCESS;
	int rc = 0;

	if (!p_vf->p_vf_info.is_trusted_configured) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "Blocking bulletin update request from untrusted VF[%d]\n",
			   p_vf->abs_vf_id);
		status = PFVF_STATUS_NOT_SUPPORTED;
		rc = -EINVAL;
		goto send_status;
	}

	p_req = &mbx->req_virt->bulletin_update_mac;
	ether_addr_copy(p_bulletin->mac, p_req->mac);
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Updated bulletin of VF[%d] with requested MAC[%pM]\n",
		   p_vf->abs_vf_id, p_req->mac);

send_status:
	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
			     CHANNEL_TLV_BULLETIN_UPDATE_MAC,
			     sizeof(struct pfvf_def_resp_tlv), status);
	return rc;
}
static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt, int vfid)
{
	struct qed_iov_vf_mbx *mbx;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* qed_iov_process_mbx_request */
	if (!mbx->b_pending_msg) {
		DP_NOTICE(p_hwfn,
			  "VF[%02x]: Trying to process mailbox message when none is pending\n",
			  p_vf->abs_vf_id);
		return;
	}
	mbx->b_pending_msg = false;

	mbx->first_tlv = mbx->req_virt->first_tlv;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%02x]: Processing mailbox message [type %04x]\n",
		   p_vf->abs_vf_id, mbx->first_tlv.tl.type);

	/* check if tlv type is known */
	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
	    !p_vf->b_malicious) {
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_START:
			qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_TEARDOWN:
			qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_RXQ:
			qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_TXQ:
			qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_RXQS:
			qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_TXQS:
			qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_RXQ:
			qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_UPDATE:
			qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UCAST_FILTER:
			qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_CLOSE:
			qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_INT_CLEANUP:
			qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_RELEASE:
			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_TUNN_PARAM:
			qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_COALESCE_UPDATE:
			qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_COALESCE_READ:
			qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_BULLETIN_UPDATE_MAC:
			qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
			   p_vf->abs_vf_id, mbx->first_tlv.tl.type);

		qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
				     mbx->first_tlv.tl.type,
				     sizeof(struct pfvf_def_resp_tlv),
				     PFVF_STATUS_MALICIOUS);
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		DP_NOTICE(p_hwfn,
			  "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
			  p_vf->abs_vf_id,
			  mbx->first_tlv.tl.type,
			  mbx->first_tlv.tl.length,
			  mbx->first_tlv.padding, mbx->first_tlv.reply_address);

		/* Try replying in case reply address matches the acquisition's
		 * posted address.
		 */
		if (p_vf->acquire.first_tlv.reply_address &&
		    (mbx->first_tlv.reply_address ==
		     p_vf->acquire.first_tlv.reply_address)) {
			qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
					     mbx->first_tlv.tl.type,
					     sizeof(struct pfvf_def_resp_tlv),
					     PFVF_STATUS_NOT_SUPPORTED);
		} else {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%02x]: Can't respond to TLV - no valid reply address\n",
				   p_vf->abs_vf_id);
		}
	}
}
*p_hwfn
, u64
*events
)
3990 memset(events
, 0, sizeof(u64
) * QED_VF_ARRAY_LENGTH
);
3992 qed_for_each_vf(p_hwfn
, i
) {
3993 struct qed_vf_info
*p_vf
;
3995 p_vf
= &p_hwfn
->pf_iov_info
->vfs_array
[i
];
3996 if (p_vf
->vf_mbx
.b_pending_msg
)
3997 events
[i
/ 64] |= 1ULL << (i
% 64);
4001 static struct qed_vf_info
*qed_sriov_get_vf_from_absid(struct qed_hwfn
*p_hwfn
,
4004 u8 min
= (u8
) p_hwfn
->cdev
->p_iov_info
->first_vf_in_pf
;
4006 if (!_qed_iov_pf_sanity_check(p_hwfn
, (int)abs_vfid
- min
, false)) {
4009 "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
4014 return &p_hwfn
->pf_iov_info
->vfs_array
[(u8
) abs_vfid
- min
];
4017 static int qed_sriov_vfpf_msg(struct qed_hwfn
*p_hwfn
,
4018 u16 abs_vfid
, struct regpair
*vf_msg
)
4020 struct qed_vf_info
*p_vf
= qed_sriov_get_vf_from_absid(p_hwfn
,
4026 /* List the physical address of the request so that handler
4027 * could later on copy the message from it.
4029 p_vf
->vf_mbx
.pending_req
= (((u64
)vf_msg
->hi
) << 32) | vf_msg
->lo
;
4031 /* Mark the event and schedule the workqueue */
4032 p_vf
->vf_mbx
.b_pending_msg
= true;
4033 qed_schedule_iov(p_hwfn
, QED_IOV_WQ_MSG_FLAG
);
4038 static void qed_sriov_vfpf_malicious(struct qed_hwfn
*p_hwfn
,
4039 struct malicious_vf_eqe_data
*p_data
)
4041 struct qed_vf_info
*p_vf
;
4043 p_vf
= qed_sriov_get_vf_from_absid(p_hwfn
, p_data
->vf_id
);
4048 if (!p_vf
->b_malicious
) {
4050 "VF [%d] - Malicious behavior [%02x]\n",
4051 p_vf
->abs_vf_id
, p_data
->err_id
);
4053 p_vf
->b_malicious
= true;
4056 "VF [%d] - Malicious behavior [%02x]\n",
4057 p_vf
->abs_vf_id
, p_data
->err_id
);
4061 static int qed_sriov_eqe_event(struct qed_hwfn
*p_hwfn
,
4064 union event_ring_data
*data
, u8 fw_return_code
)
4067 case COMMON_EVENT_VF_PF_CHANNEL
:
4068 return qed_sriov_vfpf_msg(p_hwfn
, le16_to_cpu(echo
),
4069 &data
->vf_pf_channel
.msg_addr
);
4070 case COMMON_EVENT_MALICIOUS_VF
:
4071 qed_sriov_vfpf_malicious(p_hwfn
, &data
->malicious_vf
);
4074 DP_INFO(p_hwfn
->cdev
, "Unknown sriov eqe event 0x%02x\n",
4080 u16
qed_iov_get_next_active_vf(struct qed_hwfn
*p_hwfn
, u16 rel_vf_id
)
4082 struct qed_hw_sriov_info
*p_iov
= p_hwfn
->cdev
->p_iov_info
;
4088 for (i
= rel_vf_id
; i
< p_iov
->total_vfs
; i
++)
4089 if (qed_iov_is_valid_vfid(p_hwfn
, rel_vf_id
, true, false))
static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
			       int vfid)
{
	struct qed_dmae_params params;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return -EINVAL;

	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1);
	SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1);
	params.src_vfid = vf_info->abs_vf_id;

	if (qed_dmae_host2host(p_hwfn, ptt,
			       vf_info->vf_mbx.pending_req,
			       vf_info->vf_mbx.req_phys,
			       sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);
		return -EIO;
	}

	return 0;
}
4124 static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn
*p_hwfn
,
4127 struct qed_vf_info
*vf_info
;
4130 vf_info
= qed_iov_get_vf_info(p_hwfn
, (u16
)vfid
, true);
4132 DP_NOTICE(p_hwfn
->cdev
,
4133 "Can not set forced MAC, invalid vfid [%d]\n", vfid
);
4137 if (vf_info
->b_malicious
) {
4138 DP_NOTICE(p_hwfn
->cdev
,
4139 "Can't set forced MAC to malicious VF [%d]\n", vfid
);
4143 if (vf_info
->p_vf_info
.is_trusted_configured
) {
4144 feature
= BIT(VFPF_BULLETIN_MAC_ADDR
);
4145 /* Trust mode will disable Forced MAC */
4146 vf_info
->bulletin
.p_virt
->valid_bitmap
&=
4147 ~BIT(MAC_ADDR_FORCED
);
4149 feature
= BIT(MAC_ADDR_FORCED
);
4150 /* Forced MAC will disable MAC_ADDR */
4151 vf_info
->bulletin
.p_virt
->valid_bitmap
&=
4152 ~BIT(VFPF_BULLETIN_MAC_ADDR
);
4155 memcpy(vf_info
->bulletin
.p_virt
->mac
, mac
, ETH_ALEN
);
4157 vf_info
->bulletin
.p_virt
->valid_bitmap
|= feature
;
4159 qed_iov_configure_vport_forced(p_hwfn
, vf_info
, feature
);
static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n",
			  vfid);
		return -EINVAL;
	}

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n",
			  vfid);
		return -EINVAL;
	}

	if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Can not set MAC, Forced MAC is configured\n");
		return -EINVAL;
	}

	feature = BIT(VFPF_BULLETIN_MAC_ADDR);
	ether_addr_copy(vf_info->bulletin.p_virt->mac, mac);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	if (vf_info->p_vf_info.is_trusted_configured)
		qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);

	return 0;
}
static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
					     u16 pvid, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced VLAN, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can't set forced vlan to malicious VF [%d]\n", vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
				    int vfid, u16 vxlan_port, u16 geneve_port)
{
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set udp ports, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Can not set udp ports to malicious VF [%d]\n",
			   vfid);
		return;
	}

	vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
	vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
}
static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}
static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}
static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}
static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
{
	struct qed_vf_info *vf;
	int rc = -EINVAL;

	if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		goto out;

	if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = 0;
		goto out;
	}

	rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}
static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap &
	      BIT(VFPF_BULLETIN_MAC_ADDR)))
		return NULL;

	return p_vf->bulletin.p_virt->mac;
}
static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
					   u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
		return NULL;

	return p_vf->bulletin.p_virt->mac;
}
static u16
qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}
static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, int vfid, int val)
{
	struct qed_vf_info *vf;
	u8 abs_vp_id = 0;
	u16 rl_id;
	int rc;

	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return -EINVAL;

	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc)
		return rc;

	rl_id = abs_vp_id;	/* The "rl_id" is set as the "vport_id" */
	return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
}
static int
qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
{
	struct qed_vf_info *vf;
	u8 vport_id;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set min rate\n");
			return -EINVAL;
		}
	}

	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
	vport_id = vf->vport_id;

	return qed_configure_vport_wfq(cdev, vport_id, rate);
}
static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_wfq_data *vf_vp_wfq;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}
/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 * @hwfn: hardware function pointer
 * @flag: IOV flag for VF/PF
 */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
	/* Memory barriers order the atomic flag update against the
	 * workqueue run that consumes it.
	 */
	smp_mb__before_atomic();
	set_bit(flag, &hwfn->iov_task_flags);
	smp_mb__after_atomic();
	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}
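
/* qed_schedule_iov() is called both from EQE processing (see
 * qed_sriov_vfpf_msg() above) and from PF configuration flows; the atomic
 * flag plus zero-delay work lets multiple requests coalesce into a single
 * run of qed_iov_pf_task().
 */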
void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		queue_delayed_work(cdev->hwfns[i].iov_wq,
				   &cdev->hwfns[i].iov_task, 0);
}
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	int i, ret = 0;

	for_each_hwfn(cdev, i)
		if (cdev->hwfns[i].iov_wq)
			flush_workqueue(cdev->hwfns[i].iov_wq);

	/* Mark VFs for disablement */
	qed_iov_set_vfs_to_disable(cdev, true);

	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
		pci_disable_sriov(cdev->pdev);

	if (cdev->recov_in_prog) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Skip SRIOV disable operations in the device since a recovery is in progress\n");
		goto out;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
		int j, k;

		/* Failure to acquire the ptt in 100g creates an odd error
		 * where the first engine has already released IOV.
		 */
		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			return -EBUSY;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		qed_clean_wfq_db(hwfn, ptt);

		qed_for_each_vf(hwfn, j) {
			if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
				continue;

			/* Wait until VF is disabled before releasing */
			for (k = 0; k < 100; k++) {
				if (!qed_iov_is_vf_stopped(hwfn, j))
					msleep(20);
				else
					break;
			}

			if (k < 100)
				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
							  ptt, j);
			else
				DP_ERR(hwfn,
				       "Timeout waiting for VF's FLR to end\n");
		}

		qed_ptt_release(hwfn, ptt);
	}
out:
	qed_iov_set_vfs_to_disable(cdev, false);

	return ret;
}
static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
					u16 vfid,
					struct qed_iov_vf_init_params *params)
{
	u16 base, i;

	/* Since we have an equal resource distribution per-VF, and we assume
	 * PF has acquired the QED_PF_L2_QUE first queues, we start setting
	 * sequentially from there.
	 */
	base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;

	params->rel_vf_id = vfid;
	for (i = 0; i < params->num_queues; i++) {
		params->req_rx_queue[i] = base + i;
		params->req_tx_queue[i] = base + i;
	}
}
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
	struct qed_iov_vf_init_params params;
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, j, rc;

	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
		DP_NOTICE(cdev, "Can start at most %d VFs\n",
			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
		return -EINVAL;
	}

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		hwfn = &cdev->hwfns[j];
		ptt = qed_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
					  FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
					  16);

		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			rc = -EBUSY;
			goto err;
		}

		for (i = 0; i < num; i++) {
			if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qed_sriov_enable_qid_config(hwfn, i, &params);
			rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
			if (rc) {
				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
				qed_ptt_release(hwfn, ptt);
				goto err;
			}
		}

		qed_ptt_release(hwfn, ptt);
	}

	/* Enable SRIOV PCIe functions */
	rc = pci_enable_sriov(cdev->pdev, num);
	if (rc) {
		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
		goto err;
	}

	hwfn = QED_LEADING_HWFN(cdev);
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_ERR(hwfn, "Failed to acquire ptt\n");
		rc = -EBUSY;
		goto err;
	}

	rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
	if (rc)
		DP_INFO(cdev, "Failed to update eswitch mode\n");
	qed_ptt_release(hwfn, ptt);

	return num;

err:
	qed_sriov_disable(cdev, false);
	return rc;
}
static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
{
	if (!IS_QED_SRIOV(cdev)) {
		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
		return -EOPNOTSUPP;
	}

	if (num_vfs_param)
		return qed_sriov_enable(cdev, num_vfs_param);
	else
		return qed_sriov_disable(cdev, true);
}
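
/* qed_sriov_configure() is exported through qed_iov_ops_pass (see the
 * bottom of this file) and is ultimately reached from the standard PCI
 * sriov_numvfs sysfs attribute, e.g. (illustrative BDF):
 *   echo 2 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 * Writing 0 tears SR-IOV back down via qed_sriov_disable().
 */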
static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF MAC; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the MAC, and schedule the IOV task */
		if (vf_info->is_trusted_configured)
			ether_addr_copy(vf_info->mac, mac);
		else
			ether_addr_copy(vf_info->forced_mac, mac);

		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}
static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF VLAN; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] VLAN (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced vlan, and schedule the IOV task */
		vf_info->forced_vlan = vid;
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}
static int qed_get_vf_config(struct qed_dev *cdev,
			     int vf_id, struct ifla_vf_info *ivi)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_public_vf_info *vf_info;
	struct qed_mcp_link_state link;
	u32 tx_rate;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);

	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);

	/* Fill information about VF */
	ivi->vf = vf_id;

	if (is_valid_ether_addr(vf_info->forced_mac))
		ether_addr_copy(ivi->mac, vf_info->forced_mac);
	else
		ether_addr_copy(ivi->mac, vf_info->mac);

	ivi->vlan = vf_info->forced_vlan;
	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
	ivi->linkstate = vf_info->link_state;
	tx_rate = vf_info->tx_rate;
	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);

	return 0;
}
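
/* This backs the .get_config callback below, i.e. the per-VF data that
 * qede reports through the ndo_get_vf_config path ("ip link show" output).
 */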
void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
	struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	int i;

	if (!hwfn->pf_iov_info)
		return;

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
		if (!vf_info)
			continue;

		/* Only hwfn0 is actually interested in the link speed.
		 * But since only it would receive an MFW indication of link,
		 * need to take configuration from it - otherwise things like
		 * rate limiting for hwfn1 VF would not work.
		 */
		memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
		       sizeof(params));
		memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
		memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
		       sizeof(caps));

		/* Modify link according to the VF's configured link state */
		switch (vf_info->link_state) {
		case IFLA_VF_LINK_STATE_DISABLE:
			link.link_up = false;
			break;
		case IFLA_VF_LINK_STATE_ENABLE:
			link.link_up = true;
			/* Set speed according to maximum supported by HW -
			 * that is 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (hwfn->cdev->num_hwfns > 1) ?
				     100000 : 40000;
			break;
		default:
			/* In auto mode pass PF link image to VF */
			break;
		}

		if (link.link_up && vf_info->tx_rate) {
			struct qed_ptt *ptt;
			int rate;

			rate = min_t(int, vf_info->tx_rate, link.speed);

			ptt = qed_ptt_acquire(hwfn);
			if (!ptt) {
				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
				return;
			}

			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
				vf_info->tx_rate = rate;
				link.speed = rate;
			}

			qed_ptt_release(hwfn, ptt);
		}

		qed_iov_set_link(hwfn, i, &params, &link, &caps);
	}

	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}
static int qed_set_vf_link_state(struct qed_dev *cdev,
				 int vf_id, int link_state)
{
	int i;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	/* Handle configuration of link state */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
		if (!vf)
			continue;

		if (vf->link_state == link_state)
			continue;

		vf->link_state = link_state;
		qed_inform_vf_link_state(&cdev->hwfns[i]);
	}

	return 0;
}
static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
{
	int i, rc = -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
		if (rc)
			break;
	}

	return rc;
}
static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set tx rate\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);

		vf->tx_rate = rate;

		qed_inform_vf_link_state(p_hwfn);
	}

	return 0;
}
static int qed_set_vf_rate(struct qed_dev *cdev,
			   int vfid, u32 min_rate, u32 max_rate)
{
	int rc_min = 0, rc_max = 0;

	if (max_rate)
		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);

	if (min_rate)
		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);

	if (rc_max | rc_min)
		return -EINVAL;

	return 0;
}
static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
			DP_NOTICE(hwfn,
				  "SR-IOV sanity check failed, can't set trust\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(hwfn, vfid, true);

		if (vf->is_trusted_request == trust)
			return 0;
		vf->is_trusted_request = trust;

		qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
	}

	return 0;
}
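
/* Trust changes only record the request here; the actual vport update is
 * applied asynchronously when qed_iov_pf_task() sees QED_IOV_WQ_TRUST_FLAG
 * and calls qed_iov_handle_trust_change() below.
 */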
static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
	u64 events[QED_VF_ARRAY_LENGTH];
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Can't acquire PTT; re-scheduling\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
		return;
	}

	qed_iov_pf_get_pending_events(hwfn, events);

	DP_VERBOSE(hwfn, QED_MSG_IOV,
		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
		   events[0], events[1], events[2]);

	qed_for_each_vf(hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
			continue;

		qed_iov_process_mbx_req(hwfn, ptt, i);
	}

	qed_ptt_release(hwfn, ptt);
}
static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn,
				       u8 *mac,
				       struct qed_public_vf_info *info)
{
	if (info->is_trusted_configured) {
		if (is_valid_ether_addr(info->mac) &&
		    (!mac || !ether_addr_equal(mac, info->mac)))
			return true;
	} else {
		if (is_valid_ether_addr(info->forced_mac) &&
		    (!mac || !ether_addr_equal(mac, info->forced_mac)))
			return true;
	}

	return false;
}
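
/* The helper above returns true when the PF-side requested MAC (info->mac
 * when trusted, info->forced_mac otherwise) is valid but differs from what
 * the bulletin currently advertises - i.e. the bulletin needs a refresh.
 */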
static void qed_set_bulletin_mac(struct qed_hwfn *hwfn,
				 struct qed_public_vf_info *info,
				 int vfid)
{
	if (info->is_trusted_configured)
		qed_iov_bulletin_set_mac(hwfn, info->mac, vfid);
	else
		qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid);
}
static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
{
	int i;

	qed_for_each_vf(hwfn, i) {
		struct qed_public_vf_info *info;
		bool update = false;
		u8 *mac;

		info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (!info)
			continue;

		/* Update data on bulletin board */
		if (info->is_trusted_configured)
			mac = qed_iov_bulletin_get_mac(hwfn, i);
		else
			mac = qed_iov_bulletin_get_forced_mac(hwfn, i);

		if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);

			/* Update bulletin board with MAC */
			qed_set_bulletin_mac(hwfn, info, i);
			update = true;
		}

		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
		    info->forced_vlan) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
				   info->forced_vlan,
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
			qed_iov_bulletin_set_forced_vlan(hwfn,
							 info->forced_vlan, i);
			update = true;
		}

		if (update)
			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}
static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
{
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		return;
	}

	qed_for_each_vf(hwfn, i)
		qed_iov_post_vf_bulletin(hwfn, i, ptt);

	qed_ptt_release(hwfn, ptt);
}
static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
{
	struct qed_public_vf_info *vf_info;
	struct qed_vf_info *vf;
	u8 *force_mac;
	int i;

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
	vf = qed_iov_get_vf_info(hwfn, vf_id, true);

	if (!vf_info || !vf)
		return;

	/* Force MAC converted to generic MAC in case of VF trust on */
	if (vf_info->is_trusted_configured &&
	    (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) {
		force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id);
		if (!force_mac)
			return;

		/* Clear existing shadow copy of MAC to have a clean
		 * slate.
		 */
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (ether_addr_equal(vf->shadow_config.macs[i],
					     vf_info->mac)) {
				memset(vf->shadow_config.macs[i], 0,
				       ETH_ALEN);
				DP_VERBOSE(hwfn, QED_MSG_IOV,
					   "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
					   vf_info->mac, vf_id);
				break;
			}
		}

		ether_addr_copy(vf_info->mac, force_mac);
		memset(vf_info->forced_mac, 0, ETH_ALEN);
		vf->bulletin.p_virt->valid_bitmap &=
			~BIT(MAC_ADDR_FORCED);
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}

	/* Update shadow copy with VF MAC when trust mode is turned off */
	if (!vf_info->is_trusted_configured) {
		u8 empty_mac[ETH_ALEN];

		memset(empty_mac, 0, ETH_ALEN);
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (ether_addr_equal(vf->shadow_config.macs[i],
					     empty_mac)) {
				ether_addr_copy(vf->shadow_config.macs[i],
						vf_info->mac);
				DP_VERBOSE(hwfn, QED_MSG_IOV,
					   "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n",
					   vf_info->mac, vf_id);
				break;
			}
		}
		/* Clear bulletin when trust mode is turned off,
		 * to have a clean slate for next (normal) operations.
		 */
		qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id);
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}
static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
{
	struct qed_sp_vport_update_params params;
	struct qed_filter_accept_flags *flags;
	struct qed_public_vf_info *vf_info;
	struct qed_vf_info *vf;
	u8 mask;
	int i;

	mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
	flags = &params.accept_flags;

	qed_for_each_vf(hwfn, i) {
		/* Need to make sure current requested configuration didn't
		 * flip so that we'll end up configuring something that's not
		 * needed.
		 */
		vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (vf_info->is_trusted_configured ==
		    vf_info->is_trusted_request)
			continue;
		vf_info->is_trusted_configured = vf_info->is_trusted_request;

		/* Handle forced MAC mode */
		qed_update_mac_for_vf_trust_change(hwfn, i);

		/* Validate that the VF has a configured vport */
		vf = qed_iov_get_vf_info(hwfn, i, true);
		if (!vf->vport_instance)
			continue;

		memset(&params, 0, sizeof(params));
		params.opaque_fid = vf->opaque_fid;
		params.vport_id = vf->vport_id;

		params.update_ctl_frame_check = 1;
		params.mac_chk_en = !vf_info->is_trusted_configured;

		if (vf_info->rx_accept_mode & mask) {
			flags->update_rx_mode_config = 1;
			flags->rx_accept_filter = vf_info->rx_accept_mode;
		}

		if (vf_info->tx_accept_mode & mask) {
			flags->update_tx_mode_config = 1;
			flags->tx_accept_filter = vf_info->tx_accept_mode;
		}

		/* Remove if needed; Otherwise this would set the mask */
		if (!vf_info->is_trusted_configured) {
			flags->rx_accept_filter &= ~mask;
			flags->tx_accept_filter &= ~mask;
		}

		if (flags->update_rx_mode_config ||
		    flags->update_tx_mode_config ||
		    params.update_ctl_frame_check)
			qed_sp_vport_update(hwfn, &params,
					    QED_SPQ_MODE_EBLOCK, NULL);
	}
}
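
/* Net effect of the trust handler: only trusted VFs may keep the "accept
 * unmatched" unicast/multicast filters (promiscuous-like behavior), and
 * control-frame MAC checking (mac_chk_en) is re-armed whenever trust is
 * revoked.
 */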
static void qed_iov_pf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	int rc;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		if (!ptt) {
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
		} else {
			rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
			if (rc)
				qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);

			qed_ptt_release(hwfn, ptt);
		}
	}

	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
		qed_handle_vf_msg(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_pf_set_vf_unicast(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_bulletin_post(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
		qed_iov_handle_trust_change(hwfn);
}
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
	int i;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].iov_wq)
			continue;

		if (schedule_first) {
			qed_schedule_iov(&cdev->hwfns[i],
					 QED_IOV_WQ_STOP_WQ_FLAG);
			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
		}

		flush_workqueue(cdev->hwfns[i].iov_wq);
		destroy_workqueue(cdev->hwfns[i].iov_wq);
	}
}
int qed_iov_wq_start(struct qed_dev *cdev)
{
	char name[NAME_SIZE];
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* PFs need a dedicated workqueue only if they support IOV.
		 * VFs always require one.
		 */
		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
			continue;

		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

		p_hwfn->iov_wq = create_singlethread_workqueue(name);
		if (!p_hwfn->iov_wq) {
			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
			return -ENOMEM;
		}

		if (IS_PF(cdev))
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
		else
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
	}

	return 0;
}
const struct qed_iov_hv_ops qed_iov_ops_pass = {
	.configure = &qed_sriov_configure,
	.set_mac = &qed_sriov_pf_set_mac,
	.set_vlan = &qed_sriov_pf_set_vlan,
	.get_config = &qed_get_vf_config,
	.set_link_state = &qed_set_vf_link_state,
	.set_spoof = &qed_spoof_configure,
	.set_rate = &qed_set_vf_rate,
	.set_trust = &qed_set_vf_trust,
};