/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			       u8 opcode,
			       __le16 echo,
			       union event_ring_data *data, u8 fw_return_code);

static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
	u8 legacy = 0;

	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		legacy |= QED_QCID_LEGACY_VF_RX_PROD;

	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		legacy |= QED_QCID_LEGACY_VF_CID;

	return legacy;
}
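/* Note: both legacy flags computed above feed the queue-cid creation path
 * later in this file - QED_QCID_LEGACY_VF_RX_PROD marks VFs that still expect
 * the old Rx producer location, and QED_QCID_LEGACY_VF_CID marks VFs that did
 * not advertise VFPF_ACQUIRE_CAP_QUEUE_QIDS and therefore use the legacy CID
 * numbering.
 */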
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 fp_minor;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		return -EINVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
			   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR,
			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id,
			   bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				  b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

static struct qed_queue_cid *
qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
{
	int i;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
			return p_queue->cids[i].p_cid;
	}

	return NULL;
}
enum qed_iov_validate_q_mode {
	QED_IOV_VALIDATE_Q_NA,
	QED_IOV_VALIDATE_Q_ENABLE,
	QED_IOV_VALIDATE_Q_DISABLE,
};

static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					u16 qid,
					enum qed_iov_validate_q_mode mode,
					bool b_is_tx)
{
	int i;

	if (mode == QED_IOV_VALIDATE_Q_NA)
		return true;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		struct qed_vf_queue_cid *p_qcid;

		p_qcid = &p_vf->vf_queues[qid].cids[i];

		if (!p_qcid->p_cid)
			continue;

		if (p_qcid->b_is_tx != b_is_tx)
			continue;

		return mode == QED_IOV_VALIDATE_Q_ENABLE;
	}

	/* In case we haven't found any valid cid, then it's disabled */
	return mode == QED_IOV_VALIDATE_Q_DISABLE;
}

static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 rx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 tx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *p_vf, u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}

static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						false))
			return true;

	return false;
}

static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_txqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						true))
			return true;

	return false;
}
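/* The bulletin board is the PF-to-VF one-way channel: the PF keeps a per-VF
 * struct qed_bulletin_content, bumps its version, recomputes the CRC over
 * everything past the crc field itself and DMAEs the result into VF memory;
 * the VF side is expected to validate the CRC before consuming the copy.
 */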
static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
				    int vfid, struct qed_ptt *p_ptt)
{
	struct qed_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct qed_dmae_params params;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return -EINVAL;

	if (!p_vf->vf_bulletin)
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	memset(&params, 0, sizeof(params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				  &params);
}
static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev, QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres, iov->cap, iov->ctrl,
		   iov->total_vfs, iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
	}

	return 0;
}
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;

		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	}
}
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}
int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov)
		return -ENOMEM;

	p_hwfn->pf_iov_info = p_sriov;

	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				  qed_sriov_eqe_event);

	return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}
int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info)
		return -ENOMEM;

	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine Vfs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on next device.
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by later
	 * to differentiate between the two.
	 */
	if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		cdev->p_iov_info->first_vf_in_pf = first;

		if (QED_PATH_ID(p_hwfn))
			cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		cdev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}
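/* Worked example for the first_vf_in_pf heuristic above (values are purely
 * illustrative): with ARI, a PF with abs_pf_id 2 and an SR-IOV offset of 62
 * satisfies 62 < (256 - 2), so first = 62 + 2 - 16 = 48, i.e. this PF's VFs
 * start at absolute VF index 48. Without ARI the offset is at least
 * (256 - pf_id), so the "- 256" branch is taken instead.
 */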
bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
			      int vfid, bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}
static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

static int
qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
{
	u8 current_max = 0;
	int i;

	/* For AH onward, configuration is per-PF. Find maximum of all
	 * the currently enabled child VFs, and set the number to be that.
	 */
	if (!QED_IS_BB(p_hwfn->cdev)) {
		qed_for_each_vf(p_hwfn, i) {
			struct qed_vf_info *p_vf;

			p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
			if (!p_vf)
				continue;

			current_max = max_t(u8, current_max, p_vf->num_sbs);
		}
	}

	if (num_sbs > current_max)
		return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
					      abs_vf_id, num_sbs);

	return 0;
}
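/* Enabling a VF follows a fixed sequence: clear any stale PGLUE error and the
 * malicious flag, reset its IGU state, make sure enough MSI-X vectors are
 * configured via the MFW, and only then run the per-VF init phase with
 * IGU_VF_CONF_PARENT pointing back at this PF.
 */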
static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					   vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	return rc;
}
/**
 * @brief qed_iov_config_perm_table - configure the permission
 *        zone table.
 *        In E4, queue zone permission table size is 320x9. There
 *        are 320 VF queues for single engine device (256 for dual
 *        engine device), and each entry has the following format:
 *        {Valid, VF[7:0]}
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
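/* SB allocation below draws from the IGU free-IOV pool: the request is capped
 * to usage.free_cnt_iov, each CAM line taken is marked !QED_IGU_STATUS_FREE,
 * and qed_iov_free_vf_igu_sbs() performs the exact reverse bookkeeping when
 * the VF is torn down.
 */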
static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *p_block;
	struct cau_sb_entry sb_entry;
	int qid = 0;
	u32 val = 0;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	for (qid = 0; qid < num_rx_queues; qid++) {
		p_block = qed_get_igu_free_sb(p_hwfn, false);
		vf->igu_sbs[qid] = p_block->igu_sb_id;
		p_block->status &= ~QED_IGU_STATUS_FREE;
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

		qed_wr(p_hwfn, p_ptt,
		       IGU_REG_MAPPING_MEMORY +
		       sizeof(u32) * p_block->igu_sb_id, val);

		/* Configure igu sb in CAU which were marked valid */
		qed_init_cau_sb_entry(p_hwfn, &sb_entry,
				      p_hwfn->rel_pf_id, vf->abs_vf_id, 1);
		qed_dmae_host2grc(p_hwfn, p_ptt,
				  (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  p_block->igu_sb_id * sizeof(u64), 2, 0);
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
	}

	vf->num_sbs = 0;
}
static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_iov_vf_init_params *p_params)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params link_params;
	struct qed_mcp_link_state link_state;
	u8 num_of_vf_available_chains = 0;
	struct qed_vf_info *vf = NULL;
	u16 qid, num_irqs;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return -EINVAL;
	}

	/* Perform sanity checking on the requested queue_id */
	for (i = 0; i < p_params->num_queues; i++) {
		u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
		u16 max_vf_qzone = min_vf_qzone +
				   FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;

		qid = p_params->req_rx_queue[i];
		if (qid < min_vf_qzone || qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
				  qid,
				  p_params->rel_vf_id,
				  min_vf_qzone, max_vf_qzone);
			return -EINVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
				  qid, p_params->rel_vf_id, max_vf_qzone);
			return -EINVAL;
		}

		/* If client *really* wants, Tx qid can be shared with PF */
		if (qid < min_vf_qzone)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
				   p_params->rel_vf_id, qid, i);
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							      p_ptt,
							      vf, num_irqs);
	if (!num_of_vf_available_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct qed_vf_queue *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);
	}

	/* Update the link configuration in bulletin */
	memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
	       sizeof(link_params));
	memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
	memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
	       sizeof(link_caps));
	qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
			 &link_params, &link_state, &link_caps);

	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}
static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	memset(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}
static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}
static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	/* Once PF copies the rc to the VF, the latter can continue
	 * and send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);
}
static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
{
	switch (flag) {
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}

static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_iov_vf_mbx *p_mbx,
					    u8 status,
					    u16 tlvs_mask, u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & BIT(i)))
			continue;

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & BIT(i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	return total_len;
}
static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

static struct
qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return NULL;

	return &vf->p_vf_info;
}

static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
	if (!vf_info)
		return;

	/* Clear the VF mac */
	eth_zero_addr(vf_info->mac);

	vf_info->rx_accept_mode = 0;
	vf_info->tx_accept_mode = 0;
}
static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i, j;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
		struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];

		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
			if (!p_queue->cids[j].p_cid)
				continue;

			qed_eth_queue_cid_release(p_hwfn,
						  p_queue->cids[j].p_cid);
			p_queue->cids[j].p_cid = NULL;
		}
	}

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}
/* Returns either 0, or log(size) */
static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

	if (val)
		return val + 11;
	return 0;
}
static void
qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *p_vf,
				 struct vf_pf_resc_request *p_req,
				 struct pf_vf_resc *p_resp)
{
	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
	u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
		     qed_db_addr_vf(0, DQ_DEMS_LEGACY);
	u32 bar_size;

	p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);

	/* If VF didn't bother asking for QIDs then don't bother limiting
	 * number of CIDs. The VF doesn't care about the number, and this
	 * has the likely result of causing an additional acquisition.
	 */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	/* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
	 * that would make sure doorbells for all CIDs fall within the bar.
	 * If it doesn't, make sure regview window is sufficient.
	 */
	if (p_vf->acquire.vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
		bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
		if (bar_size)
			bar_size = 1 << bar_size;

		if (p_hwfn->cdev->num_hwfns > 1)
			bar_size /= 2;
	} else {
		bar_size = PXP_VF_BAR0_DQ_LENGTH;
	}

	if (bar_size / db_size < 256)
		p_resp->num_cids = min_t(u8, p_resp->num_cids,
					 (u8)(bar_size / db_size));
}
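/* A note on the arithmetic above: db_size is the stride between two
 * consecutive VF doorbell addresses, so bar_size / db_size is the number of
 * CIDs whose doorbells fit inside the mapped bar; num_cids is clamped to that
 * figure only when it drops below 256.
 */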
static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *p_vf,
				      struct vf_pf_resc_request *p_req,
				      struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				(u16 *)&p_resp->hw_qid[i]);
	}

	/* Filter related information */
	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
					p_req->num_mac_filters);
	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
					 p_req->num_vlan_filters);

	qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs,
			   p_resp->num_rxqs,
			   p_req->num_txqs,
			   p_resp->num_txqs,
			   p_req->num_sbs,
			   p_resp->num_sbs,
			   p_req->num_mac_filters,
			   p_resp->num_mac_filters,
			   p_req->num_vlan_filters,
			   p_resp->num_vlan_filters,
			   p_req->num_mc_filters,
			   p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}
static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
					 struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  offsetof(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  offsetof(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  offsetof(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}
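/* ACQUIRE is the first mailbox exchange a VF performs: the PF validates the
 * VF's fastpath HSI version and capabilities, stores the request, fills in
 * its own device info and the negotiated resources, starts the VF in FW and
 * posts the first bulletin before replying.
 */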
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	int rc;

	memset(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		goto out;
	}

	/* Store the acquire message */
	memcpy(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB_E4;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	/* Share our ability to use multiple queue-ids only with VFs
	 * that request it.
	 */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	/* Share the sizes of the bars with VF */
	resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);

	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this field anyway.
	 */
	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
					 req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						  &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = qed_sp_vf_start(p_hwfn, vf);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;
	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);

	vf->state = VF_ACQUIRED;

out:
	/* Prepare Response */
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}
static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
				  struct qed_vf_info *p_vf, bool val)
{
	struct qed_sp_vport_update_params params;
	int rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return 0;
	}

	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (!rc) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}
static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf)
{
	struct qed_filter_ucast filter;
	int rc = 0;
	int i;

	memset(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = QED_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = QED_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}

static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
				   struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;

	if ((events & BIT(VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}
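/* Forced configuration comes from the hypervisor via the bulletin board
 * valid_bitmap: when MAC_ADDR_FORCED / VLAN_ADDR_FORCED are set the PF
 * programs the filters itself (below) and, for a forced VLAN, also rewrites
 * the vport defaults and every Rx queue, overriding whatever the VF asked
 * for.
 */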
static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
					  struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;
	struct qed_filter_ucast filter;

	if (!p_vf->vport_instance)
		return -EINVAL;

	if (events & BIT(MAC_ADDR_FORCED)) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_MAC;
		filter.opcode = QED_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}

		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
	}

	if (events & BIT(VLAN_ADDR_FORCED)) {
		struct qed_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
					      QED_FILTER_FLUSH;

		/* Send the ramrod */
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		memset(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ? 1
				      : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = qed_sp_vport_update(p_hwfn,
					 &vport_update,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
			struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
			struct qed_queue_cid *p_cid = NULL;

			/* There can be at most 1 Rx queue on qzone. Find it */
			p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
			if (!p_cid)
				continue;

			rc = qed_sp_eth_rx_queues_update(p_hwfn,
							 (void **)&p_cid,
							 1, 0, 1,
							 QED_SPQ_MODE_EBLOCK,
							 NULL);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed to send Rx update for queue[0x%04x]\n",
					  p_cid->rel.queue_id);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}
static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_sp_vport_start_params params = { 0 };
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	int rc;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		qed_int_cau_conf_sb(p_hwfn, p_ptt,
				    start->sb_addr[sb_id],
				    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
	}

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;
	params.check_mac = true;

	rc = qed_sp_eth_vport_start(p_hwfn, &params);
	if (rc) {
		DP_ERR(p_hwfn,
		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);

		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			     sizeof(struct pfvf_def_resp_tlv), status);
}
static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	u8 status = PFVF_STATUS_SUCCESS;
	int rc;

	vf->vport_instance--;
	vf->spoof_chk = false;

	if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
	    (qed_iov_validate_active_txq(p_hwfn, vf))) {
		vf->b_malicious = true;
		DP_NOTICE(p_hwfn,
			  "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
			  vf->abs_vf_id);
		status = PFVF_STATUS_MALICIOUS;
		goto out;
	}

	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
	if (rc) {
		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
		       rc);
		status = PFVF_STATUS_FAILURE;
	}

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			     sizeof(struct pfvf_def_resp_tlv), status);
}
static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *vf,
					  u8 status, bool b_legacy)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	struct vfpf_start_rxq_tlv *req;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
			    length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		req = &mbx->req_virt->start_rxq;
		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
				offsetof(struct mstorm_vf_zone,
					 non_trigger.eth_rx_queue_producers) +
				sizeof(struct eth_rx_prod_data) * req->rx_qid;
	}

	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
			     struct qed_vf_info *p_vf, bool b_is_tx)
{
	struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
	struct vfpf_qid_tlv *p_qid_tlv;

	/* Search for the qid if the VF published it's going to provide it */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
		if (b_is_tx)
			return QED_IOV_LEGACY_QID_TX;
		else
			return QED_IOV_LEGACY_QID_RX;
	}

	p_qid_tlv = (struct vfpf_qid_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
					     CHANNEL_TLV_QID);
	if (!p_qid_tlv) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%2x]: Failed to provide qid\n",
			   p_vf->relative_vf_id);

		return QED_IOV_QID_INVALID;
	}

	if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%02x]: Provided qid out-of-bounds %02x\n",
			   p_vf->relative_vf_id, p_qid_tlv->qid);
		return QED_IOV_QID_INVALID;
	}

	return p_qid_tlv->qid;
}
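/* With VFPF_ACQUIRE_CAP_QUEUE_QIDS a single queue-zone can host several CIDs,
 * so the index returned above selects which cids[] slot of the VF's
 * qed_vf_queue a request refers to; legacy VFs always land on the fixed
 * Rx/Tx slots.
 */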
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	struct qed_queue_start_common_params params;
	struct qed_queue_cid_vf_params vf_params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	u8 qid_usage_idx, vf_legacy = 0;
	struct vfpf_start_rxq_tlv *req;
	struct qed_vf_queue *p_queue;
	struct qed_queue_cid *p_cid;
	struct qed_sb_info sb_dummy;
	int rc;

	req = &mbx->req_virt->start_rxq;

	if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
				  QED_IOV_VALIDATE_Q_DISABLE) ||
	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	p_queue = &vf->vf_queues[req->rx_qid];
	if (p_queue->cids[qid_usage_idx].p_cid)
		goto out;

	vf_legacy = qed_vf_calculate_legacy(vf);

	/* Acquire a new queue-cid */
	memset(&params, 0, sizeof(params));
	params.queue_id = p_queue->fw_rx_qid;
	params.vport_id = vf->vport_id;
	params.stats_id = vf->abs_vf_id + 0x10;

	/* Since IGU index is passed via sb_info, construct a dummy one */
	memset(&sb_dummy, 0, sizeof(sb_dummy));
	sb_dummy.igu_sb_id = req->hw_sb;
	params.p_sb = &sb_dummy;
	params.sb_idx = req->sb_index;

	memset(&vf_params, 0, sizeof(vf_params));
	vf_params.vfid = vf->relative_vf_id;
	vf_params.vf_qid = (u8)req->rx_qid;
	vf_params.vf_legacy = vf_legacy;
	vf_params.qid_usage_idx = qid_usage_idx;
	p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
				     &params, true, &vf_params);
	if (!p_cid)
		goto out;

	/* Legacy VFs have their Producers in a different location, which they
	 * calculate on their own and clean the producer prior to this.
	 */
	if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
		REG_WR(p_hwfn,
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
		       0);

	rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
				      req->bd_max_bytes,
				      req->rxq_addr,
				      req->cqe_pbl_addr, req->cqe_pbl_size);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	} else {
		p_queue->cids[qid_usage_idx].p_cid = p_cid;
		p_queue->cids[qid_usage_idx].b_is_tx = false;
		status = PFVF_STATUS_SUCCESS;
		vf->num_active_rxqs++;
	}

out:
	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
				      !!(vf_legacy &
					 QED_QCID_LEGACY_VF_RX_PROD));
}
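/* Fill the tunnel-update response TLV with the PF's current tunnel
 * configuration (modes, classification types and UDP ports).
 */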
static void
qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
			       struct qed_tunnel_info *p_tun,
			       u16 tunn_feature_mask)
{
	p_resp->tunn_feature_mask = tunn_feature_mask;
	p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
	p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
	p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
	p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
	p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
	p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
	p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
	p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
	p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
	p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
	p_resp->geneve_udp_port = p_tun->geneve_port.port;
	p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
}
static void
__qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
			      struct qed_tunn_update_type *p_tun,
			      enum qed_tunn_mode mask, u8 tun_cls)
{
	if (p_req->tun_mode_update_mask & BIT(mask)) {
		p_tun->b_update_mode = true;

		if (p_req->tunn_mode & BIT(mask))
			p_tun->b_mode_enabled = true;
	}

	p_tun->tun_cls = tun_cls;
}
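/* Variant of __qed_iov_pf_update_tun_param() that also records a requested
 * UDP port update for the tunnel type.
 */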
static void
qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
			    struct qed_tunn_update_type *p_tun,
			    struct qed_tunn_update_udp_port *p_port,
			    enum qed_tunn_mode mask,
			    u8 tun_cls, u8 update_port, u16 port)
{
	if (update_port) {
		p_port->b_update_port = true;
		p_port->port = port;
	}

	__qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
}
static bool
qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
{
	bool b_update_requested = false;

	if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
	    p_req->update_geneve_port || p_req->update_vxlan_port)
		b_update_requested = true;

	return b_update_requested;
}
static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
{
	if (tun->b_update_mode && !tun->b_mode_enabled) {
		tun->b_update_mode = false;
		*rc = -EINVAL;
	}
}
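/* Sanitize a VF's tunnel-update request against the PF's current tunnel
 * state, propagate any new UDP ports to the VF bulletins and the protocol
 * driver, and return -EINVAL if the request had to be modified.
 */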
static int
qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
				   u16 *tun_features, bool *update,
				   struct qed_tunnel_info *tun_src)
{
	struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
	struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
	u16 bultn_vxlan_port, bultn_geneve_port;
	void *cookie = p_hwfn->cdev->ops_cookie;
	int i, rc = 0;

	*tun_features = p_hwfn->cdev->tunn_feature_mask;
	bultn_vxlan_port = tun->vxlan_port.port;
	bultn_geneve_port = tun->geneve_port.port;
	qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
	qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
	qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
	qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
	qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);

	if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
	    (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
		tun_src->b_update_rx_cls = false;
		tun_src->b_update_tx_cls = false;
		rc = -EINVAL;
	}

	if (tun_src->vxlan_port.b_update_port) {
		if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
			tun_src->vxlan_port.b_update_port = false;
		} else {
			*update = true;
			bultn_vxlan_port = tun_src->vxlan_port.port;
		}
	}

	if (tun_src->geneve_port.b_update_port) {
		if (tun_src->geneve_port.port == tun->geneve_port.port) {
			tun_src->geneve_port.b_update_port = false;
		} else {
			*update = true;
			bultn_geneve_port = tun_src->geneve_port.port;
		}
	}

	qed_for_each_vf(p_hwfn, i) {
		qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
					       bultn_geneve_port);
	}

	qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);

	return rc;
}
static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
					     struct qed_ptt *p_ptt,
					     struct qed_vf_info *p_vf)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_update_tunn_param_tlv *p_resp;
	struct vfpf_update_tunn_param_tlv *p_req;
	u8 status = PFVF_STATUS_SUCCESS;
	bool b_update_required = false;
	struct qed_tunnel_info tunn;
	u16 tunn_feature_mask = 0;
	int i, rc = 0;

	mbx->offset = (u8 *)mbx->reply_virt;

	memset(&tunn, 0, sizeof(tunn));
	p_req = &mbx->req_virt->tunn_param_update;

	if (!qed_iov_pf_validate_tunn_param(p_req)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No tunnel update requested by VF\n");
		status = PFVF_STATUS_FAILURE;
		goto send_resp;
	}

	tunn.b_update_rx_cls = p_req->update_tun_cls;
	tunn.b_update_tx_cls = p_req->update_tun_cls;

	qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
				    QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
				    p_req->update_vxlan_port,
				    p_req->vxlan_port);
	qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
				    QED_MODE_L2GENEVE_TUNN,
				    p_req->l2geneve_clss,
				    p_req->update_geneve_port,
				    p_req->geneve_port);
	__qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
				      QED_MODE_IPGENEVE_TUNN,
				      p_req->ipgeneve_clss);
	__qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
				      QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
	__qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
				      QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);

	/* If the PF modifies the VF's request, it should still return an
	 * error for a partial or modified configuration as opposed to the
	 * requested one.
	 */
	rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
						&b_update_required, &tunn);
	if (rc)
		status = PFVF_STATUS_FAILURE;

	/* Check whether the QED client wants anything updated */
	if (b_update_required) {
		u16 geneve_port;

		rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
					       QED_SPQ_MODE_EBLOCK, NULL);
		if (rc)
			status = PFVF_STATUS_FAILURE;

		geneve_port = p_tun->geneve_port.port;
		qed_for_each_vf(p_hwfn, i) {
			qed_iov_bulletin_set_udp_ports(p_hwfn, i,
						       p_tun->vxlan_port.port,
						       geneve_port);
		}
	}

send_resp:
	p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
			     CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));

	qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
}
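/* Build and send the PF's reply to a VF's START_TXQ request. For non-legacy
 * VFs the reply carries the doorbell address of the newly started Tx queue.
 */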
static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *p_vf,
					  u32 cid, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	bool b_legacy = false;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		b_legacy = true;

	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
			    length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
		p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);

	qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	struct qed_queue_start_common_params params;
	struct qed_queue_cid_vf_params vf_params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	struct vfpf_start_txq_tlv *req;
	struct qed_vf_queue *p_queue;
	struct qed_queue_cid *p_cid;
	struct qed_sb_info sb_dummy;
	u8 qid_usage_idx, vf_legacy;
	u32 cid = 0;
	int rc;
	u16 pq;

	memset(&params, 0, sizeof(params));
	req = &mbx->req_virt->start_txq;

	if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
				  QED_IOV_VALIDATE_Q_NA) ||
	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	p_queue = &vf->vf_queues[req->tx_qid];
	if (p_queue->cids[qid_usage_idx].p_cid)
		goto out;

	vf_legacy = qed_vf_calculate_legacy(vf);

	/* Acquire a new queue-cid */
	params.queue_id = p_queue->fw_tx_qid;
	params.vport_id = vf->vport_id;
	params.stats_id = vf->abs_vf_id + 0x10;

	/* Since IGU index is passed via sb_info, construct a dummy one */
	memset(&sb_dummy, 0, sizeof(sb_dummy));
	sb_dummy.igu_sb_id = req->hw_sb;
	params.p_sb = &sb_dummy;
	params.sb_idx = req->sb_index;

	memset(&vf_params, 0, sizeof(vf_params));
	vf_params.vfid = vf->relative_vf_id;
	vf_params.vf_qid = (u8)req->tx_qid;
	vf_params.vf_legacy = vf_legacy;
	vf_params.qid_usage_idx = qid_usage_idx;

	p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
				     &params, false, &vf_params);
	if (!p_cid)
		goto out;

	pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
				      req->pbl_addr, req->pbl_size, pq);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	} else {
		status = PFVF_STATUS_SUCCESS;
		p_queue->cids[qid_usage_idx].p_cid = p_cid;
		p_queue->cids[qid_usage_idx].b_is_tx = true;
		cid = p_cid->cid;
	}

out:
	qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
}
2502 static int qed_iov_vf_stop_rxqs(struct qed_hwfn
*p_hwfn
,
2503 struct qed_vf_info
*vf
,
2505 u8 qid_usage_idx
, bool cqe_completion
)
2507 struct qed_vf_queue
*p_queue
;
2510 if (!qed_iov_validate_rxq(p_hwfn
, vf
, rxq_id
, QED_IOV_VALIDATE_Q_NA
)) {
2513 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2514 vf
->relative_vf_id
, rxq_id
, qid_usage_idx
);
2518 p_queue
= &vf
->vf_queues
[rxq_id
];
2520 /* We've validated the index and the existence of the active RXQ -
2521 * now we need to make sure that it's using the correct qid.
2523 if (!p_queue
->cids
[qid_usage_idx
].p_cid
||
2524 p_queue
->cids
[qid_usage_idx
].b_is_tx
) {
2525 struct qed_queue_cid
*p_cid
;
2527 p_cid
= qed_iov_get_vf_rx_queue_cid(p_queue
);
2530 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2532 rxq_id
, qid_usage_idx
, rxq_id
, p_cid
->qid_usage_idx
);
2536 /* Now that we know we have a valid Rx-queue - close it */
2537 rc
= qed_eth_rx_queue_stop(p_hwfn
,
2538 p_queue
->cids
[qid_usage_idx
].p_cid
,
2539 false, cqe_completion
);
2543 p_queue
->cids
[qid_usage_idx
].p_cid
= NULL
;
2544 vf
->num_active_rxqs
--;
2549 static int qed_iov_vf_stop_txqs(struct qed_hwfn
*p_hwfn
,
2550 struct qed_vf_info
*vf
,
2551 u16 txq_id
, u8 qid_usage_idx
)
2553 struct qed_vf_queue
*p_queue
;
2556 if (!qed_iov_validate_txq(p_hwfn
, vf
, txq_id
, QED_IOV_VALIDATE_Q_NA
))
2559 p_queue
= &vf
->vf_queues
[txq_id
];
2560 if (!p_queue
->cids
[qid_usage_idx
].p_cid
||
2561 !p_queue
->cids
[qid_usage_idx
].b_is_tx
)
2564 rc
= qed_eth_tx_queue_stop(p_hwfn
, p_queue
->cids
[qid_usage_idx
].p_cid
);
2568 p_queue
->cids
[qid_usage_idx
].p_cid
= NULL
;
2572 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn
*p_hwfn
,
2573 struct qed_ptt
*p_ptt
,
2574 struct qed_vf_info
*vf
)
2576 u16 length
= sizeof(struct pfvf_def_resp_tlv
);
2577 struct qed_iov_vf_mbx
*mbx
= &vf
->vf_mbx
;
2578 u8 status
= PFVF_STATUS_FAILURE
;
2579 struct vfpf_stop_rxqs_tlv
*req
;
2583 /* There has never been an official driver that used this interface
2584 * for stopping multiple queues, and it is now considered deprecated.
2585 * Validate this isn't used here.
2587 req
= &mbx
->req_virt
->stop_rxqs
;
2588 if (req
->num_rxqs
!= 1) {
2589 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
2590 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2591 vf
->relative_vf_id
);
2592 status
= PFVF_STATUS_NOT_SUPPORTED
;
2596 /* Find which qid-index is associated with the queue */
2597 qid_usage_idx
= qed_iov_vf_mbx_qid(p_hwfn
, vf
, false);
2598 if (qid_usage_idx
== QED_IOV_QID_INVALID
)
2601 rc
= qed_iov_vf_stop_rxqs(p_hwfn
, vf
, req
->rx_qid
,
2602 qid_usage_idx
, req
->cqe_completion
);
2604 status
= PFVF_STATUS_SUCCESS
;
2606 qed_iov_prepare_resp(p_hwfn
, p_ptt
, vf
, CHANNEL_TLV_STOP_RXQS
,
2610 static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn
*p_hwfn
,
2611 struct qed_ptt
*p_ptt
,
2612 struct qed_vf_info
*vf
)
2614 u16 length
= sizeof(struct pfvf_def_resp_tlv
);
2615 struct qed_iov_vf_mbx
*mbx
= &vf
->vf_mbx
;
2616 u8 status
= PFVF_STATUS_FAILURE
;
2617 struct vfpf_stop_txqs_tlv
*req
;
2621 /* There has never been an official driver that used this interface
2622 * for stopping multiple queues, and it is now considered deprecated.
2623 * Validate this isn't used here.
2625 req
= &mbx
->req_virt
->stop_txqs
;
2626 if (req
->num_txqs
!= 1) {
2627 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
2628 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2629 vf
->relative_vf_id
);
2630 status
= PFVF_STATUS_NOT_SUPPORTED
;
2634 /* Find which qid-index is associated with the queue */
2635 qid_usage_idx
= qed_iov_vf_mbx_qid(p_hwfn
, vf
, true);
2636 if (qid_usage_idx
== QED_IOV_QID_INVALID
)
2639 rc
= qed_iov_vf_stop_txqs(p_hwfn
, vf
, req
->tx_qid
, qid_usage_idx
);
2641 status
= PFVF_STATUS_SUCCESS
;
2644 qed_iov_prepare_resp(p_hwfn
, p_ptt
, vf
, CHANNEL_TLV_STOP_TXQS
,
2648 static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn
*p_hwfn
,
2649 struct qed_ptt
*p_ptt
,
2650 struct qed_vf_info
*vf
)
2652 struct qed_queue_cid
*handlers
[QED_MAX_VF_CHAINS_PER_PF
];
2653 u16 length
= sizeof(struct pfvf_def_resp_tlv
);
2654 struct qed_iov_vf_mbx
*mbx
= &vf
->vf_mbx
;
2655 struct vfpf_update_rxq_tlv
*req
;
2656 u8 status
= PFVF_STATUS_FAILURE
;
2657 u8 complete_event_flg
;
2658 u8 complete_cqe_flg
;
2663 req
= &mbx
->req_virt
->update_rxq
;
2664 complete_cqe_flg
= !!(req
->flags
& VFPF_RXQ_UPD_COMPLETE_CQE_FLAG
);
2665 complete_event_flg
= !!(req
->flags
& VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG
);
2667 qid_usage_idx
= qed_iov_vf_mbx_qid(p_hwfn
, vf
, false);
2668 if (qid_usage_idx
== QED_IOV_QID_INVALID
)
2671 /* There shouldn't exist a VF that uses queue-qids yet uses this
2672 * API with multiple Rx queues. Validate this.
2674 if ((vf
->acquire
.vfdev_info
.capabilities
&
2675 VFPF_ACQUIRE_CAP_QUEUE_QIDS
) && req
->num_rxqs
!= 1) {
2676 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
2677 "VF[%d] supports QIDs but sends multiple queues\n",
2678 vf
->relative_vf_id
);
2682 /* Validate inputs - for the legacy case this is still true since
2683 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2685 for (i
= req
->rx_qid
; i
< req
->rx_qid
+ req
->num_rxqs
; i
++) {
2686 if (!qed_iov_validate_rxq(p_hwfn
, vf
, i
,
2687 QED_IOV_VALIDATE_Q_NA
) ||
2688 !vf
->vf_queues
[i
].cids
[qid_usage_idx
].p_cid
||
2689 vf
->vf_queues
[i
].cids
[qid_usage_idx
].b_is_tx
) {
2690 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
2691 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2692 vf
->relative_vf_id
, req
->rx_qid
,
2698 /* Prepare the handlers */
2699 for (i
= 0; i
< req
->num_rxqs
; i
++) {
2700 u16 qid
= req
->rx_qid
+ i
;
2702 handlers
[i
] = vf
->vf_queues
[qid
].cids
[qid_usage_idx
].p_cid
;
2705 rc
= qed_sp_eth_rx_queues_update(p_hwfn
, (void **)&handlers
,
2709 QED_SPQ_MODE_EBLOCK
, NULL
);
2713 status
= PFVF_STATUS_SUCCESS
;
2715 qed_iov_prepare_resp(p_hwfn
, p_ptt
, vf
, CHANNEL_TLV_UPDATE_RXQ
,
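/* Walk the TLV chain of a VF request and return a pointer to the first
 * extended TLV of the requested type, or NULL if none is found before
 * CHANNEL_TLV_LIST_END (or if the chain is malformed).
 */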
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type)
{
	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
	int len = 0;

	do {
		if (!p_tlv->length) {
			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
			return NULL;
		}

		if (p_tlv->type == req_type) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Extended tlv type %d, length %d found\n",
				   p_tlv->type, p_tlv->length);
			return p_tlv;
		}

		len += p_tlv->length;
		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);

		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
			DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
			return NULL;
		}
	} while (p_tlv->type != CHANNEL_TLV_LIST_END);

	return NULL;
}
2751 qed_iov_vp_update_act_param(struct qed_hwfn
*p_hwfn
,
2752 struct qed_sp_vport_update_params
*p_data
,
2753 struct qed_iov_vf_mbx
*p_mbx
, u16
*tlvs_mask
)
2755 struct vfpf_vport_update_activate_tlv
*p_act_tlv
;
2756 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_ACTIVATE
;
2758 p_act_tlv
= (struct vfpf_vport_update_activate_tlv
*)
2759 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
, tlv
);
2763 p_data
->update_vport_active_rx_flg
= p_act_tlv
->update_rx
;
2764 p_data
->vport_active_rx_flg
= p_act_tlv
->active_rx
;
2765 p_data
->update_vport_active_tx_flg
= p_act_tlv
->update_tx
;
2766 p_data
->vport_active_tx_flg
= p_act_tlv
->active_tx
;
2767 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_ACTIVATE
;
2771 qed_iov_vp_update_vlan_param(struct qed_hwfn
*p_hwfn
,
2772 struct qed_sp_vport_update_params
*p_data
,
2773 struct qed_vf_info
*p_vf
,
2774 struct qed_iov_vf_mbx
*p_mbx
, u16
*tlvs_mask
)
2776 struct vfpf_vport_update_vlan_strip_tlv
*p_vlan_tlv
;
2777 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP
;
2779 p_vlan_tlv
= (struct vfpf_vport_update_vlan_strip_tlv
*)
2780 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
, tlv
);
2784 p_vf
->shadow_config
.inner_vlan_removal
= p_vlan_tlv
->remove_vlan
;
2786 /* Ignore the VF request if we're forcing a vlan */
2787 if (!(p_vf
->configured_features
& BIT(VLAN_ADDR_FORCED
))) {
2788 p_data
->update_inner_vlan_removal_flg
= 1;
2789 p_data
->inner_vlan_removal_flg
= p_vlan_tlv
->remove_vlan
;
2792 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP
;
2796 qed_iov_vp_update_tx_switch(struct qed_hwfn
*p_hwfn
,
2797 struct qed_sp_vport_update_params
*p_data
,
2798 struct qed_iov_vf_mbx
*p_mbx
, u16
*tlvs_mask
)
2800 struct vfpf_vport_update_tx_switch_tlv
*p_tx_switch_tlv
;
2801 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH
;
2803 p_tx_switch_tlv
= (struct vfpf_vport_update_tx_switch_tlv
*)
2804 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
,
2806 if (!p_tx_switch_tlv
)
2809 p_data
->update_tx_switching_flg
= 1;
2810 p_data
->tx_switching_flg
= p_tx_switch_tlv
->tx_switching
;
2811 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_TX_SWITCH
;
2815 qed_iov_vp_update_mcast_bin_param(struct qed_hwfn
*p_hwfn
,
2816 struct qed_sp_vport_update_params
*p_data
,
2817 struct qed_iov_vf_mbx
*p_mbx
, u16
*tlvs_mask
)
2819 struct vfpf_vport_update_mcast_bin_tlv
*p_mcast_tlv
;
2820 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_MCAST
;
2822 p_mcast_tlv
= (struct vfpf_vport_update_mcast_bin_tlv
*)
2823 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
, tlv
);
2827 p_data
->update_approx_mcast_flg
= 1;
2828 memcpy(p_data
->bins
, p_mcast_tlv
->bins
,
2829 sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS
);
2830 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_MCAST
;
2834 qed_iov_vp_update_accept_flag(struct qed_hwfn
*p_hwfn
,
2835 struct qed_sp_vport_update_params
*p_data
,
2836 struct qed_iov_vf_mbx
*p_mbx
, u16
*tlvs_mask
)
2838 struct qed_filter_accept_flags
*p_flags
= &p_data
->accept_flags
;
2839 struct vfpf_vport_update_accept_param_tlv
*p_accept_tlv
;
2840 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM
;
2842 p_accept_tlv
= (struct vfpf_vport_update_accept_param_tlv
*)
2843 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
, tlv
);
2847 p_flags
->update_rx_mode_config
= p_accept_tlv
->update_rx_mode
;
2848 p_flags
->rx_accept_filter
= p_accept_tlv
->rx_accept_filter
;
2849 p_flags
->update_tx_mode_config
= p_accept_tlv
->update_tx_mode
;
2850 p_flags
->tx_accept_filter
= p_accept_tlv
->tx_accept_filter
;
2851 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM
;
2855 qed_iov_vp_update_accept_any_vlan(struct qed_hwfn
*p_hwfn
,
2856 struct qed_sp_vport_update_params
*p_data
,
2857 struct qed_iov_vf_mbx
*p_mbx
, u16
*tlvs_mask
)
2859 struct vfpf_vport_update_accept_any_vlan_tlv
*p_accept_any_vlan
;
2860 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN
;
2862 p_accept_any_vlan
= (struct vfpf_vport_update_accept_any_vlan_tlv
*)
2863 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
,
2865 if (!p_accept_any_vlan
)
2868 p_data
->accept_any_vlan
= p_accept_any_vlan
->accept_any_vlan
;
2869 p_data
->update_accept_any_vlan_flg
=
2870 p_accept_any_vlan
->update_accept_any_vlan_flg
;
2871 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN
;
2875 qed_iov_vp_update_rss_param(struct qed_hwfn
*p_hwfn
,
2876 struct qed_vf_info
*vf
,
2877 struct qed_sp_vport_update_params
*p_data
,
2878 struct qed_rss_params
*p_rss
,
2879 struct qed_iov_vf_mbx
*p_mbx
,
2880 u16
*tlvs_mask
, u16
*tlvs_accepted
)
2882 struct vfpf_vport_update_rss_tlv
*p_rss_tlv
;
2883 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_RSS
;
2884 bool b_reject
= false;
2888 p_rss_tlv
= (struct vfpf_vport_update_rss_tlv
*)
2889 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
, tlv
);
2891 p_data
->rss_params
= NULL
;
2895 memset(p_rss
, 0, sizeof(struct qed_rss_params
));
2897 p_rss
->update_rss_config
= !!(p_rss_tlv
->update_rss_flags
&
2898 VFPF_UPDATE_RSS_CONFIG_FLAG
);
2899 p_rss
->update_rss_capabilities
= !!(p_rss_tlv
->update_rss_flags
&
2900 VFPF_UPDATE_RSS_CAPS_FLAG
);
2901 p_rss
->update_rss_ind_table
= !!(p_rss_tlv
->update_rss_flags
&
2902 VFPF_UPDATE_RSS_IND_TABLE_FLAG
);
2903 p_rss
->update_rss_key
= !!(p_rss_tlv
->update_rss_flags
&
2904 VFPF_UPDATE_RSS_KEY_FLAG
);
2906 p_rss
->rss_enable
= p_rss_tlv
->rss_enable
;
2907 p_rss
->rss_eng_id
= vf
->relative_vf_id
+ 1;
2908 p_rss
->rss_caps
= p_rss_tlv
->rss_caps
;
2909 p_rss
->rss_table_size_log
= p_rss_tlv
->rss_table_size_log
;
2910 memcpy(p_rss
->rss_key
, p_rss_tlv
->rss_key
, sizeof(p_rss
->rss_key
));
2912 table_size
= min_t(u16
, ARRAY_SIZE(p_rss
->rss_ind_table
),
2913 (1 << p_rss_tlv
->rss_table_size_log
));
2915 for (i
= 0; i
< table_size
; i
++) {
2916 struct qed_queue_cid
*p_cid
;
2918 q_idx
= p_rss_tlv
->rss_ind_table
[i
];
2919 if (!qed_iov_validate_rxq(p_hwfn
, vf
, q_idx
,
2920 QED_IOV_VALIDATE_Q_ENABLE
)) {
2923 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2924 vf
->relative_vf_id
, q_idx
);
2929 p_cid
= qed_iov_get_vf_rx_queue_cid(&vf
->vf_queues
[q_idx
]);
2930 p_rss
->rss_ind_table
[i
] = p_cid
;
2933 p_data
->rss_params
= p_rss
;
2935 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_RSS
;
2937 *tlvs_accepted
|= 1 << QED_IOV_VP_UPDATE_RSS
;
2941 qed_iov_vp_update_sge_tpa_param(struct qed_hwfn
*p_hwfn
,
2942 struct qed_vf_info
*vf
,
2943 struct qed_sp_vport_update_params
*p_data
,
2944 struct qed_sge_tpa_params
*p_sge_tpa
,
2945 struct qed_iov_vf_mbx
*p_mbx
, u16
*tlvs_mask
)
2947 struct vfpf_vport_update_sge_tpa_tlv
*p_sge_tpa_tlv
;
2948 u16 tlv
= CHANNEL_TLV_VPORT_UPDATE_SGE_TPA
;
2950 p_sge_tpa_tlv
= (struct vfpf_vport_update_sge_tpa_tlv
*)
2951 qed_iov_search_list_tlvs(p_hwfn
, p_mbx
->req_virt
, tlv
);
2953 if (!p_sge_tpa_tlv
) {
2954 p_data
->sge_tpa_params
= NULL
;
2958 memset(p_sge_tpa
, 0, sizeof(struct qed_sge_tpa_params
));
2960 p_sge_tpa
->update_tpa_en_flg
=
2961 !!(p_sge_tpa_tlv
->update_sge_tpa_flags
& VFPF_UPDATE_TPA_EN_FLAG
);
2962 p_sge_tpa
->update_tpa_param_flg
=
2963 !!(p_sge_tpa_tlv
->update_sge_tpa_flags
&
2964 VFPF_UPDATE_TPA_PARAM_FLAG
);
2966 p_sge_tpa
->tpa_ipv4_en_flg
=
2967 !!(p_sge_tpa_tlv
->sge_tpa_flags
& VFPF_TPA_IPV4_EN_FLAG
);
2968 p_sge_tpa
->tpa_ipv6_en_flg
=
2969 !!(p_sge_tpa_tlv
->sge_tpa_flags
& VFPF_TPA_IPV6_EN_FLAG
);
2970 p_sge_tpa
->tpa_pkt_split_flg
=
2971 !!(p_sge_tpa_tlv
->sge_tpa_flags
& VFPF_TPA_PKT_SPLIT_FLAG
);
2972 p_sge_tpa
->tpa_hdr_data_split_flg
=
2973 !!(p_sge_tpa_tlv
->sge_tpa_flags
& VFPF_TPA_HDR_DATA_SPLIT_FLAG
);
2974 p_sge_tpa
->tpa_gro_consistent_flg
=
2975 !!(p_sge_tpa_tlv
->sge_tpa_flags
& VFPF_TPA_GRO_CONSIST_FLAG
);
2977 p_sge_tpa
->tpa_max_aggs_num
= p_sge_tpa_tlv
->tpa_max_aggs_num
;
2978 p_sge_tpa
->tpa_max_size
= p_sge_tpa_tlv
->tpa_max_size
;
2979 p_sge_tpa
->tpa_min_size_to_start
= p_sge_tpa_tlv
->tpa_min_size_to_start
;
2980 p_sge_tpa
->tpa_min_size_to_cont
= p_sge_tpa_tlv
->tpa_min_size_to_cont
;
2981 p_sge_tpa
->max_buffers_per_cqe
= p_sge_tpa_tlv
->max_buffers_per_cqe
;
2983 p_data
->sge_tpa_params
= p_sge_tpa
;
2985 *tlvs_mask
|= 1 << QED_IOV_VP_UPDATE_SGE_TPA
;
2988 static int qed_iov_pre_update_vport(struct qed_hwfn
*hwfn
,
2990 struct qed_sp_vport_update_params
*params
,
2993 u8 mask
= QED_ACCEPT_UCAST_UNMATCHED
| QED_ACCEPT_MCAST_UNMATCHED
;
2994 struct qed_filter_accept_flags
*flags
= ¶ms
->accept_flags
;
2995 struct qed_public_vf_info
*vf_info
;
2997 /* Untrusted VFs can't even be trusted to know that fact.
2998 * Simply indicate everything is configured fine, and trace
2999 * configuration 'behind their back'.
3001 if (!(*tlvs
& BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM
)))
3004 vf_info
= qed_iov_get_public_vf_info(hwfn
, vfid
, true);
3006 if (flags
->update_rx_mode_config
) {
3007 vf_info
->rx_accept_mode
= flags
->rx_accept_filter
;
3008 if (!vf_info
->is_trusted_configured
)
3009 flags
->rx_accept_filter
&= ~mask
;
3012 if (flags
->update_tx_mode_config
) {
3013 vf_info
->tx_accept_mode
= flags
->tx_accept_filter
;
3014 if (!vf_info
->is_trusted_configured
)
3015 flags
->tx_accept_filter
&= ~mask
;
3021 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn
*p_hwfn
,
3022 struct qed_ptt
*p_ptt
,
3023 struct qed_vf_info
*vf
)
3025 struct qed_rss_params
*p_rss_params
= NULL
;
3026 struct qed_sp_vport_update_params params
;
3027 struct qed_iov_vf_mbx
*mbx
= &vf
->vf_mbx
;
3028 struct qed_sge_tpa_params sge_tpa_params
;
3029 u16 tlvs_mask
= 0, tlvs_accepted
= 0;
3030 u8 status
= PFVF_STATUS_SUCCESS
;
3034 /* Valiate PF can send such a request */
3035 if (!vf
->vport_instance
) {
3038 "No VPORT instance available for VF[%d], failing vport update\n",
3040 status
= PFVF_STATUS_FAILURE
;
3043 p_rss_params
= vzalloc(sizeof(*p_rss_params
));
3044 if (p_rss_params
== NULL
) {
3045 status
= PFVF_STATUS_FAILURE
;
3049 memset(¶ms
, 0, sizeof(params
));
3050 params
.opaque_fid
= vf
->opaque_fid
;
3051 params
.vport_id
= vf
->vport_id
;
3052 params
.rss_params
= NULL
;
3054 /* Search for extended tlvs list and update values
3055 * from VF in struct qed_sp_vport_update_params.
3057 qed_iov_vp_update_act_param(p_hwfn
, ¶ms
, mbx
, &tlvs_mask
);
3058 qed_iov_vp_update_vlan_param(p_hwfn
, ¶ms
, vf
, mbx
, &tlvs_mask
);
3059 qed_iov_vp_update_tx_switch(p_hwfn
, ¶ms
, mbx
, &tlvs_mask
);
3060 qed_iov_vp_update_mcast_bin_param(p_hwfn
, ¶ms
, mbx
, &tlvs_mask
);
3061 qed_iov_vp_update_accept_flag(p_hwfn
, ¶ms
, mbx
, &tlvs_mask
);
3062 qed_iov_vp_update_accept_any_vlan(p_hwfn
, ¶ms
, mbx
, &tlvs_mask
);
3063 qed_iov_vp_update_sge_tpa_param(p_hwfn
, vf
, ¶ms
,
3064 &sge_tpa_params
, mbx
, &tlvs_mask
);
3066 tlvs_accepted
= tlvs_mask
;
3068 /* Some of the extended TLVs need to be validated first; In that case,
3069 * they can update the mask without updating the accepted [so that
3070 * PF could communicate to VF it has rejected request].
3072 qed_iov_vp_update_rss_param(p_hwfn
, vf
, ¶ms
, p_rss_params
,
3073 mbx
, &tlvs_mask
, &tlvs_accepted
);
3075 if (qed_iov_pre_update_vport(p_hwfn
, vf
->relative_vf_id
,
3076 ¶ms
, &tlvs_accepted
)) {
3078 status
= PFVF_STATUS_NOT_SUPPORTED
;
3082 if (!tlvs_accepted
) {
3084 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3085 "Upper-layer prevents VF vport configuration\n");
3087 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3088 "No feature tlvs found for vport update\n");
3089 status
= PFVF_STATUS_NOT_SUPPORTED
;
3093 rc
= qed_sp_vport_update(p_hwfn
, ¶ms
, QED_SPQ_MODE_EBLOCK
, NULL
);
3096 status
= PFVF_STATUS_FAILURE
;
3099 vfree(p_rss_params
);
3100 length
= qed_iov_prep_vp_update_resp_tlvs(p_hwfn
, vf
, mbx
, status
,
3101 tlvs_mask
, tlvs_accepted
);
3102 qed_iov_send_response(p_hwfn
, p_ptt
, vf
, length
, status
);
3105 static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn
*p_hwfn
,
3106 struct qed_vf_info
*p_vf
,
3107 struct qed_filter_ucast
*p_params
)
3111 /* First remove entries and then add new ones */
3112 if (p_params
->opcode
== QED_FILTER_REMOVE
) {
3113 for (i
= 0; i
< QED_ETH_VF_NUM_VLAN_FILTERS
+ 1; i
++)
3114 if (p_vf
->shadow_config
.vlans
[i
].used
&&
3115 p_vf
->shadow_config
.vlans
[i
].vid
==
3117 p_vf
->shadow_config
.vlans
[i
].used
= false;
3120 if (i
== QED_ETH_VF_NUM_VLAN_FILTERS
+ 1) {
3123 "VF [%d] - Tries to remove a non-existing vlan\n",
3124 p_vf
->relative_vf_id
);
3127 } else if (p_params
->opcode
== QED_FILTER_REPLACE
||
3128 p_params
->opcode
== QED_FILTER_FLUSH
) {
3129 for (i
= 0; i
< QED_ETH_VF_NUM_VLAN_FILTERS
+ 1; i
++)
3130 p_vf
->shadow_config
.vlans
[i
].used
= false;
3133 /* In forced mode, we're willing to remove entries - but we don't add
3136 if (p_vf
->bulletin
.p_virt
->valid_bitmap
& BIT(VLAN_ADDR_FORCED
))
3139 if (p_params
->opcode
== QED_FILTER_ADD
||
3140 p_params
->opcode
== QED_FILTER_REPLACE
) {
3141 for (i
= 0; i
< QED_ETH_VF_NUM_VLAN_FILTERS
+ 1; i
++) {
3142 if (p_vf
->shadow_config
.vlans
[i
].used
)
3145 p_vf
->shadow_config
.vlans
[i
].used
= true;
3146 p_vf
->shadow_config
.vlans
[i
].vid
= p_params
->vlan
;
3150 if (i
== QED_ETH_VF_NUM_VLAN_FILTERS
+ 1) {
3153 "VF [%d] - Tries to configure more than %d vlan filters\n",
3154 p_vf
->relative_vf_id
,
3155 QED_ETH_VF_NUM_VLAN_FILTERS
+ 1);
3163 static int qed_iov_vf_update_mac_shadow(struct qed_hwfn
*p_hwfn
,
3164 struct qed_vf_info
*p_vf
,
3165 struct qed_filter_ucast
*p_params
)
3169 /* If we're in forced-mode, we don't allow any change */
3170 if (p_vf
->bulletin
.p_virt
->valid_bitmap
& BIT(MAC_ADDR_FORCED
))
3173 /* First remove entries and then add new ones */
3174 if (p_params
->opcode
== QED_FILTER_REMOVE
) {
3175 for (i
= 0; i
< QED_ETH_VF_NUM_MAC_FILTERS
; i
++) {
3176 if (ether_addr_equal(p_vf
->shadow_config
.macs
[i
],
3178 eth_zero_addr(p_vf
->shadow_config
.macs
[i
]);
3183 if (i
== QED_ETH_VF_NUM_MAC_FILTERS
) {
3184 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3185 "MAC isn't configured\n");
3188 } else if (p_params
->opcode
== QED_FILTER_REPLACE
||
3189 p_params
->opcode
== QED_FILTER_FLUSH
) {
3190 for (i
= 0; i
< QED_ETH_VF_NUM_MAC_FILTERS
; i
++)
3191 eth_zero_addr(p_vf
->shadow_config
.macs
[i
]);
3194 /* List the new MAC address */
3195 if (p_params
->opcode
!= QED_FILTER_ADD
&&
3196 p_params
->opcode
!= QED_FILTER_REPLACE
)
3199 for (i
= 0; i
< QED_ETH_VF_NUM_MAC_FILTERS
; i
++) {
3200 if (is_zero_ether_addr(p_vf
->shadow_config
.macs
[i
])) {
3201 ether_addr_copy(p_vf
->shadow_config
.macs
[i
],
3203 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3204 "Added MAC at %d entry in shadow\n", i
);
3209 if (i
== QED_ETH_VF_NUM_MAC_FILTERS
) {
3210 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
, "No available place for MAC\n");
3218 qed_iov_vf_update_unicast_shadow(struct qed_hwfn
*p_hwfn
,
3219 struct qed_vf_info
*p_vf
,
3220 struct qed_filter_ucast
*p_params
)
3224 if (p_params
->type
== QED_FILTER_MAC
) {
3225 rc
= qed_iov_vf_update_mac_shadow(p_hwfn
, p_vf
, p_params
);
3230 if (p_params
->type
== QED_FILTER_VLAN
)
3231 rc
= qed_iov_vf_update_vlan_shadow(p_hwfn
, p_vf
, p_params
);
3236 static int qed_iov_chk_ucast(struct qed_hwfn
*hwfn
,
3237 int vfid
, struct qed_filter_ucast
*params
)
3239 struct qed_public_vf_info
*vf
;
3241 vf
= qed_iov_get_public_vf_info(hwfn
, vfid
, true);
3245 /* No real decision to make; Store the configured MAC */
3246 if (params
->type
== QED_FILTER_MAC
||
3247 params
->type
== QED_FILTER_MAC_VLAN
)
3248 ether_addr_copy(vf
->mac
, params
->mac
);
3253 static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn
*p_hwfn
,
3254 struct qed_ptt
*p_ptt
,
3255 struct qed_vf_info
*vf
)
3257 struct qed_bulletin_content
*p_bulletin
= vf
->bulletin
.p_virt
;
3258 struct qed_iov_vf_mbx
*mbx
= &vf
->vf_mbx
;
3259 struct vfpf_ucast_filter_tlv
*req
;
3260 u8 status
= PFVF_STATUS_SUCCESS
;
3261 struct qed_filter_ucast params
;
3264 /* Prepare the unicast filter params */
3265 memset(¶ms
, 0, sizeof(struct qed_filter_ucast
));
3266 req
= &mbx
->req_virt
->ucast_filter
;
3267 params
.opcode
= (enum qed_filter_opcode
)req
->opcode
;
3268 params
.type
= (enum qed_filter_ucast_type
)req
->type
;
3270 params
.is_rx_filter
= 1;
3271 params
.is_tx_filter
= 1;
3272 params
.vport_to_remove_from
= vf
->vport_id
;
3273 params
.vport_to_add_to
= vf
->vport_id
;
3274 memcpy(params
.mac
, req
->mac
, ETH_ALEN
);
3275 params
.vlan
= req
->vlan
;
3279 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3280 vf
->abs_vf_id
, params
.opcode
, params
.type
,
3281 params
.is_rx_filter
? "RX" : "",
3282 params
.is_tx_filter
? "TX" : "",
3283 params
.vport_to_add_to
,
3284 params
.mac
[0], params
.mac
[1],
3285 params
.mac
[2], params
.mac
[3],
3286 params
.mac
[4], params
.mac
[5], params
.vlan
);
3288 if (!vf
->vport_instance
) {
3291 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
3293 status
= PFVF_STATUS_FAILURE
;
3297 /* Update shadow copy of the VF configuration */
3298 if (qed_iov_vf_update_unicast_shadow(p_hwfn
, vf
, ¶ms
)) {
3299 status
= PFVF_STATUS_FAILURE
;
3303 /* Determine if the unicast filtering is acceptible by PF */
3304 if ((p_bulletin
->valid_bitmap
& BIT(VLAN_ADDR_FORCED
)) &&
3305 (params
.type
== QED_FILTER_VLAN
||
3306 params
.type
== QED_FILTER_MAC_VLAN
)) {
3307 /* Once VLAN is forced or PVID is set, do not allow
3308 * to add/replace any further VLANs.
3310 if (params
.opcode
== QED_FILTER_ADD
||
3311 params
.opcode
== QED_FILTER_REPLACE
)
3312 status
= PFVF_STATUS_FORCED
;
3316 if ((p_bulletin
->valid_bitmap
& BIT(MAC_ADDR_FORCED
)) &&
3317 (params
.type
== QED_FILTER_MAC
||
3318 params
.type
== QED_FILTER_MAC_VLAN
)) {
3319 if (!ether_addr_equal(p_bulletin
->mac
, params
.mac
) ||
3320 (params
.opcode
!= QED_FILTER_ADD
&&
3321 params
.opcode
!= QED_FILTER_REPLACE
))
3322 status
= PFVF_STATUS_FORCED
;
3326 rc
= qed_iov_chk_ucast(p_hwfn
, vf
->relative_vf_id
, ¶ms
);
3328 status
= PFVF_STATUS_FAILURE
;
3332 rc
= qed_sp_eth_filter_ucast(p_hwfn
, vf
->opaque_fid
, ¶ms
,
3333 QED_SPQ_MODE_CB
, NULL
);
3335 status
= PFVF_STATUS_FAILURE
;
3338 qed_iov_prepare_resp(p_hwfn
, p_ptt
, vf
, CHANNEL_TLV_UCAST_FILTER
,
3339 sizeof(struct pfvf_def_resp_tlv
), status
);
3342 static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn
*p_hwfn
,
3343 struct qed_ptt
*p_ptt
,
3344 struct qed_vf_info
*vf
)
3349 for (i
= 0; i
< vf
->num_sbs
; i
++)
3350 qed_int_igu_init_pure_rt_single(p_hwfn
, p_ptt
,
3352 vf
->opaque_fid
, false);
3354 qed_iov_prepare_resp(p_hwfn
, p_ptt
, vf
, CHANNEL_TLV_INT_CLEANUP
,
3355 sizeof(struct pfvf_def_resp_tlv
),
3356 PFVF_STATUS_SUCCESS
);
3359 static void qed_iov_vf_mbx_close(struct qed_hwfn
*p_hwfn
,
3360 struct qed_ptt
*p_ptt
, struct qed_vf_info
*vf
)
3362 u16 length
= sizeof(struct pfvf_def_resp_tlv
);
3363 u8 status
= PFVF_STATUS_SUCCESS
;
3365 /* Disable Interrupts for VF */
3366 qed_iov_vf_igu_set_int(p_hwfn
, p_ptt
, vf
, 0);
3368 /* Reset Permission table */
3369 qed_iov_config_perm_table(p_hwfn
, p_ptt
, vf
, 0);
3371 qed_iov_prepare_resp(p_hwfn
, p_ptt
, vf
, CHANNEL_TLV_CLOSE
,
3375 static void qed_iov_vf_mbx_release(struct qed_hwfn
*p_hwfn
,
3376 struct qed_ptt
*p_ptt
,
3377 struct qed_vf_info
*p_vf
)
3379 u16 length
= sizeof(struct pfvf_def_resp_tlv
);
3380 u8 status
= PFVF_STATUS_SUCCESS
;
3383 qed_iov_vf_cleanup(p_hwfn
, p_vf
);
3385 if (p_vf
->state
!= VF_STOPPED
&& p_vf
->state
!= VF_FREE
) {
3386 /* Stopping the VF */
3387 rc
= qed_sp_vf_stop(p_hwfn
, p_vf
->concrete_fid
,
3391 DP_ERR(p_hwfn
, "qed_sp_vf_stop returned error %d\n",
3393 status
= PFVF_STATUS_FAILURE
;
3396 p_vf
->state
= VF_STOPPED
;
3399 qed_iov_prepare_resp(p_hwfn
, p_ptt
, p_vf
, CHANNEL_TLV_RELEASE
,
3403 static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn
*p_hwfn
,
3404 struct qed_ptt
*p_ptt
,
3405 struct qed_vf_info
*p_vf
)
3407 struct qed_iov_vf_mbx
*mbx
= &p_vf
->vf_mbx
;
3408 struct pfvf_read_coal_resp_tlv
*p_resp
;
3409 struct vfpf_read_coal_req_tlv
*req
;
3410 u8 status
= PFVF_STATUS_FAILURE
;
3411 struct qed_vf_queue
*p_queue
;
3412 struct qed_queue_cid
*p_cid
;
3413 u16 coal
= 0, qid
, i
;
3417 mbx
->offset
= (u8
*)mbx
->reply_virt
;
3418 req
= &mbx
->req_virt
->read_coal_req
;
3421 b_is_rx
= req
->is_rx
? true : false;
3424 if (!qed_iov_validate_rxq(p_hwfn
, p_vf
, qid
,
3425 QED_IOV_VALIDATE_Q_ENABLE
)) {
3426 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3427 "VF[%d]: Invalid Rx queue_id = %d\n",
3428 p_vf
->abs_vf_id
, qid
);
3432 p_cid
= qed_iov_get_vf_rx_queue_cid(&p_vf
->vf_queues
[qid
]);
3433 rc
= qed_get_rxq_coalesce(p_hwfn
, p_ptt
, p_cid
, &coal
);
3437 if (!qed_iov_validate_txq(p_hwfn
, p_vf
, qid
,
3438 QED_IOV_VALIDATE_Q_ENABLE
)) {
3439 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3440 "VF[%d]: Invalid Tx queue_id = %d\n",
3441 p_vf
->abs_vf_id
, qid
);
3444 for (i
= 0; i
< MAX_QUEUES_PER_QZONE
; i
++) {
3445 p_queue
= &p_vf
->vf_queues
[qid
];
3446 if ((!p_queue
->cids
[i
].p_cid
) ||
3447 (!p_queue
->cids
[i
].b_is_tx
))
3450 p_cid
= p_queue
->cids
[i
].p_cid
;
3452 rc
= qed_get_txq_coalesce(p_hwfn
, p_ptt
, p_cid
, &coal
);
3459 status
= PFVF_STATUS_SUCCESS
;
3462 p_resp
= qed_add_tlv(p_hwfn
, &mbx
->offset
, CHANNEL_TLV_COALESCE_READ
,
3464 p_resp
->coal
= coal
;
3466 qed_add_tlv(p_hwfn
, &mbx
->offset
, CHANNEL_TLV_LIST_END
,
3467 sizeof(struct channel_list_end_tlv
));
3469 qed_iov_send_response(p_hwfn
, p_ptt
, p_vf
, sizeof(*p_resp
), status
);
3472 static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn
*p_hwfn
,
3473 struct qed_ptt
*p_ptt
,
3474 struct qed_vf_info
*vf
)
3476 struct qed_iov_vf_mbx
*mbx
= &vf
->vf_mbx
;
3477 struct vfpf_update_coalesce
*req
;
3478 u8 status
= PFVF_STATUS_FAILURE
;
3479 struct qed_queue_cid
*p_cid
;
3480 u16 rx_coal
, tx_coal
;
3484 req
= &mbx
->req_virt
->update_coalesce
;
3486 rx_coal
= req
->rx_coal
;
3487 tx_coal
= req
->tx_coal
;
3490 if (!qed_iov_validate_rxq(p_hwfn
, vf
, qid
,
3491 QED_IOV_VALIDATE_Q_ENABLE
) && rx_coal
) {
3492 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3493 "VF[%d]: Invalid Rx queue_id = %d\n",
3494 vf
->abs_vf_id
, qid
);
3498 if (!qed_iov_validate_txq(p_hwfn
, vf
, qid
,
3499 QED_IOV_VALIDATE_Q_ENABLE
) && tx_coal
) {
3500 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3501 "VF[%d]: Invalid Tx queue_id = %d\n",
3502 vf
->abs_vf_id
, qid
);
3508 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3509 vf
->abs_vf_id
, rx_coal
, tx_coal
, qid
);
3512 p_cid
= qed_iov_get_vf_rx_queue_cid(&vf
->vf_queues
[qid
]);
3514 rc
= qed_set_rxq_coalesce(p_hwfn
, p_ptt
, rx_coal
, p_cid
);
3518 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3519 vf
->abs_vf_id
, vf
->vf_queues
[qid
].fw_rx_qid
);
3522 vf
->rx_coal
= rx_coal
;
3526 struct qed_vf_queue
*p_queue
= &vf
->vf_queues
[qid
];
3528 for (i
= 0; i
< MAX_QUEUES_PER_QZONE
; i
++) {
3529 if (!p_queue
->cids
[i
].p_cid
)
3532 if (!p_queue
->cids
[i
].b_is_tx
)
3535 rc
= qed_set_txq_coalesce(p_hwfn
, p_ptt
, tx_coal
,
3536 p_queue
->cids
[i
].p_cid
);
3541 "VF[%d]: Unable to set tx queue coalesce\n",
3546 vf
->tx_coal
= tx_coal
;
3549 status
= PFVF_STATUS_SUCCESS
;
3551 qed_iov_prepare_resp(p_hwfn
, p_ptt
, vf
, CHANNEL_TLV_COALESCE_UPDATE
,
3552 sizeof(struct pfvf_def_resp_tlv
), status
);
3555 qed_iov_vf_flr_poll_dorq(struct qed_hwfn
*p_hwfn
,
3556 struct qed_vf_info
*p_vf
, struct qed_ptt
*p_ptt
)
3561 qed_fid_pretend(p_hwfn
, p_ptt
, (u16
) p_vf
->concrete_fid
);
3563 for (cnt
= 0; cnt
< 50; cnt
++) {
3564 val
= qed_rd(p_hwfn
, p_ptt
, DORQ_REG_VF_USAGE_CNT
);
3569 qed_fid_pretend(p_hwfn
, p_ptt
, (u16
) p_hwfn
->hw_info
.concrete_fid
);
3573 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3574 p_vf
->abs_vf_id
, val
);
3582 qed_iov_vf_flr_poll_pbf(struct qed_hwfn
*p_hwfn
,
3583 struct qed_vf_info
*p_vf
, struct qed_ptt
*p_ptt
)
3585 u32 cons
[MAX_NUM_VOQS_E4
], distance
[MAX_NUM_VOQS_E4
];
3588 /* Read initial consumers & producers */
3589 for (i
= 0; i
< MAX_NUM_VOQS_E4
; i
++) {
3592 cons
[i
] = qed_rd(p_hwfn
, p_ptt
,
3593 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0
+
3595 prod
= qed_rd(p_hwfn
, p_ptt
,
3596 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0
+
3598 distance
[i
] = prod
- cons
[i
];
3601 /* Wait for consumers to pass the producers */
3603 for (cnt
= 0; cnt
< 50; cnt
++) {
3604 for (; i
< MAX_NUM_VOQS_E4
; i
++) {
3607 tmp
= qed_rd(p_hwfn
, p_ptt
,
3608 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0
+
3610 if (distance
[i
] > tmp
- cons
[i
])
3614 if (i
== MAX_NUM_VOQS_E4
)
3621 DP_ERR(p_hwfn
, "VF[%d] - pbf polling failed on VOQ %d\n",
3622 p_vf
->abs_vf_id
, i
);
3629 static int qed_iov_vf_flr_poll(struct qed_hwfn
*p_hwfn
,
3630 struct qed_vf_info
*p_vf
, struct qed_ptt
*p_ptt
)
3634 rc
= qed_iov_vf_flr_poll_dorq(p_hwfn
, p_vf
, p_ptt
);
3638 rc
= qed_iov_vf_flr_poll_pbf(p_hwfn
, p_vf
, p_ptt
);
3646 qed_iov_execute_vf_flr_cleanup(struct qed_hwfn
*p_hwfn
,
3647 struct qed_ptt
*p_ptt
,
3648 u16 rel_vf_id
, u32
*ack_vfs
)
3650 struct qed_vf_info
*p_vf
;
3653 p_vf
= qed_iov_get_vf_info(p_hwfn
, rel_vf_id
, false);
3657 if (p_hwfn
->pf_iov_info
->pending_flr
[rel_vf_id
/ 64] &
3658 (1ULL << (rel_vf_id
% 64))) {
3659 u16 vfid
= p_vf
->abs_vf_id
;
3661 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3662 "VF[%d] - Handling FLR\n", vfid
);
3664 qed_iov_vf_cleanup(p_hwfn
, p_vf
);
3666 /* If VF isn't active, no need for anything but SW */
3670 rc
= qed_iov_vf_flr_poll(p_hwfn
, p_vf
, p_ptt
);
3674 rc
= qed_final_cleanup(p_hwfn
, p_ptt
, vfid
, true);
3676 DP_ERR(p_hwfn
, "Failed handle FLR of VF[%d]\n", vfid
);
3680 /* Workaround to make VF-PF channel ready, as FW
3681 * doesn't do that as a part of FLR.
3684 GTT_BAR0_MAP_REG_USDM_RAM
+
3685 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid
), 1);
3687 /* VF_STOPPED has to be set only after final cleanup
3688 * but prior to re-enabling the VF.
3690 p_vf
->state
= VF_STOPPED
;
3692 rc
= qed_iov_enable_vf_access(p_hwfn
, p_ptt
, p_vf
);
3694 DP_ERR(p_hwfn
, "Failed to re-enable VF[%d] acces\n",
3699 /* Mark VF for ack and clean pending state */
3700 if (p_vf
->state
== VF_RESET
)
3701 p_vf
->state
= VF_STOPPED
;
3702 ack_vfs
[vfid
/ 32] |= BIT((vfid
% 32));
3703 p_hwfn
->pf_iov_info
->pending_flr
[rel_vf_id
/ 64] &=
3704 ~(1ULL << (rel_vf_id
% 64));
3705 p_vf
->vf_mbx
.b_pending_msg
= false;
3712 qed_iov_vf_flr_cleanup(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
)
3714 u32 ack_vfs
[VF_MAX_STATIC
/ 32];
3718 memset(ack_vfs
, 0, sizeof(u32
) * (VF_MAX_STATIC
/ 32));
3720 /* Since BRB <-> PRS interface can't be tested as part of the flr
3721 * polling due to HW limitations, simply sleep a bit. And since
3722 * there's no need to wait per-vf, do it before looping.
3726 for (i
= 0; i
< p_hwfn
->cdev
->p_iov_info
->total_vfs
; i
++)
3727 qed_iov_execute_vf_flr_cleanup(p_hwfn
, p_ptt
, i
, ack_vfs
);
3729 rc
= qed_mcp_ack_vf_flr(p_hwfn
, p_ptt
, ack_vfs
);
3733 bool qed_iov_mark_vf_flr(struct qed_hwfn
*p_hwfn
, u32
*p_disabled_vfs
)
3738 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
, "Marking FLR-ed VFs\n");
3739 for (i
= 0; i
< (VF_MAX_STATIC
/ 32); i
++)
3740 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3741 "[%08x,...,%08x]: %08x\n",
3742 i
* 32, (i
+ 1) * 32 - 1, p_disabled_vfs
[i
]);
3744 if (!p_hwfn
->cdev
->p_iov_info
) {
3745 DP_NOTICE(p_hwfn
, "VF flr but no IOV\n");
3750 for (i
= 0; i
< p_hwfn
->cdev
->p_iov_info
->total_vfs
; i
++) {
3751 struct qed_vf_info
*p_vf
;
3754 p_vf
= qed_iov_get_vf_info(p_hwfn
, i
, false);
3758 vfid
= p_vf
->abs_vf_id
;
3759 if (BIT((vfid
% 32)) & p_disabled_vfs
[vfid
/ 32]) {
3760 u64
*p_flr
= p_hwfn
->pf_iov_info
->pending_flr
;
3761 u16 rel_vf_id
= p_vf
->relative_vf_id
;
3763 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3764 "VF[%d] [rel %d] got FLR-ed\n",
3767 p_vf
->state
= VF_RESET
;
3769 /* No need to lock here, since pending_flr should
3770 * only change here and before ACKing MFw. Since
3771 * MFW will not trigger an additional attention for
3772 * VF flr until ACKs, we're safe.
3774 p_flr
[rel_vf_id
/ 64] |= 1ULL << (rel_vf_id
% 64);
3782 static void qed_iov_get_link(struct qed_hwfn
*p_hwfn
,
3784 struct qed_mcp_link_params
*p_params
,
3785 struct qed_mcp_link_state
*p_link
,
3786 struct qed_mcp_link_capabilities
*p_caps
)
3788 struct qed_vf_info
*p_vf
= qed_iov_get_vf_info(p_hwfn
,
3791 struct qed_bulletin_content
*p_bulletin
;
3796 p_bulletin
= p_vf
->bulletin
.p_virt
;
3799 __qed_vf_get_link_params(p_hwfn
, p_params
, p_bulletin
);
3801 __qed_vf_get_link_state(p_hwfn
, p_link
, p_bulletin
);
3803 __qed_vf_get_link_caps(p_hwfn
, p_caps
, p_bulletin
);
3806 static void qed_iov_process_mbx_req(struct qed_hwfn
*p_hwfn
,
3807 struct qed_ptt
*p_ptt
, int vfid
)
3809 struct qed_iov_vf_mbx
*mbx
;
3810 struct qed_vf_info
*p_vf
;
3812 p_vf
= qed_iov_get_vf_info(p_hwfn
, (u16
) vfid
, true);
3816 mbx
= &p_vf
->vf_mbx
;
3818 /* qed_iov_process_mbx_request */
3819 if (!mbx
->b_pending_msg
) {
3821 "VF[%02x]: Trying to process mailbox message when none is pending\n",
3825 mbx
->b_pending_msg
= false;
3827 mbx
->first_tlv
= mbx
->req_virt
->first_tlv
;
3829 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3830 "VF[%02x]: Processing mailbox message [type %04x]\n",
3831 p_vf
->abs_vf_id
, mbx
->first_tlv
.tl
.type
);
3833 /* check if tlv type is known */
3834 if (qed_iov_tlv_supported(mbx
->first_tlv
.tl
.type
) &&
3835 !p_vf
->b_malicious
) {
3836 switch (mbx
->first_tlv
.tl
.type
) {
3837 case CHANNEL_TLV_ACQUIRE
:
3838 qed_iov_vf_mbx_acquire(p_hwfn
, p_ptt
, p_vf
);
3840 case CHANNEL_TLV_VPORT_START
:
3841 qed_iov_vf_mbx_start_vport(p_hwfn
, p_ptt
, p_vf
);
3843 case CHANNEL_TLV_VPORT_TEARDOWN
:
3844 qed_iov_vf_mbx_stop_vport(p_hwfn
, p_ptt
, p_vf
);
3846 case CHANNEL_TLV_START_RXQ
:
3847 qed_iov_vf_mbx_start_rxq(p_hwfn
, p_ptt
, p_vf
);
3849 case CHANNEL_TLV_START_TXQ
:
3850 qed_iov_vf_mbx_start_txq(p_hwfn
, p_ptt
, p_vf
);
3852 case CHANNEL_TLV_STOP_RXQS
:
3853 qed_iov_vf_mbx_stop_rxqs(p_hwfn
, p_ptt
, p_vf
);
3855 case CHANNEL_TLV_STOP_TXQS
:
3856 qed_iov_vf_mbx_stop_txqs(p_hwfn
, p_ptt
, p_vf
);
3858 case CHANNEL_TLV_UPDATE_RXQ
:
3859 qed_iov_vf_mbx_update_rxqs(p_hwfn
, p_ptt
, p_vf
);
3861 case CHANNEL_TLV_VPORT_UPDATE
:
3862 qed_iov_vf_mbx_vport_update(p_hwfn
, p_ptt
, p_vf
);
3864 case CHANNEL_TLV_UCAST_FILTER
:
3865 qed_iov_vf_mbx_ucast_filter(p_hwfn
, p_ptt
, p_vf
);
3867 case CHANNEL_TLV_CLOSE
:
3868 qed_iov_vf_mbx_close(p_hwfn
, p_ptt
, p_vf
);
3870 case CHANNEL_TLV_INT_CLEANUP
:
3871 qed_iov_vf_mbx_int_cleanup(p_hwfn
, p_ptt
, p_vf
);
3873 case CHANNEL_TLV_RELEASE
:
3874 qed_iov_vf_mbx_release(p_hwfn
, p_ptt
, p_vf
);
3876 case CHANNEL_TLV_UPDATE_TUNN_PARAM
:
3877 qed_iov_vf_mbx_update_tunn_param(p_hwfn
, p_ptt
, p_vf
);
3879 case CHANNEL_TLV_COALESCE_UPDATE
:
3880 qed_iov_vf_pf_set_coalesce(p_hwfn
, p_ptt
, p_vf
);
3882 case CHANNEL_TLV_COALESCE_READ
:
3883 qed_iov_vf_pf_get_coalesce(p_hwfn
, p_ptt
, p_vf
);
3886 } else if (qed_iov_tlv_supported(mbx
->first_tlv
.tl
.type
)) {
3887 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
3888 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3889 p_vf
->abs_vf_id
, mbx
->first_tlv
.tl
.type
);
3891 qed_iov_prepare_resp(p_hwfn
, p_ptt
, p_vf
,
3892 mbx
->first_tlv
.tl
.type
,
3893 sizeof(struct pfvf_def_resp_tlv
),
3894 PFVF_STATUS_MALICIOUS
);
3896 /* unknown TLV - this may belong to a VF driver from the future
3897 * - a version written after this PF driver was written, which
3898 * supports features unknown as of yet. Too bad since we don't
3899 * support them. Or this may be because someone wrote a crappy
3900 * VF driver and is sending garbage over the channel.
3903 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
3905 mbx
->first_tlv
.tl
.type
,
3906 mbx
->first_tlv
.tl
.length
,
3907 mbx
->first_tlv
.padding
, mbx
->first_tlv
.reply_address
);
3909 /* Try replying in case reply address matches the acquisition's
3912 if (p_vf
->acquire
.first_tlv
.reply_address
&&
3913 (mbx
->first_tlv
.reply_address
==
3914 p_vf
->acquire
.first_tlv
.reply_address
)) {
3915 qed_iov_prepare_resp(p_hwfn
, p_ptt
, p_vf
,
3916 mbx
->first_tlv
.tl
.type
,
3917 sizeof(struct pfvf_def_resp_tlv
),
3918 PFVF_STATUS_NOT_SUPPORTED
);
3922 "VF[%02x]: Can't respond to TLV - no valid reply address\n",
3928 void qed_iov_pf_get_pending_events(struct qed_hwfn
*p_hwfn
, u64
*events
)
3932 memset(events
, 0, sizeof(u64
) * QED_VF_ARRAY_LENGTH
);
3934 qed_for_each_vf(p_hwfn
, i
) {
3935 struct qed_vf_info
*p_vf
;
3937 p_vf
= &p_hwfn
->pf_iov_info
->vfs_array
[i
];
3938 if (p_vf
->vf_mbx
.b_pending_msg
)
3939 events
[i
/ 64] |= 1ULL << (i
% 64);
3943 static struct qed_vf_info
*qed_sriov_get_vf_from_absid(struct qed_hwfn
*p_hwfn
,
3946 u8 min
= (u8
) p_hwfn
->cdev
->p_iov_info
->first_vf_in_pf
;
3948 if (!_qed_iov_pf_sanity_check(p_hwfn
, (int)abs_vfid
- min
, false)) {
3951 "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
3956 return &p_hwfn
->pf_iov_info
->vfs_array
[(u8
) abs_vfid
- min
];
3959 static int qed_sriov_vfpf_msg(struct qed_hwfn
*p_hwfn
,
3960 u16 abs_vfid
, struct regpair
*vf_msg
)
3962 struct qed_vf_info
*p_vf
= qed_sriov_get_vf_from_absid(p_hwfn
,
3968 /* List the physical address of the request so that handler
3969 * could later on copy the message from it.
3971 p_vf
->vf_mbx
.pending_req
= (((u64
)vf_msg
->hi
) << 32) | vf_msg
->lo
;
3973 /* Mark the event and schedule the workqueue */
3974 p_vf
->vf_mbx
.b_pending_msg
= true;
3975 qed_schedule_iov(p_hwfn
, QED_IOV_WQ_MSG_FLAG
);
3980 static void qed_sriov_vfpf_malicious(struct qed_hwfn
*p_hwfn
,
3981 struct malicious_vf_eqe_data
*p_data
)
3983 struct qed_vf_info
*p_vf
;
3985 p_vf
= qed_sriov_get_vf_from_absid(p_hwfn
, p_data
->vf_id
);
3990 if (!p_vf
->b_malicious
) {
3992 "VF [%d] - Malicious behavior [%02x]\n",
3993 p_vf
->abs_vf_id
, p_data
->err_id
);
3995 p_vf
->b_malicious
= true;
3998 "VF [%d] - Malicious behavior [%02x]\n",
3999 p_vf
->abs_vf_id
, p_data
->err_id
);
static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			       u8 opcode,
			       __le16 echo,
			       union event_ring_data *data, u8 fw_return_code)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
					  &data->vf_pf_channel.msg_addr);
	case COMMON_EVENT_MALICIOUS_VF:
		qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
		return 0;
	default:
		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return -EINVAL;
	}
}
4022 u16
qed_iov_get_next_active_vf(struct qed_hwfn
*p_hwfn
, u16 rel_vf_id
)
4024 struct qed_hw_sriov_info
*p_iov
= p_hwfn
->cdev
->p_iov_info
;
4030 for (i
= rel_vf_id
; i
< p_iov
->total_vfs
; i
++)
4031 if (qed_iov_is_valid_vfid(p_hwfn
, rel_vf_id
, true, false))
4038 static int qed_iov_copy_vf_msg(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*ptt
,
4041 struct qed_dmae_params params
;
4042 struct qed_vf_info
*vf_info
;
4044 vf_info
= qed_iov_get_vf_info(p_hwfn
, (u16
) vfid
, true);
4048 memset(¶ms
, 0, sizeof(struct qed_dmae_params
));
4049 params
.flags
= QED_DMAE_FLAG_VF_SRC
| QED_DMAE_FLAG_COMPLETION_DST
;
4050 params
.src_vfid
= vf_info
->abs_vf_id
;
4052 if (qed_dmae_host2host(p_hwfn
, ptt
,
4053 vf_info
->vf_mbx
.pending_req
,
4054 vf_info
->vf_mbx
.req_phys
,
4055 sizeof(union vfpf_tlvs
) / 4, ¶ms
)) {
4056 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
4057 "Failed to copy message from VF 0x%02x\n", vfid
);
4065 static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn
*p_hwfn
,
4068 struct qed_vf_info
*vf_info
;
4071 vf_info
= qed_iov_get_vf_info(p_hwfn
, (u16
)vfid
, true);
4073 DP_NOTICE(p_hwfn
->cdev
,
4074 "Can not set forced MAC, invalid vfid [%d]\n", vfid
);
4078 if (vf_info
->b_malicious
) {
4079 DP_NOTICE(p_hwfn
->cdev
,
4080 "Can't set forced MAC to malicious VF [%d]\n", vfid
);
4084 feature
= 1 << MAC_ADDR_FORCED
;
4085 memcpy(vf_info
->bulletin
.p_virt
->mac
, mac
, ETH_ALEN
);
4087 vf_info
->bulletin
.p_virt
->valid_bitmap
|= feature
;
4088 /* Forced MAC will disable MAC_ADDR */
4089 vf_info
->bulletin
.p_virt
->valid_bitmap
&= ~BIT(VFPF_BULLETIN_MAC_ADDR
);
4091 qed_iov_configure_vport_forced(p_hwfn
, vf_info
, feature
);
4094 static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn
*p_hwfn
,
4097 struct qed_vf_info
*vf_info
;
4100 vf_info
= qed_iov_get_vf_info(p_hwfn
, (u16
) vfid
, true);
4102 DP_NOTICE(p_hwfn
->cdev
,
4103 "Can not set forced MAC, invalid vfid [%d]\n", vfid
);
4107 if (vf_info
->b_malicious
) {
4108 DP_NOTICE(p_hwfn
->cdev
,
4109 "Can't set forced vlan to malicious VF [%d]\n", vfid
);
4113 feature
= 1 << VLAN_ADDR_FORCED
;
4114 vf_info
->bulletin
.p_virt
->pvid
= pvid
;
4116 vf_info
->bulletin
.p_virt
->valid_bitmap
|= feature
;
4118 vf_info
->bulletin
.p_virt
->valid_bitmap
&= ~feature
;
4120 qed_iov_configure_vport_forced(p_hwfn
, vf_info
, feature
);
4123 void qed_iov_bulletin_set_udp_ports(struct qed_hwfn
*p_hwfn
,
4124 int vfid
, u16 vxlan_port
, u16 geneve_port
)
4126 struct qed_vf_info
*vf_info
;
4128 vf_info
= qed_iov_get_vf_info(p_hwfn
, (u16
)vfid
, true);
4130 DP_NOTICE(p_hwfn
->cdev
,
4131 "Can not set udp ports, invalid vfid [%d]\n", vfid
);
4135 if (vf_info
->b_malicious
) {
4136 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
4137 "Can not set udp ports to malicious VF [%d]\n",
4142 vf_info
->bulletin
.p_virt
->vxlan_udp_port
= vxlan_port
;
4143 vf_info
->bulletin
.p_virt
->geneve_udp_port
= geneve_port
;
4146 static bool qed_iov_vf_has_vport_instance(struct qed_hwfn
*p_hwfn
, int vfid
)
4148 struct qed_vf_info
*p_vf_info
;
4150 p_vf_info
= qed_iov_get_vf_info(p_hwfn
, (u16
) vfid
, true);
4154 return !!p_vf_info
->vport_instance
;
4157 static bool qed_iov_is_vf_stopped(struct qed_hwfn
*p_hwfn
, int vfid
)
4159 struct qed_vf_info
*p_vf_info
;
4161 p_vf_info
= qed_iov_get_vf_info(p_hwfn
, (u16
) vfid
, true);
4165 return p_vf_info
->state
== VF_STOPPED
;
4168 static bool qed_iov_spoofchk_get(struct qed_hwfn
*p_hwfn
, int vfid
)
4170 struct qed_vf_info
*vf_info
;
4172 vf_info
= qed_iov_get_vf_info(p_hwfn
, (u16
) vfid
, true);
4176 return vf_info
->spoof_chk
;
4179 static int qed_iov_spoofchk_set(struct qed_hwfn
*p_hwfn
, int vfid
, bool val
)
4181 struct qed_vf_info
*vf
;
4184 if (!qed_iov_pf_sanity_check(p_hwfn
, vfid
)) {
4186 "SR-IOV sanity check failed, can't set spoofchk\n");
4190 vf
= qed_iov_get_vf_info(p_hwfn
, (u16
) vfid
, true);
4194 if (!qed_iov_vf_has_vport_instance(p_hwfn
, vfid
)) {
4195 /* After VF VPORT start PF will configure spoof check */
4196 vf
->req_spoofchk_val
= val
;
4201 rc
= __qed_iov_spoofchk_set(p_hwfn
, vf
, val
);
static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
					   u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
		return NULL;

	return p_vf->bulletin.p_virt->mac;
}

static u16
qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}

static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, int vfid, int val)
{
	struct qed_mcp_link_state *p_link;
	struct qed_vf_info *vf;
	u8 abs_vp_id = 0;
	int rc;

	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return -EINVAL;

	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc)
		return rc;

	p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;

	return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
				 p_link->speed);
}

static int
qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
{
	struct qed_vf_info *vf;
	u8 vport_id;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set min rate\n");
			return -EINVAL;
		}
	}

	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
	vport_id = vf->vport_id;

	return qed_configure_vport_wfq(cdev, vport_id, rate);
}

static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_wfq_data *vf_vp_wfq;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}

/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 * @hwfn: hardware function pointer
 * @flag: IOV flag for VF/PF
 */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
	smp_mb__before_atomic();
	set_bit(flag, &hwfn->iov_task_flags);
	smp_mb__after_atomic();
	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}

void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
		queue_delayed_work(cdev->hwfns[i].iov_wq,
				   &cdev->hwfns[i].iov_task, 0);
}

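/* Tear down SR-IOV: flush any pending IOV work, mark all VFs as disabled,
 * optionally disable the PCI SR-IOV capability, and release the per-VF HW
 * resources once each VF has stopped.
 */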
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	int i;

	for_each_hwfn(cdev, i)
		if (cdev->hwfns[i].iov_wq)
			flush_workqueue(cdev->hwfns[i].iov_wq);

	/* Mark VFs for disablement */
	qed_iov_set_vfs_to_disable(cdev, true);

	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
		pci_disable_sriov(cdev->pdev);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
		int j, k;

		/* Failure to acquire the ptt in 100g creates an odd error
		 * where the first engine has already released IOV.
		 */
		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			return -EBUSY;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		qed_clean_wfq_db(hwfn, ptt);

		qed_for_each_vf(hwfn, j) {
			if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
				continue;

			/* Wait until VF is disabled before releasing */
			for (k = 0; k < 100; k++) {
				if (!qed_iov_is_vf_stopped(hwfn, j))
					msleep(20);
				else
					break;
			}

			if (k < 100)
				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
							  ptt, (u16)j);
			else
				DP_ERR(hwfn,
				       "Timeout waiting for VF's FLR to end\n");
		}

		qed_ptt_release(hwfn, ptt);
	}

	qed_iov_set_vfs_to_disable(cdev, false);

	return 0;
}

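/* Illustrative example (numbers are assumptions, not taken from HW): with
 * FEAT_NUM(hwfn, QED_PF_L2_QUE) == 16 and params->num_queues == 4, VF0 would
 * request queues 16..19 and VF1 queues 20..23.
 */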
static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
					u16 vfid,
					struct qed_iov_vf_init_params *params)
{
	u16 base, i;

	/* Since we have an equal resource distribution per-VF, and we assume
	 * PF has acquired the QED_PF_L2_QUE first queues, we start setting
	 * sequentially from there.
	 */
	base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;

	params->rel_vf_id = vfid;
	for (i = 0; i < params->num_queues; i++) {
		params->req_rx_queue[i] = base + i;
		params->req_tx_queue[i] = base + i;
	}
}

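/* Enable 'num' VFs: initialize the HW for each VF on every hwfn and then
 * enable the PCI SR-IOV capability; any failure unwinds through
 * qed_sriov_disable().
 */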
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
	struct qed_iov_vf_init_params params;
	int i, j, rc = 0;

	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
		DP_NOTICE(cdev, "Can start at most %d VFs\n",
			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
		return -EINVAL;
	}

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		struct qed_hwfn *hwfn = &cdev->hwfns[j];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
					  FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
					  16);

		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			rc = -EBUSY;
			goto err;
		}

		for (i = 0; i < num; i++) {
			if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qed_sriov_enable_qid_config(hwfn, i, &params);
			rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
			if (rc) {
				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
				qed_ptt_release(hwfn, ptt);
				goto err;
			}
		}

		qed_ptt_release(hwfn, ptt);
	}

	/* Enable SRIOV PCIe functions */
	rc = pci_enable_sriov(cdev->pdev, num);
	if (rc) {
		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
		goto err;
	}

	return num;

err:
	qed_sriov_disable(cdev, false);
	return rc;
}

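/* Entry point for enabling/disabling VFs. This is typically reached through
 * the PCI core's sriov_numvfs sysfs attribute, e.g. (illustrative only):
 *   echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 */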
static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
{
	if (!IS_QED_SRIOV(cdev)) {
		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
		return -EOPNOTSUPP;
	}

	if (num_vfs_param)
		return qed_sriov_enable(cdev, num_vfs_param);
	else
		return qed_sriov_disable(cdev, true);
}

static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF MAC; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced MAC, and schedule the IOV task */
		ether_addr_copy(vf_info->forced_mac, mac);
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF vlan; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] vlan (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced vlan, and schedule the IOV task */
		vf_info->forced_vlan = vid;
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}

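/* Report a VF's current configuration (MAC, VLAN, spoof-check, link state,
 * rates); typically backs the ndo_get_vf_config path used by 'ip link show'.
 */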
static int qed_get_vf_config(struct qed_dev *cdev,
			     int vf_id, struct ifla_vf_info *ivi)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_public_vf_info *vf_info;
	struct qed_mcp_link_state link;
	u32 tx_rate;

	/* Sanitize request */
	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);

	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);

	/* Fill information about VF */
	if (is_valid_ether_addr(vf_info->forced_mac))
		ether_addr_copy(ivi->mac, vf_info->forced_mac);
	else
		ether_addr_copy(ivi->mac, vf_info->mac);

	ivi->vlan = vf_info->forced_vlan;
	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
	ivi->linkstate = vf_info->link_state;
	tx_rate = vf_info->tx_rate;
	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);

	return 0;
}

void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
	struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	int i;

	if (!hwfn->pf_iov_info)
		return;

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
		if (!vf_info)
			continue;

		/* Only hwfn0 is actually interested in the link speed.
		 * But since only it would receive an MFW indication of link,
		 * need to take configuration from it - otherwise things like
		 * rate limiting for hwfn1 VF would not work.
		 */
		memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
		       sizeof(params));
		memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
		memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
		       sizeof(caps));

		/* Modify link according to the VF's configured link state */
		switch (vf_info->link_state) {
		case IFLA_VF_LINK_STATE_DISABLE:
			link.link_up = false;
			break;
		case IFLA_VF_LINK_STATE_ENABLE:
			link.link_up = true;
			/* Set speed according to maximum supported by HW.
			 * that is 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (hwfn->cdev->num_hwfns > 1) ?
				     100000 : 40000;
		default:
			/* In auto mode pass PF link image to VF */
			break;
		}

		if (link.link_up && vf_info->tx_rate) {
			struct qed_ptt *ptt;
			int rate;

			rate = min_t(int, vf_info->tx_rate, link.speed);

			ptt = qed_ptt_acquire(hwfn);
			if (!ptt) {
				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
				return;
			}

			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
				vf_info->tx_rate = rate;
				link.speed = rate;
			}

			qed_ptt_release(hwfn, ptt);
		}

		qed_iov_set_link(hwfn, i, &params, &link, &caps);
	}

	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}

static int qed_set_vf_link_state(struct qed_dev *cdev,
				 int vf_id, int link_state)
{
	int i;

	/* Sanitize request */
	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	/* Handle configuration of link state */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
		if (!vf)
			continue;

		if (vf->link_state == link_state)
			continue;

		vf->link_state = link_state;
		qed_inform_vf_link_state(&cdev->hwfns[i]);
	}

	return 0;
}

static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
{
	int i, rc = -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
		if (rc)
			break;
	}

	return rc;
}

static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set tx rate\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);

		vf->tx_rate = rate;

		qed_inform_vf_link_state(p_hwfn);
	}

	return 0;
}

static int qed_set_vf_rate(struct qed_dev *cdev,
			   int vfid, u32 min_rate, u32 max_rate)
{
	int rc_min = 0, rc_max = 0;

	if (max_rate)
		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);

	if (min_rate)
		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);

	if (rc_max | rc_min)
		return -EINVAL;

	return 0;
}

static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
			DP_NOTICE(hwfn,
				  "SR-IOV sanity check failed, can't set trust\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(hwfn, vfid, true);

		if (vf->is_trusted_request == trust)
			return 0;
		vf->is_trusted_request = trust;

		qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
	}

	return 0;
}

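/* Drain pending VF->PF mailbox requests: copy each pending VF message into
 * the PF-side request buffer and process it.
 */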
static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
	u64 events[QED_VF_ARRAY_LENGTH];
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Can't acquire PTT; re-scheduling\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
		return;
	}

	qed_iov_pf_get_pending_events(hwfn, events);

	DP_VERBOSE(hwfn, QED_MSG_IOV,
		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
		   events[0], events[1], events[2]);

	qed_for_each_vf(hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
			continue;

		qed_iov_process_mbx_req(hwfn, ptt, i);
	}

	qed_ptt_release(hwfn, ptt);
}

static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
{
	int i;

	qed_for_each_vf(hwfn, i) {
		struct qed_public_vf_info *info;
		bool update = false;
		u8 *mac;

		info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (!info)
			continue;

		/* Update data on bulletin board */
		mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
		if (is_valid_ether_addr(info->forced_mac) &&
		    (!mac || !ether_addr_equal(mac, info->forced_mac))) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);

			/* Update bulletin board with forced MAC */
			qed_iov_bulletin_set_forced_mac(hwfn,
							info->forced_mac, i);
			update = true;
		}

		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
		    info->forced_vlan) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
				   info->forced_vlan,
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
			qed_iov_bulletin_set_forced_vlan(hwfn,
							 info->forced_vlan, i);
			update = true;
		}

		if (update)
			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}

static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
{
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		return;
	}

	qed_for_each_vf(hwfn, i)
		qed_iov_post_vf_bulletin(hwfn, i, ptt);

	qed_ptt_release(hwfn, ptt);
}

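/* A trusted VF is allowed to receive unmatched unicast/multicast traffic;
 * apply or remove those accept-mode bits through a vport update when the
 * requested trust setting changes.
 */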
static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
{
	struct qed_sp_vport_update_params params;
	struct qed_filter_accept_flags *flags;
	struct qed_public_vf_info *vf_info;
	struct qed_vf_info *vf;
	u8 mask;
	int i;

	mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
	flags = &params.accept_flags;

	qed_for_each_vf(hwfn, i) {
		/* Need to make sure current requested configuration didn't
		 * flip so that we'll end up configuring something that's not
		 * needed.
		 */
		vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (vf_info->is_trusted_configured ==
		    vf_info->is_trusted_request)
			continue;
		vf_info->is_trusted_configured = vf_info->is_trusted_request;

		/* Validate that the VF has a configured vport */
		vf = qed_iov_get_vf_info(hwfn, i, true);
		if (!vf->vport_instance)
			continue;

		memset(&params, 0, sizeof(params));
		params.opaque_fid = vf->opaque_fid;
		params.vport_id = vf->vport_id;

		if (vf_info->rx_accept_mode & mask) {
			flags->update_rx_mode_config = 1;
			flags->rx_accept_filter = vf_info->rx_accept_mode;
		}

		if (vf_info->tx_accept_mode & mask) {
			flags->update_tx_mode_config = 1;
			flags->tx_accept_filter = vf_info->tx_accept_mode;
		}

		/* Remove if needed; Otherwise this would set the mask */
		if (!vf_info->is_trusted_configured) {
			flags->rx_accept_filter &= ~mask;
			flags->tx_accept_filter &= ~mask;
		}

		if (flags->update_rx_mode_config ||
		    flags->update_tx_mode_config)
			qed_sp_vport_update(hwfn, &params,
					    QED_SPQ_MODE_EBLOCK, NULL);
	}
}

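/* Deferred worker behind the PF's IOV workqueue; handles whichever flags
 * were set by qed_schedule_iov() - FLR cleanup, VF mailbox messages, unicast
 * filter updates, bulletin posting and trust changes.
 */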
static void qed_iov_pf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	int rc;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		if (!ptt) {
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
			return;
		}

		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
		if (rc)
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);

		qed_ptt_release(hwfn, ptt);
	}

	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
		qed_handle_vf_msg(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_pf_set_vf_unicast(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_bulletin_post(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
		qed_iov_handle_trust_change(hwfn);
}

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
	int i;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].iov_wq)
			continue;

		if (schedule_first) {
			qed_schedule_iov(&cdev->hwfns[i],
					 QED_IOV_WQ_STOP_WQ_FLAG);
			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
		}

		flush_workqueue(cdev->hwfns[i].iov_wq);
		destroy_workqueue(cdev->hwfns[i].iov_wq);
	}
}

int qed_iov_wq_start(struct qed_dev *cdev)
{
	char name[NAME_SIZE];
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* PFs needs a dedicated workqueue only if they support IOV.
		 * VFs always require one.
		 */
		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
			continue;

		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

		p_hwfn->iov_wq = create_singlethread_workqueue(name);
		if (!p_hwfn->iov_wq) {
			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
			return -ENOMEM;
		}

		if (IS_PF(cdev))
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
		else
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
	}

	return 0;
}

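/* PF-side SR-IOV callbacks exported through qed_iov_if.h; assumed to be
 * consumed by the protocol driver (e.g. qede) on top of qed.
 */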
const struct qed_iov_hv_ops qed_iov_ops_pass = {
	.configure = &qed_sriov_configure,
	.set_mac = &qed_sriov_pf_set_mac,
	.set_vlan = &qed_sriov_pf_set_vlan,
	.get_config = &qed_get_vf_config,
	.set_link_state = &qed_set_vf_link_state,
	.set_spoof = &qed_spoof_configure,
	.set_rate = &qed_set_vf_rate,
	.set_trust = &qed_set_vf_trust,