drivers/net/ethernet/qlogic/qed/qed_sriov.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			       u8 opcode,
			       __le16 echo,
			       union event_ring_data *data, u8 fw_return_code);
static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);

static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
	u8 legacy = 0;

	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		legacy |= QED_QCID_LEGACY_VF_RX_PROD;

	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		legacy |= QED_QCID_LEGACY_VF_CID;

	return legacy;
}
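
/* Worked example (editorial note, not in the original source): a VF whose
 * acquire message reports eth_fp_hsi_minor == ETH_HSI_VER_NO_PKT_LEN_TUNN
 * and that lacks VFPF_ACQUIRE_CAP_QUEUE_QIDS gets both bits set here,
 * i.e. QED_QCID_LEGACY_VF_RX_PROD | QED_QCID_LEGACY_VF_CID.
 */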

/* IOV ramrods */
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 fp_minor;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_ETH_ROCE:
	case QED_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		qed_sp_destroy_request(p_hwfn, p_ent);
		return -EINVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is newer than the PF's %02x.%02x; Configuring the PF's version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR,
			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
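
/* Editorial sketch (not in the original source): both ramrods above follow
 * the same slow-path queue pattern used throughout this file:
 *	memset(&init_data, 0, sizeof(init_data));	// describe the request
 *	rc = qed_sp_init_request(...);			// allocate an SPQ entry
 *	p_ramrod = &p_ent->ramrod.<type>;		// fill the ramrod data
 *	return qed_spq_post(...);			// post to firmware
 * With QED_SPQ_MODE_EBLOCK the post blocks until firmware completion.
 */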

bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id,
			   bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				  b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

static struct qed_queue_cid *
qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
{
	int i;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
			return p_queue->cids[i].p_cid;
	}

	return NULL;
}

enum qed_iov_validate_q_mode {
	QED_IOV_VALIDATE_Q_NA,
	QED_IOV_VALIDATE_Q_ENABLE,
	QED_IOV_VALIDATE_Q_DISABLE,
};

static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					u16 qid,
					enum qed_iov_validate_q_mode mode,
					bool b_is_tx)
{
	int i;

	if (mode == QED_IOV_VALIDATE_Q_NA)
		return true;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		struct qed_vf_queue_cid *p_qcid;

		p_qcid = &p_vf->vf_queues[qid].cids[i];

		if (!p_qcid->p_cid)
			continue;

		if (p_qcid->b_is_tx != b_is_tx)
			continue;

		return mode == QED_IOV_VALIDATE_Q_ENABLE;
	}

	/* If we haven't found any valid cid, then it's disabled */
	return mode == QED_IOV_VALIDATE_Q_DISABLE;
}

static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 rx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 tx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *p_vf, u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}

static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						false))
			return true;

	return false;
}

static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_txqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						true))
			return true;

	return false;
}

static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
				    int vfid, struct qed_ptt *p_ptt)
{
	struct qed_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct qed_dmae_params params;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return -EINVAL;

	if (!p_vf->vf_bulletin)
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vfid = p_vf->abs_vf_id;
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				  &params);
}
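
/* Editorial sketch (not in the original source): the bulletin is a one-way
 * PF-to-VF channel. The PF bumps 'version', computes a CRC32 over everything
 * past the CRC field, and DMAs the whole structure into VF memory. A reader
 * on the VF side can, in effect, validate a snapshot with:
 *	crc = crc32(0, (u8 *)p_bulletin + crc_size, size - crc_size);
 *	if (crc == p_bulletin->crc && p_bulletin->version != last_seen)
 *		...consume the new bulletin...
 */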

static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes the max number of VFs.
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d; setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return 0;
}
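
/* Editorial note (general PCIe SR-IOV background, not qed-specific): the
 * VF_OFFSET and VF_STRIDE words read above define the VFs' routing IDs;
 * per the SR-IOV spec, VF i (1-based) of a PF with routing ID pf_rid is:
 *	vf_rid = pf_rid + offset + (i - 1) * stride
 * which is what makes first_vf_in_pf computable in qed_iov_hw_info() below.
 */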

static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;

		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	}
}
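
/* Editorial note (not in the original source): the mailbox and bulletin
 * buffers allocated below are single coherent allocations carved into
 * per-VF slots, so VF 'idx' addresses above are plain strides over a base:
 *	req_virt = mbx_msg_virt_addr + idx;	// union vfpf_tlvs slots
 *	req_phys = mbx_msg_phys_addr + idx * sizeof(union vfpf_tlvs);
 */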

static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov)
		return -ENOMEM;

	p_hwfn->pf_iov_info = p_sriov;

	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				  qed_sriov_eqe_event);

	return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}

int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (is_kdump_kernel())
		return 0;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info)
		return -ENOMEM;

	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * in case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine VFs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on the next device,
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is reached only in the !ARI
	 * case to differentiate between the two.
	 */
	if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		cdev->p_iov_info->first_vf_in_pf = first;

		if (QED_PATH_ID(p_hwfn))
			cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		cdev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}
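
/* Worked example (editorial note, not in the original source), assuming
 * ARI: with VF_OFFSET == 16 and abs_pf_id == 0, first_vf_in_pf is
 * 16 + 0 - 16 = 0, i.e. this PF's first VF is engine-VF 0. On the second
 * path, MAX_NUM_VFS_BB is subtracted so each engine numbers its VFs from 0.
 */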

static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
				     int vfid, bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}
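
/* Editorial note (not in the original source): the WAS_ERROR_VF bitmap
 * registers hold 32 VFs each, hence the addressing above:
 *	reg = base + (abs_vfid / 32) * 4;	// (abs_vfid >> 5) * 4
 *	bit = abs_vfid % 32;			// abs_vfid & 0x1f
 * Writing that bit to the _CLR register clears the sticky error flag.
 */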

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}
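
/* Editorial note (not in the original source): qed_fid_pretend() makes
 * subsequent register accesses through this PTT appear to come from the
 * given function ID, which is how the PF programs VF-private registers
 * such as IGU_REG_VF_CONFIGURATION. The paired "unpretend" call restores
 * the PF's own concrete FID; omitting it would misdirect later PF accesses.
 */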

static int
qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
{
	u8 current_max = 0;
	int i;

	/* For AH onward, configuration is per-PF. Find the maximum of all
	 * the currently enabled child VFs, and set the number to be that.
	 */
	if (!QED_IS_BB(p_hwfn->cdev)) {
		qed_for_each_vf(p_hwfn, i) {
			struct qed_vf_info *p_vf;

			p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
			if (!p_vf)
				continue;

			current_max = max_t(u8, current_max, p_vf->num_sbs);
		}
	}

	if (num_sbs > current_max)
		return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
					      abs_vf_id, num_sbs);

	return 0;
}

static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					   vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}

/**
 * qed_iov_config_perm_table() - Configure the permission zone table.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window for writing the registers.
 * @vf: VF info data.
 * @enable: The actual permission for this VF.
 *
 * In E4, queue zone permission table size is 320x9. There
 * are 320 VF queues for single engine device (256 for dual
 * engine device), and each entry has the following format:
 * {Valid, VF[7:0]}
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *p_block;
	struct cau_sb_entry sb_entry;
	int qid = 0;
	u32 val = 0;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	for (qid = 0; qid < num_rx_queues; qid++) {
		p_block = qed_get_igu_free_sb(p_hwfn, false);
		vf->igu_sbs[qid] = p_block->igu_sb_id;
		p_block->status &= ~QED_IGU_STATUS_FREE;
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

		qed_wr(p_hwfn, p_ptt,
		       IGU_REG_MAPPING_MEMORY +
		       sizeof(u32) * p_block->igu_sb_id, val);

		/* Configure the igu sb that was marked valid in CAU */
		qed_init_cau_sb_entry(p_hwfn, &sb_entry,
				      p_hwfn->rel_pf_id, vf->abs_vf_id, 1);

		qed_dmae_host2grc(p_hwfn, p_ptt,
				  (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  p_block->igu_sb_id * sizeof(u64), 2, NULL);
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}
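
/* Editorial note (not in the original source): each IGU CAM line written
 * above maps one status block to this VF:
 *	FUNCTION_NUMBER = vf->abs_vf_id, PF_VALID = 0 (it's a VF line),
 *	VECTOR_NUMBER = qid, VALID = 1
 * and the matching CAU entry is then DMA'd into CAU_REG_SB_VAR_MEMORY so
 * the same SB is configured on the CAU side as well.
 */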

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
	}

	vf->num_sbs = 0;
}

static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}

static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_iov_vf_init_params *p_params)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params link_params;
	struct qed_mcp_link_state link_state;
	u8 num_of_vf_available_chains = 0;
	struct qed_vf_info *vf = NULL;
	u16 qid, num_irqs;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return -EINVAL;
	}

	/* Perform sanity checking on the requested queue ids */
	for (i = 0; i < p_params->num_queues; i++) {
		u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
		u16 max_vf_qzone = min_vf_qzone +
				   FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;

		qid = p_params->req_rx_queue[i];
		if (qid < min_vf_qzone || qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
				  qid,
				  p_params->rel_vf_id,
				  min_vf_qzone, max_vf_qzone);
			return -EINVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
				  qid, p_params->rel_vf_id, max_vf_qzone);
			return -EINVAL;
		}

		/* If client *really* wants, Tx qid can be shared with PF */
		if (qid < min_vf_qzone)
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
				   p_params->rel_vf_id, qid, i);
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							      p_ptt,
							      vf, num_irqs);
	if (!num_of_vf_available_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct qed_vf_queue *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);
	}

	/* Update the link configuration in bulletin */
	memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
	       sizeof(link_params));
	memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
	memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
	       sizeof(link_caps));
	qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
			 &link_params, &link_state, &link_caps);

	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}

static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	memset(&vf->acquire, 0, sizeof(vf->acquire));

	/* Disabling interrupts and resetting the permission table were done
	 * during vf-close; however, we could get here without going through
	 * vf_close.
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}
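
/* Editorial sketch (not in the original source): a reply chain is built by
 * repeated qed_add_tlv() calls on a moving offset, as done throughout this
 * file, e.g.:
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_ACQUIRE, len);
 *	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 * Every list is terminated by CHANNEL_TLV_LIST_END.
 */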

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}

static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	/* Once PF copies the rc to the VF, the latter can continue
	 * and send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);
}
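
/* Editorial note (not in the original source): the reply is deliberately
 * DMA'd in two stages - body first (skipping the leading 8 bytes), then
 * the channel-ready flag is raised, and only then the status-carrying
 * header is copied. The VF polls the header, so by the time it observes
 * the status the rest of the reply is complete and the channel is already
 * ready for its next message.
 */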

static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
{
	switch (flag) {
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}

static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_iov_vf_mbx *p_mbx,
					    u8 status,
					    u16 tlvs_mask, u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & BIT(i)))
			continue;

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & BIT(i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	return total_len;
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

static struct
qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return NULL;

	return &vf->p_vf_info;
}

static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);

	if (!vf_info)
		return;

	/* Clear the VF mac */
	eth_zero_addr(vf_info->mac);

	vf_info->rx_accept_mode = 0;
	vf_info->tx_accept_mode = 0;
}

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i, j;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested fewer resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
		struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];

		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
			if (!p_queue->cids[j].p_cid)
				continue;

			qed_eth_queue_cid_release(p_hwfn,
						  p_queue->cids[j].p_cid);
			p_queue->cids[j].p_cid = NULL;
		}
	}

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}

/* Returns either 0, or log2 of the doorbell BAR size */
static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

	if (val)
		return val + 11;
	return 0;
}
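
/* Editorial note (an inference from the '+ 11' above, not documented in
 * this file): the register field apparently encodes the BAR size with a
 * 2 KB (2^11 byte) granularity, so log2(size in bytes) = val + 11; callers
 * turn this back into bytes with '1 << bar_size'.
 */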

static void
qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *p_vf,
				 struct vf_pf_resc_request *p_req,
				 struct pf_vf_resc *p_resp)
{
	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
	u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
		     qed_db_addr_vf(0, DQ_DEMS_LEGACY);
	u32 bar_size;

	p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);

	/* If VF didn't bother asking for QIDs then don't bother limiting
	 * the number of CIDs. The VF doesn't care about the number, and this
	 * has the likely result of causing an additional acquisition.
	 */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	/* If the doorbell bar was mapped by the VF, limit the VF CIDs to an
	 * amount that would make sure doorbells for all CIDs fall within
	 * the bar. If it doesn't, make sure the regview window is sufficient.
	 */
	if (p_vf->acquire.vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
		bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
		if (bar_size)
			bar_size = 1 << bar_size;

		if (p_hwfn->cdev->num_hwfns > 1)
			bar_size /= 2;
	} else {
		bar_size = PXP_VF_BAR0_DQ_LENGTH;
	}

	if (bar_size / db_size < 256)
		p_resp->num_cids = min_t(u8, p_resp->num_cids,
					 (u8)(bar_size / db_size));
}

static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *p_vf,
				      struct vf_pf_resc_request *p_req,
				      struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern VFs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				(u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = i;
	}

	/* Filter related information */
	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
					p_req->num_mac_filters);
	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
					 p_req->num_vlan_filters);

	qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs,
			   p_resp->num_rxqs,
			   p_req->num_txqs,
			   p_resp->num_txqs,
			   p_req->num_sbs,
			   p_resp->num_sbs,
			   p_req->num_mac_filters,
			   p_resp->num_mac_filters,
			   p_req->num_vlan_filters,
			   p_resp->num_vlan_filters,
			   p_req->num_mc_filters,
			   p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}

static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
					 struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  offsetof(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  offsetof(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  offsetof(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}

static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	int rc;

	memset(resp, 0, sizeof(*resp));

	/* Write the PF version so that the VF knows which version
	 * is supported - might be overridden later. This guarantees that
	 * a VF can recognize a legacy PF by the lack of versions in the reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		goto out;
	}

	/* Store the acquire message */
	memcpy(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB_E4;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	/* Share our ability to use multiple queue-ids only with VFs
	 * that request it.
	 */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	/* Share the sizes of the bars with VF */
	resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);

	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this field.
	 */
	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
					 req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						  &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = qed_sp_vf_start(p_hwfn, vf);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;
	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);

	vf->state = VF_ACQUIRED;

	/* Prepare Response */
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}
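
/* Editorial note (summarizing the states used in this file, not in the
 * original source): a VF must be in VF_FREE or VF_STOPPED to ACQUIRE
 * (checked at the top of the handler above), moves to VF_ACQUIRED on
 * success, and to VF_ENABLED once its vport is started in
 * qed_iov_vf_mbx_start_vport() below.
 */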

static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
				  struct qed_vf_info *p_vf, bool val)
{
	struct qed_sp_vport_update_params params;
	int rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return 0;
	}

	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (!rc) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}

static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf)
{
	struct qed_filter_ucast filter;
	int rc = 0;
	int i;

	memset(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = QED_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = QED_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}

static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
				   struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;

	if ((events & BIT(VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}

static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
					  struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;
	struct qed_filter_ucast filter;

	if (!p_vf->vport_instance)
		return -EINVAL;

	if ((events & BIT(MAC_ADDR_FORCED)) ||
	    p_vf->p_vf_info.is_trusted_configured) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_MAC;
		filter.opcode = QED_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}

		if (p_vf->p_vf_info.is_trusted_configured)
			p_vf->configured_features |=
				BIT(VFPF_BULLETIN_MAC_ADDR);
		else
			p_vf->configured_features |=
				BIT(MAC_ADDR_FORCED);
	}

	if (events & BIT(VLAN_ADDR_FORCED)) {
		struct qed_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
					      QED_FILTER_FLUSH;

		/* Send the ramrod */
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		memset(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ? 1
				      : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = qed_sp_vport_update(p_hwfn,
					 &vport_update,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
			struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
			struct qed_queue_cid *p_cid = NULL;

			/* There can be at most 1 Rx queue on qzone. Find it */
			p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
			if (!p_cid)
				continue;

			rc = qed_sp_eth_rx_queues_update(p_hwfn,
							 (void **)&p_cid,
							 1, 0, 1,
							 QED_SPQ_MODE_EBLOCK,
							 NULL);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed to send Rx update for queue[0x%04x]\n",
					  p_cid->rel.queue_id);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}
1887 static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
1888 struct qed_ptt *p_ptt,
1889 struct qed_vf_info *vf)
1891 struct qed_sp_vport_start_params params = { 0 };
1892 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
1893 struct vfpf_vport_start_tlv *start;
1894 u8 status = PFVF_STATUS_SUCCESS;
1895 struct qed_vf_info *vf_info;
1896 u64 *p_bitmap;
1897 int sb_id;
1898 int rc;
1900 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
1901 if (!vf_info) {
1902 DP_NOTICE(p_hwfn->cdev,
1903 "Failed to get VF info, invalid vfid [%d]\n",
1904 vf->relative_vf_id);
1905 return;
1908 vf->state = VF_ENABLED;
1909 start = &mbx->req_virt->start_vport;
1911 qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1913 /* Initialize Status block in CAU */
1914 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1915 if (!start->sb_addr[sb_id]) {
1916 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1917 "VF[%d] did not fill the address of SB %d\n",
1918 vf->relative_vf_id, sb_id);
1919 break;
1922 qed_int_cau_conf_sb(p_hwfn, p_ptt,
1923 start->sb_addr[sb_id],
1924 vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
1927 vf->mtu = start->mtu;
1928 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
1930 /* Take into consideration configuration forced by hypervisor;
1931 * If none is configured, use the supplied VF values [for old
1932 * vfs that would still be fine, since they passed '0' as padding].
1934 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
1935 if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
1936 u8 vf_req = start->only_untagged;
1938 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
1939 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
1942 params.tpa_mode = start->tpa_mode;
1943 params.remove_inner_vlan = start->inner_vlan_removal;
1944 params.tx_switching = true;
1946 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
1947 params.drop_ttl0 = false;
1948 params.concrete_fid = vf->concrete_fid;
1949 params.opaque_fid = vf->opaque_fid;
1950 params.vport_id = vf->vport_id;
1951 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1952 params.mtu = vf->mtu;
1954 /* Non trusted VFs should enable control frame filtering */
1955 params.check_mac = !vf->p_vf_info.is_trusted_configured;
1957 rc = qed_sp_eth_vport_start(p_hwfn, &params);
1958 if (rc) {
1959 DP_ERR(p_hwfn,
1960 "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
1961 status = PFVF_STATUS_FAILURE;
1962 } else {
1963 vf->vport_instance++;
1965 /* Force configuration if needed on the newly opened vport */
1966 qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
1968 __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
1970 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
1971 sizeof(struct pfvf_def_resp_tlv), status);
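/* Handle a VF's VPORT_TEARDOWN mailbox request; a VF that still has
 * active Rx/Tx queues at this point is considered malicious.
 */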
1974 static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
1975 struct qed_ptt *p_ptt,
1976 struct qed_vf_info *vf)
1978 u8 status = PFVF_STATUS_SUCCESS;
1979 int rc;
1981 vf->vport_instance--;
1982 vf->spoof_chk = false;
1984 if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
1985 (qed_iov_validate_active_txq(p_hwfn, vf))) {
1986 vf->b_malicious = true;
1987 DP_NOTICE(p_hwfn,
1988 "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
1989 vf->abs_vf_id);
1990 status = PFVF_STATUS_MALICIOUS;
1991 goto out;
1994 rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
1995 if (rc) {
1996 DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
1997 rc);
1998 status = PFVF_STATUS_FAILURE;
2001 /* Forget the configuration on the vport */
2002 vf->configured_features = 0;
2003 memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2005 out:
2006 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2007 sizeof(struct pfvf_def_resp_tlv), status);
2010 static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
2011 struct qed_ptt *p_ptt,
2012 struct qed_vf_info *vf,
2013 u8 status, bool b_legacy)
2015 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2016 struct pfvf_start_queue_resp_tlv *p_tlv;
2017 struct vfpf_start_rxq_tlv *req;
2018 u16 length;
2020 mbx->offset = (u8 *)mbx->reply_virt;
2022 /* Taking a bigger struct instead of adding a TLV to the list was a
2023 * mistake, but one which we're now stuck with, as some older
2024 * clients assume the size of the previous response.
2025 */
2026 if (!b_legacy)
2027 length = sizeof(*p_tlv);
2028 else
2029 length = sizeof(struct pfvf_def_resp_tlv);
2031 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
2032 length);
2033 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2034 sizeof(struct channel_list_end_tlv));
2036 /* Update the TLV with the response */
2037 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2038 req = &mbx->req_virt->start_rxq;
2039 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2040 offsetof(struct mstorm_vf_zone,
2041 non_trigger.eth_rx_queue_producers) +
2042 sizeof(struct eth_rx_prod_data) * req->rx_qid;
2045 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
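/* Retrieve the qid-usage index for a queue-related VF request. VFs
 * lacking the QUEUE_QIDS capability get a fixed legacy index; otherwise
 * the index is taken from the CHANNEL_TLV_QID TLV and validated, with
 * QED_IOV_QID_INVALID returned if it is missing or out-of-bounds.
 */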
2048 static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
2049 struct qed_vf_info *p_vf, bool b_is_tx)
2051 struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2052 struct vfpf_qid_tlv *p_qid_tlv;
2054 /* Search for the qid if the VF indicated it is going to provide it */
2055 if (!(p_vf->acquire.vfdev_info.capabilities &
2056 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2057 if (b_is_tx)
2058 return QED_IOV_LEGACY_QID_TX;
2059 else
2060 return QED_IOV_LEGACY_QID_RX;
2063 p_qid_tlv = (struct vfpf_qid_tlv *)
2064 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2065 CHANNEL_TLV_QID);
2066 if (!p_qid_tlv) {
2067 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2068 "VF[%2x]: Failed to provide qid\n",
2069 p_vf->relative_vf_id);
2071 return QED_IOV_QID_INVALID;
2074 if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2075 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2076 "VF[%02x]: Provided qid out-of-bounds %02x\n",
2077 p_vf->relative_vf_id, p_qid_tlv->qid);
2078 return QED_IOV_QID_INVALID;
2081 return p_qid_tlv->qid;
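/* Handle a VF's START_RXQ mailbox request - validate the queue and SB,
 * acquire a queue-cid, reset the Rx producer for non-legacy VFs and
 * send the Rx-queue start ramrod.
 */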
2084 static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
2085 struct qed_ptt *p_ptt,
2086 struct qed_vf_info *vf)
2088 struct qed_queue_start_common_params params;
2089 struct qed_queue_cid_vf_params vf_params;
2090 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2091 u8 status = PFVF_STATUS_NO_RESOURCE;
2092 u8 qid_usage_idx, vf_legacy = 0;
2093 struct vfpf_start_rxq_tlv *req;
2094 struct qed_vf_queue *p_queue;
2095 struct qed_queue_cid *p_cid;
2096 struct qed_sb_info sb_dummy;
2097 int rc;
2099 req = &mbx->req_virt->start_rxq;
2101 if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2102 QED_IOV_VALIDATE_Q_DISABLE) ||
2103 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2104 goto out;
2106 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2107 if (qid_usage_idx == QED_IOV_QID_INVALID)
2108 goto out;
2110 p_queue = &vf->vf_queues[req->rx_qid];
2111 if (p_queue->cids[qid_usage_idx].p_cid)
2112 goto out;
2114 vf_legacy = qed_vf_calculate_legacy(vf);
2116 /* Acquire a new queue-cid */
2117 memset(&params, 0, sizeof(params));
2118 params.queue_id = p_queue->fw_rx_qid;
2119 params.vport_id = vf->vport_id;
2120 params.stats_id = vf->abs_vf_id + 0x10;
2121 /* Since IGU index is passed via sb_info, construct a dummy one */
2122 memset(&sb_dummy, 0, sizeof(sb_dummy));
2123 sb_dummy.igu_sb_id = req->hw_sb;
2124 params.p_sb = &sb_dummy;
2125 params.sb_idx = req->sb_index;
2127 memset(&vf_params, 0, sizeof(vf_params));
2128 vf_params.vfid = vf->relative_vf_id;
2129 vf_params.vf_qid = (u8)req->rx_qid;
2130 vf_params.vf_legacy = vf_legacy;
2131 vf_params.qid_usage_idx = qid_usage_idx;
2132 p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2133 &params, true, &vf_params);
2134 if (!p_cid)
2135 goto out;
2137 /* Legacy VFs have their Producers in a different location, which they
2138 * calculate on their own and clean the producer prior to this.
2139 */
2140 if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
2141 REG_WR(p_hwfn,
2142 GTT_BAR0_MAP_REG_MSDM_RAM +
2143 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2144 0);
2146 rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
2147 req->bd_max_bytes,
2148 req->rxq_addr,
2149 req->cqe_pbl_addr, req->cqe_pbl_size);
2150 if (rc) {
2151 status = PFVF_STATUS_FAILURE;
2152 qed_eth_queue_cid_release(p_hwfn, p_cid);
2153 } else {
2154 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2155 p_queue->cids[qid_usage_idx].b_is_tx = false;
2156 status = PFVF_STATUS_SUCCESS;
2157 vf->num_active_rxqs++;
2160 out:
2161 qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2162 !!(vf_legacy &
2163 QED_QCID_LEGACY_VF_RX_PROD));
2166 static void
2167 qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2168 struct qed_tunnel_info *p_tun,
2169 u16 tunn_feature_mask)
2171 p_resp->tunn_feature_mask = tunn_feature_mask;
2172 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2173 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2174 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2175 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2176 p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2177 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2178 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2179 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2180 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2181 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2182 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2183 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2186 static void
2187 __qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2188 struct qed_tunn_update_type *p_tun,
2189 enum qed_tunn_mode mask, u8 tun_cls)
2191 if (p_req->tun_mode_update_mask & BIT(mask)) {
2192 p_tun->b_update_mode = true;
2194 if (p_req->tunn_mode & BIT(mask))
2195 p_tun->b_mode_enabled = true;
2198 p_tun->tun_cls = tun_cls;
2201 static void
2202 qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2203 struct qed_tunn_update_type *p_tun,
2204 struct qed_tunn_update_udp_port *p_port,
2205 enum qed_tunn_mode mask,
2206 u8 tun_cls, u8 update_port, u16 port)
2208 if (update_port) {
2209 p_port->b_update_port = true;
2210 p_port->port = port;
2213 __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2216 static bool
2217 qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2219 bool b_update_requested = false;
2221 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2222 p_req->update_geneve_port || p_req->update_vxlan_port)
2223 b_update_requested = true;
2225 return b_update_requested;
2228 static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
2230 if (tun->b_update_mode && !tun->b_mode_enabled) {
2231 tun->b_update_mode = false;
2232 *rc = -EINVAL;
2236 static int
2237 qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
2238 u16 *tun_features, bool *update,
2239 struct qed_tunnel_info *tun_src)
2241 struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
2242 struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
2243 u16 bultn_vxlan_port, bultn_geneve_port;
2244 void *cookie = p_hwfn->cdev->ops_cookie;
2245 int i, rc = 0;
2247 *tun_features = p_hwfn->cdev->tunn_feature_mask;
2248 bultn_vxlan_port = tun->vxlan_port.port;
2249 bultn_geneve_port = tun->geneve_port.port;
2250 qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
2251 qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
2252 qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
2253 qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
2254 qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);
2256 if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
2257 (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2258 tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2259 tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2260 tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2261 tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
2262 tun_src->b_update_rx_cls = false;
2263 tun_src->b_update_tx_cls = false;
2264 rc = -EINVAL;
2267 if (tun_src->vxlan_port.b_update_port) {
2268 if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
2269 tun_src->vxlan_port.b_update_port = false;
2270 } else {
2271 *update = true;
2272 bultn_vxlan_port = tun_src->vxlan_port.port;
2276 if (tun_src->geneve_port.b_update_port) {
2277 if (tun_src->geneve_port.port == tun->geneve_port.port) {
2278 tun_src->geneve_port.b_update_port = false;
2279 } else {
2280 *update = true;
2281 bultn_geneve_port = tun_src->geneve_port.port;
2285 qed_for_each_vf(p_hwfn, i) {
2286 qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
2287 bultn_geneve_port);
2290 qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2291 ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);
2293 return rc;
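/* Handle a VF's UPDATE_TUNN_PARAM mailbox request - validate the
 * requested tunnel configuration, apply whatever remains acceptable to
 * the device, and propagate the resulting UDP ports to the VF bulletins.
 */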
2296 static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
2297 struct qed_ptt *p_ptt,
2298 struct qed_vf_info *p_vf)
2300 struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
2301 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2302 struct pfvf_update_tunn_param_tlv *p_resp;
2303 struct vfpf_update_tunn_param_tlv *p_req;
2304 u8 status = PFVF_STATUS_SUCCESS;
2305 bool b_update_required = false;
2306 struct qed_tunnel_info tunn;
2307 u16 tunn_feature_mask = 0;
2308 int i, rc = 0;
2310 mbx->offset = (u8 *)mbx->reply_virt;
2312 memset(&tunn, 0, sizeof(tunn));
2313 p_req = &mbx->req_virt->tunn_param_update;
2315 if (!qed_iov_pf_validate_tunn_param(p_req)) {
2316 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2317 "No tunnel update requested by VF\n");
2318 status = PFVF_STATUS_FAILURE;
2319 goto send_resp;
2322 tunn.b_update_rx_cls = p_req->update_tun_cls;
2323 tunn.b_update_tx_cls = p_req->update_tun_cls;
2325 qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2326 QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2327 p_req->update_vxlan_port,
2328 p_req->vxlan_port);
2329 qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2330 QED_MODE_L2GENEVE_TUNN,
2331 p_req->l2geneve_clss,
2332 p_req->update_geneve_port,
2333 p_req->geneve_port);
2334 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2335 QED_MODE_IPGENEVE_TUNN,
2336 p_req->ipgeneve_clss);
2337 __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2338 QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
2339 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2340 QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);
2342 /* If the PF modifies the VF's request, it should still return an error
2343 * in case of a partial or modified configuration, as opposed to the
2344 * requested one.
2345 */
2346 rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
2347 &b_update_required, &tunn);
2349 if (rc)
2350 status = PFVF_STATUS_FAILURE;
2352 /* Does the QED client need to update anything? */
2353 if (b_update_required) {
2354 u16 geneve_port;
2356 rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2357 QED_SPQ_MODE_EBLOCK, NULL);
2358 if (rc)
2359 status = PFVF_STATUS_FAILURE;
2361 geneve_port = p_tun->geneve_port.port;
2362 qed_for_each_vf(p_hwfn, i) {
2363 qed_iov_bulletin_set_udp_ports(p_hwfn, i,
2364 p_tun->vxlan_port.port,
2365 geneve_port);
2369 send_resp:
2370 p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
2371 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2373 qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2374 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2375 sizeof(struct channel_list_end_tlv));
2377 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2380 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
2381 struct qed_ptt *p_ptt,
2382 struct qed_vf_info *p_vf,
2383 u32 cid, u8 status)
2385 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2386 struct pfvf_start_queue_resp_tlv *p_tlv;
2387 bool b_legacy = false;
2388 u16 length;
2390 mbx->offset = (u8 *)mbx->reply_virt;
2392 /* Taking a bigger struct instead of adding a TLV to the list was a
2393 * mistake, but one which we're now stuck with, as some older
2394 * clients assume the size of the previous response.
2395 */
2396 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2397 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2398 b_legacy = true;
2400 if (!b_legacy)
2401 length = sizeof(*p_tlv);
2402 else
2403 length = sizeof(struct pfvf_def_resp_tlv);
2405 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
2406 length);
2407 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2408 sizeof(struct channel_list_end_tlv));
2410 /* Update the TLV with the response */
2411 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2412 p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
2414 qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
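/* Handle a VF's START_TXQ mailbox request - validate the queue and SB,
 * acquire a queue-cid and send the Tx-queue start ramrod on the PQ
 * assigned to this VF.
 */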
2417 static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
2418 struct qed_ptt *p_ptt,
2419 struct qed_vf_info *vf)
2421 struct qed_queue_start_common_params params;
2422 struct qed_queue_cid_vf_params vf_params;
2423 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2424 u8 status = PFVF_STATUS_NO_RESOURCE;
2425 struct vfpf_start_txq_tlv *req;
2426 struct qed_vf_queue *p_queue;
2427 struct qed_queue_cid *p_cid;
2428 struct qed_sb_info sb_dummy;
2429 u8 qid_usage_idx, vf_legacy;
2430 u32 cid = 0;
2431 int rc;
2432 u16 pq;
2434 memset(&params, 0, sizeof(params));
2435 req = &mbx->req_virt->start_txq;
2437 if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2438 QED_IOV_VALIDATE_Q_NA) ||
2439 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2440 goto out;
2442 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2443 if (qid_usage_idx == QED_IOV_QID_INVALID)
2444 goto out;
2446 p_queue = &vf->vf_queues[req->tx_qid];
2447 if (p_queue->cids[qid_usage_idx].p_cid)
2448 goto out;
2450 vf_legacy = qed_vf_calculate_legacy(vf);
2452 /* Acquire a new queue-cid */
2453 params.queue_id = p_queue->fw_tx_qid;
2454 params.vport_id = vf->vport_id;
2455 params.stats_id = vf->abs_vf_id + 0x10;
2457 /* Since IGU index is passed via sb_info, construct a dummy one */
2458 memset(&sb_dummy, 0, sizeof(sb_dummy));
2459 sb_dummy.igu_sb_id = req->hw_sb;
2460 params.p_sb = &sb_dummy;
2461 params.sb_idx = req->sb_index;
2463 memset(&vf_params, 0, sizeof(vf_params));
2464 vf_params.vfid = vf->relative_vf_id;
2465 vf_params.vf_qid = (u8)req->tx_qid;
2466 vf_params.vf_legacy = vf_legacy;
2467 vf_params.qid_usage_idx = qid_usage_idx;
2469 p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2470 &params, false, &vf_params);
2471 if (!p_cid)
2472 goto out;
2474 pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
2475 rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
2476 req->pbl_addr, req->pbl_size, pq);
2477 if (rc) {
2478 status = PFVF_STATUS_FAILURE;
2479 qed_eth_queue_cid_release(p_hwfn, p_cid);
2480 } else {
2481 status = PFVF_STATUS_SUCCESS;
2482 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2483 p_queue->cids[qid_usage_idx].b_is_tx = true;
2484 cid = p_cid->cid;
2487 out:
2488 qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
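/* Close a single VF Rx queue, after validating that the given
 * qid-usage index actually holds an active Rx queue-cid.
 */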
2491 static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
2492 struct qed_vf_info *vf,
2493 u16 rxq_id,
2494 u8 qid_usage_idx, bool cqe_completion)
2496 struct qed_vf_queue *p_queue;
2497 int rc = 0;
2499 if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
2500 DP_VERBOSE(p_hwfn,
2501 QED_MSG_IOV,
2502 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2503 vf->relative_vf_id, rxq_id, qid_usage_idx);
2504 return -EINVAL;
2507 p_queue = &vf->vf_queues[rxq_id];
2509 /* We've validated the index and the existence of the active RXQ -
2510 * now we need to make sure that it's using the correct qid.
2511 */
2512 if (!p_queue->cids[qid_usage_idx].p_cid ||
2513 p_queue->cids[qid_usage_idx].b_is_tx) {
2514 struct qed_queue_cid *p_cid;
2516 p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
2517 DP_VERBOSE(p_hwfn,
2518 QED_MSG_IOV,
2519 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2520 vf->relative_vf_id,
2521 rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
2522 return -EINVAL;
2525 /* Now that we know we have a valid Rx-queue - close it */
2526 rc = qed_eth_rx_queue_stop(p_hwfn,
2527 p_queue->cids[qid_usage_idx].p_cid,
2528 false, cqe_completion);
2529 if (rc)
2530 return rc;
2532 p_queue->cids[qid_usage_idx].p_cid = NULL;
2533 vf->num_active_rxqs--;
2535 return 0;
2538 static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
2539 struct qed_vf_info *vf,
2540 u16 txq_id, u8 qid_usage_idx)
2542 struct qed_vf_queue *p_queue;
2543 int rc = 0;
2545 if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
2546 return -EINVAL;
2548 p_queue = &vf->vf_queues[txq_id];
2549 if (!p_queue->cids[qid_usage_idx].p_cid ||
2550 !p_queue->cids[qid_usage_idx].b_is_tx)
2551 return -EINVAL;
2553 rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
2554 if (rc)
2555 return rc;
2557 p_queue->cids[qid_usage_idx].p_cid = NULL;
2558 return 0;
2561 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
2562 struct qed_ptt *p_ptt,
2563 struct qed_vf_info *vf)
2565 u16 length = sizeof(struct pfvf_def_resp_tlv);
2566 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2567 u8 status = PFVF_STATUS_FAILURE;
2568 struct vfpf_stop_rxqs_tlv *req;
2569 u8 qid_usage_idx;
2570 int rc;
2572 /* There has never been an official driver that used this interface
2573 * for stopping multiple queues, and it is now considered deprecated.
2574 * Validate this isn't used here.
2575 */
2576 req = &mbx->req_virt->stop_rxqs;
2577 if (req->num_rxqs != 1) {
2578 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2579 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2580 vf->relative_vf_id);
2581 status = PFVF_STATUS_NOT_SUPPORTED;
2582 goto out;
2585 /* Find which qid-index is associated with the queue */
2586 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2587 if (qid_usage_idx == QED_IOV_QID_INVALID)
2588 goto out;
2590 rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2591 qid_usage_idx, req->cqe_completion);
2592 if (!rc)
2593 status = PFVF_STATUS_SUCCESS;
2594 out:
2595 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2596 length, status);
2599 static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
2600 struct qed_ptt *p_ptt,
2601 struct qed_vf_info *vf)
2603 u16 length = sizeof(struct pfvf_def_resp_tlv);
2604 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2605 u8 status = PFVF_STATUS_FAILURE;
2606 struct vfpf_stop_txqs_tlv *req;
2607 u8 qid_usage_idx;
2608 int rc;
2610 /* There has never been an official driver that used this interface
2611 * for stopping multiple queues, and it is now considered deprecated.
2612 * Validate this isn't used here.
2613 */
2614 req = &mbx->req_virt->stop_txqs;
2615 if (req->num_txqs != 1) {
2616 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2617 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2618 vf->relative_vf_id);
2619 status = PFVF_STATUS_NOT_SUPPORTED;
2620 goto out;
2623 /* Find which qid-index is associated with the queue */
2624 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2625 if (qid_usage_idx == QED_IOV_QID_INVALID)
2626 goto out;
2628 rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
2629 if (!rc)
2630 status = PFVF_STATUS_SUCCESS;
2632 out:
2633 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2634 length, status);
2637 static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
2638 struct qed_ptt *p_ptt,
2639 struct qed_vf_info *vf)
2641 struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
2642 u16 length = sizeof(struct pfvf_def_resp_tlv);
2643 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2644 struct vfpf_update_rxq_tlv *req;
2645 u8 status = PFVF_STATUS_FAILURE;
2646 u8 complete_event_flg;
2647 u8 complete_cqe_flg;
2648 u8 qid_usage_idx;
2649 int rc;
2650 u8 i;
2652 req = &mbx->req_virt->update_rxq;
2653 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2654 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2656 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2657 if (qid_usage_idx == QED_IOV_QID_INVALID)
2658 goto out;
2660 /* There shouldn't exist a VF that uses queue-qids yet uses this
2661 * API with multiple Rx queues. Validate this.
2662 */
2663 if ((vf->acquire.vfdev_info.capabilities &
2664 VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
2665 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2666 "VF[%d] supports QIDs but sends multiple queues\n",
2667 vf->relative_vf_id);
2668 goto out;
2671 /* Validate inputs - for the legacy case this is still true since
2672 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2673 */
2674 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2675 if (!qed_iov_validate_rxq(p_hwfn, vf, i,
2676 QED_IOV_VALIDATE_Q_NA) ||
2677 !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2678 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2679 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2680 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2681 vf->relative_vf_id, req->rx_qid,
2682 req->num_rxqs);
2683 goto out;
2687 /* Prepare the handlers */
2688 for (i = 0; i < req->num_rxqs; i++) {
2689 u16 qid = req->rx_qid + i;
2691 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2694 rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2695 req->num_rxqs,
2696 complete_cqe_flg,
2697 complete_event_flg,
2698 QED_SPQ_MODE_EBLOCK, NULL);
2699 if (rc)
2700 goto out;
2702 status = PFVF_STATUS_SUCCESS;
2703 out:
2704 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2705 length, status);
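/* Walk a VF request's TLV chain looking for a TLV of type req_type;
 * returns NULL on a zero-length TLV, on a buffer overrun, or when the
 * requested type isn't found before CHANNEL_TLV_LIST_END.
 */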
2708 void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
2709 void *p_tlvs_list, u16 req_type)
2711 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2712 int len = 0;
2714 do {
2715 if (!p_tlv->length) {
2716 DP_NOTICE(p_hwfn, "Zero length TLV found\n");
2717 return NULL;
2720 if (p_tlv->type == req_type) {
2721 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2722 "Extended tlv type %d, length %d found\n",
2723 p_tlv->type, p_tlv->length);
2724 return p_tlv;
2727 len += p_tlv->length;
2728 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2730 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2731 DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
2732 return NULL;
2734 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2736 return NULL;
2739 static void
2740 qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
2741 struct qed_sp_vport_update_params *p_data,
2742 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2744 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2745 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2747 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2748 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2749 if (!p_act_tlv)
2750 return;
2752 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2753 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2754 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2755 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2756 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
2759 static void
2760 qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
2761 struct qed_sp_vport_update_params *p_data,
2762 struct qed_vf_info *p_vf,
2763 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2765 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2766 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2768 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2769 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2770 if (!p_vlan_tlv)
2771 return;
2773 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2775 /* Ignore the VF request if we're forcing a vlan */
2776 if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
2777 p_data->update_inner_vlan_removal_flg = 1;
2778 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2781 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
2784 static void
2785 qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
2786 struct qed_sp_vport_update_params *p_data,
2787 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2789 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2790 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2792 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2793 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2794 tlv);
2795 if (!p_tx_switch_tlv)
2796 return;
2798 p_data->update_tx_switching_flg = 1;
2799 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2800 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
2803 static void
2804 qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2805 struct qed_sp_vport_update_params *p_data,
2806 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2808 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2809 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2811 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2812 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2813 if (!p_mcast_tlv)
2814 return;
2816 p_data->update_approx_mcast_flg = 1;
2817 memcpy(p_data->bins, p_mcast_tlv->bins,
2818 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2819 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2822 static void
2823 qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
2824 struct qed_sp_vport_update_params *p_data,
2825 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2827 struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
2828 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2829 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2831 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2832 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2833 if (!p_accept_tlv)
2834 return;
2836 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2837 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2838 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2839 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2840 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
2843 static void
2844 qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
2845 struct qed_sp_vport_update_params *p_data,
2846 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2848 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2849 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2851 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2852 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2853 tlv);
2854 if (!p_accept_any_vlan)
2855 return;
2857 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2858 p_data->update_accept_any_vlan_flg =
2859 p_accept_any_vlan->update_accept_any_vlan_flg;
2860 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2863 static void
2864 qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
2865 struct qed_vf_info *vf,
2866 struct qed_sp_vport_update_params *p_data,
2867 struct qed_rss_params *p_rss,
2868 struct qed_iov_vf_mbx *p_mbx,
2869 u16 *tlvs_mask, u16 *tlvs_accepted)
2871 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2872 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2873 bool b_reject = false;
2874 u16 table_size;
2875 u16 i, q_idx;
2877 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2878 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2879 if (!p_rss_tlv) {
2880 p_data->rss_params = NULL;
2881 return;
2884 memset(p_rss, 0, sizeof(struct qed_rss_params));
2886 p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
2887 VFPF_UPDATE_RSS_CONFIG_FLAG);
2888 p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
2889 VFPF_UPDATE_RSS_CAPS_FLAG);
2890 p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
2891 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2892 p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
2893 VFPF_UPDATE_RSS_KEY_FLAG);
2895 p_rss->rss_enable = p_rss_tlv->rss_enable;
2896 p_rss->rss_eng_id = vf->relative_vf_id + 1;
2897 p_rss->rss_caps = p_rss_tlv->rss_caps;
2898 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2899 memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
2901 table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
2902 (1 << p_rss_tlv->rss_table_size_log));
2904 for (i = 0; i < table_size; i++) {
2905 struct qed_queue_cid *p_cid;
2907 q_idx = p_rss_tlv->rss_ind_table[i];
2908 if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
2909 QED_IOV_VALIDATE_Q_ENABLE)) {
2910 DP_VERBOSE(p_hwfn,
2911 QED_MSG_IOV,
2912 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2913 vf->relative_vf_id, q_idx);
2914 b_reject = true;
2915 goto out;
2918 p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
2919 p_rss->rss_ind_table[i] = p_cid;
2922 p_data->rss_params = p_rss;
2923 out:
2924 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
2925 if (!b_reject)
2926 *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
2929 static void
2930 qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
2931 struct qed_vf_info *vf,
2932 struct qed_sp_vport_update_params *p_data,
2933 struct qed_sge_tpa_params *p_sge_tpa,
2934 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2936 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2937 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2939 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2940 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2942 if (!p_sge_tpa_tlv) {
2943 p_data->sge_tpa_params = NULL;
2944 return;
2947 memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
2949 p_sge_tpa->update_tpa_en_flg =
2950 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2951 p_sge_tpa->update_tpa_param_flg =
2952 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2953 VFPF_UPDATE_TPA_PARAM_FLAG);
2955 p_sge_tpa->tpa_ipv4_en_flg =
2956 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2957 p_sge_tpa->tpa_ipv6_en_flg =
2958 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2959 p_sge_tpa->tpa_pkt_split_flg =
2960 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2961 p_sge_tpa->tpa_hdr_data_split_flg =
2962 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2963 p_sge_tpa->tpa_gro_consistent_flg =
2964 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2966 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2967 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2968 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2969 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2970 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2972 p_data->sge_tpa_params = p_sge_tpa;
2974 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
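/* For untrusted VFs, silently strip the unmatched unicast/multicast
 * accept flags from the request, caching the modes the VF asked for.
 */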
2977 static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
2978 u8 vfid,
2979 struct qed_sp_vport_update_params *params,
2980 u16 *tlvs)
2982 u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
2983 struct qed_filter_accept_flags *flags = &params->accept_flags;
2984 struct qed_public_vf_info *vf_info;
2986 /* Untrusted VFs can't even be trusted to know that fact.
2987 * Simply indicate everything is configured fine, and trace
2988 * configuration 'behind their back'.
2989 */
2990 if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
2991 return 0;
2993 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
2995 if (flags->update_rx_mode_config) {
2996 vf_info->rx_accept_mode = flags->rx_accept_filter;
2997 if (!vf_info->is_trusted_configured)
2998 flags->rx_accept_filter &= ~mask;
3001 if (flags->update_tx_mode_config) {
3002 vf_info->tx_accept_mode = flags->tx_accept_filter;
3003 if (!vf_info->is_trusted_configured)
3004 flags->tx_accept_filter &= ~mask;
3007 return 0;
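/* Handle a VF's VPORT_UPDATE mailbox request - gather the extended
 * TLVs into a single vport-update ramrod, rejecting any that fail
 * validation, and report the accepted subset back to the VF.
 */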
3010 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
3011 struct qed_ptt *p_ptt,
3012 struct qed_vf_info *vf)
3014 struct qed_rss_params *p_rss_params = NULL;
3015 struct qed_sp_vport_update_params params;
3016 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3017 struct qed_sge_tpa_params sge_tpa_params;
3018 u16 tlvs_mask = 0, tlvs_accepted = 0;
3019 u8 status = PFVF_STATUS_SUCCESS;
3020 u16 length;
3021 int rc;
3023 /* Validate PF can send such a request */
3024 if (!vf->vport_instance) {
3025 DP_VERBOSE(p_hwfn,
3026 QED_MSG_IOV,
3027 "No VPORT instance available for VF[%d], failing vport update\n",
3028 vf->abs_vf_id);
3029 status = PFVF_STATUS_FAILURE;
3030 goto out;
3032 p_rss_params = vzalloc(sizeof(*p_rss_params));
3033 if (!p_rss_params) {
3034 status = PFVF_STATUS_FAILURE;
3035 goto out;
3038 memset(&params, 0, sizeof(params));
3039 params.opaque_fid = vf->opaque_fid;
3040 params.vport_id = vf->vport_id;
3041 params.rss_params = NULL;
3043 /* Search the extended TLVs list and update values
3044 * from the VF in struct qed_sp_vport_update_params.
3045 */
3046 qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3047 qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3048 qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3049 qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3050 qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3051 qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3052 qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
3053 &sge_tpa_params, mbx, &tlvs_mask);
3055 tlvs_accepted = tlvs_mask;
3057 /* Some of the extended TLVs need to be validated first; in that case,
3058 * they can update the mask without updating the accepted mask [so that
3059 * the PF can communicate to the VF that it has rejected its request].
3060 */
3061 qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3062 mbx, &tlvs_mask, &tlvs_accepted);
3064 if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
3065 &params, &tlvs_accepted)) {
3066 tlvs_accepted = 0;
3067 status = PFVF_STATUS_NOT_SUPPORTED;
3068 goto out;
3071 if (!tlvs_accepted) {
3072 if (tlvs_mask)
3073 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3074 "Upper-layer prevents VF vport configuration\n");
3075 else
3076 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3077 "No feature tlvs found for vport update\n");
3078 status = PFVF_STATUS_NOT_SUPPORTED;
3079 goto out;
3082 rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
3084 if (rc)
3085 status = PFVF_STATUS_FAILURE;
3087 out:
3088 vfree(p_rss_params);
3089 length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3090 tlvs_mask, tlvs_accepted);
3091 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
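/* Maintain a shadow copy of the VF's VLAN filters so they can be
 * restored once a forced-VLAN configuration is removed.
 */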
3094 static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
3095 struct qed_vf_info *p_vf,
3096 struct qed_filter_ucast *p_params)
3098 int i;
3100 /* First remove entries and then add new ones */
3101 if (p_params->opcode == QED_FILTER_REMOVE) {
3102 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3103 if (p_vf->shadow_config.vlans[i].used &&
3104 p_vf->shadow_config.vlans[i].vid ==
3105 p_params->vlan) {
3106 p_vf->shadow_config.vlans[i].used = false;
3107 break;
3109 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3110 DP_VERBOSE(p_hwfn,
3111 QED_MSG_IOV,
3112 "VF [%d] - Tries to remove a non-existing vlan\n",
3113 p_vf->relative_vf_id);
3114 return -EINVAL;
3116 } else if (p_params->opcode == QED_FILTER_REPLACE ||
3117 p_params->opcode == QED_FILTER_FLUSH) {
3118 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3119 p_vf->shadow_config.vlans[i].used = false;
3122 /* In forced mode, we're willing to remove entries - but we don't add
3123 * new ones.
3124 */
3125 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
3126 return 0;
3128 if (p_params->opcode == QED_FILTER_ADD ||
3129 p_params->opcode == QED_FILTER_REPLACE) {
3130 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3131 if (p_vf->shadow_config.vlans[i].used)
3132 continue;
3134 p_vf->shadow_config.vlans[i].used = true;
3135 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3136 break;
3139 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3140 DP_VERBOSE(p_hwfn,
3141 QED_MSG_IOV,
3142 "VF [%d] - Tries to configure more than %d vlan filters\n",
3143 p_vf->relative_vf_id,
3144 QED_ETH_VF_NUM_VLAN_FILTERS + 1);
3145 return -EINVAL;
3149 return 0;
3152 static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
3153 struct qed_vf_info *p_vf,
3154 struct qed_filter_ucast *p_params)
3156 int i;
3158 /* If we're in forced-mode, we don't allow any change */
3159 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
3160 return 0;
3162 /* Don't keep track of shadow copy since we don't intend to restore. */
3163 if (p_vf->p_vf_info.is_trusted_configured)
3164 return 0;
3166 /* First remove entries and then add new ones */
3167 if (p_params->opcode == QED_FILTER_REMOVE) {
3168 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3169 if (ether_addr_equal(p_vf->shadow_config.macs[i],
3170 p_params->mac)) {
3171 eth_zero_addr(p_vf->shadow_config.macs[i]);
3172 break;
3176 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3177 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3178 "MAC isn't configured\n");
3179 return -EINVAL;
3181 } else if (p_params->opcode == QED_FILTER_REPLACE ||
3182 p_params->opcode == QED_FILTER_FLUSH) {
3183 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
3184 eth_zero_addr(p_vf->shadow_config.macs[i]);
3187 /* List the new MAC address */
3188 if (p_params->opcode != QED_FILTER_ADD &&
3189 p_params->opcode != QED_FILTER_REPLACE)
3190 return 0;
3192 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3193 if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
3194 ether_addr_copy(p_vf->shadow_config.macs[i],
3195 p_params->mac);
3196 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3197 "Added MAC at %d entry in shadow\n", i);
3198 break;
3202 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3203 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
3204 return -EINVAL;
3207 return 0;
3210 static int
3211 qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
3212 struct qed_vf_info *p_vf,
3213 struct qed_filter_ucast *p_params)
3215 int rc = 0;
3217 if (p_params->type == QED_FILTER_MAC) {
3218 rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3219 if (rc)
3220 return rc;
3223 if (p_params->type == QED_FILTER_VLAN)
3224 rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3226 return rc;
3229 static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
3230 int vfid, struct qed_filter_ucast *params)
3232 struct qed_public_vf_info *vf;
3234 vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
3235 if (!vf)
3236 return -EINVAL;
3238 /* No real decision to make; Store the configured MAC */
3239 if (params->type == QED_FILTER_MAC ||
3240 params->type == QED_FILTER_MAC_VLAN) {
3241 ether_addr_copy(vf->mac, params->mac);
3243 if (vf->is_trusted_configured) {
3244 qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid);
3246 /* Update and post bulletin again */
3247 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3251 return 0;
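/* Handle a VF's UCAST_FILTER mailbox request - update the shadow
 * configuration and apply the filter, unless it conflicts with a
 * forced MAC/VLAN from the bulletin.
 */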
3254 static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
3255 struct qed_ptt *p_ptt,
3256 struct qed_vf_info *vf)
3258 struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3259 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3260 struct vfpf_ucast_filter_tlv *req;
3261 u8 status = PFVF_STATUS_SUCCESS;
3262 struct qed_filter_ucast params;
3263 int rc;
3265 /* Prepare the unicast filter params */
3266 memset(&params, 0, sizeof(struct qed_filter_ucast));
3267 req = &mbx->req_virt->ucast_filter;
3268 params.opcode = (enum qed_filter_opcode)req->opcode;
3269 params.type = (enum qed_filter_ucast_type)req->type;
3271 params.is_rx_filter = 1;
3272 params.is_tx_filter = 1;
3273 params.vport_to_remove_from = vf->vport_id;
3274 params.vport_to_add_to = vf->vport_id;
3275 memcpy(params.mac, req->mac, ETH_ALEN);
3276 params.vlan = req->vlan;
3278 DP_VERBOSE(p_hwfn,
3279 QED_MSG_IOV,
3280 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %pM, vlan 0x%04x\n",
3281 vf->abs_vf_id, params.opcode, params.type,
3282 params.is_rx_filter ? "RX" : "",
3283 params.is_tx_filter ? "TX" : "",
3284 params.vport_to_add_to,
3285 params.mac, params.vlan);
3287 if (!vf->vport_instance) {
3288 DP_VERBOSE(p_hwfn,
3289 QED_MSG_IOV,
3290 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
3291 vf->abs_vf_id);
3292 status = PFVF_STATUS_FAILURE;
3293 goto out;
3296 /* Update shadow copy of the VF configuration */
3297 if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
3298 status = PFVF_STATUS_FAILURE;
3299 goto out;
3302 /* Determine whether the unicast filtering is acceptable to the PF */
3303 if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
3304 (params.type == QED_FILTER_VLAN ||
3305 params.type == QED_FILTER_MAC_VLAN)) {
3306 /* Once a VLAN is forced or a PVID is set, do not allow adding or
3307 * replacing any further VLANs.
3308 */
3309 if (params.opcode == QED_FILTER_ADD ||
3310 params.opcode == QED_FILTER_REPLACE)
3311 status = PFVF_STATUS_FORCED;
3312 goto out;
3315 if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
3316 (params.type == QED_FILTER_MAC ||
3317 params.type == QED_FILTER_MAC_VLAN)) {
3318 if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
3319 (params.opcode != QED_FILTER_ADD &&
3320 params.opcode != QED_FILTER_REPLACE))
3321 status = PFVF_STATUS_FORCED;
3322 goto out;
3325 rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
3326 if (rc) {
3327 status = PFVF_STATUS_FAILURE;
3328 goto out;
3331 rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3332 QED_SPQ_MODE_CB, NULL);
3333 if (rc)
3334 status = PFVF_STATUS_FAILURE;
3336 out:
3337 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3338 sizeof(struct pfvf_def_resp_tlv), status);
3341 static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
3342 struct qed_ptt *p_ptt,
3343 struct qed_vf_info *vf)
3345 int i;
3347 /* Reset the SBs */
3348 for (i = 0; i < vf->num_sbs; i++)
3349 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3350 vf->igu_sbs[i],
3351 vf->opaque_fid, false);
3353 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3354 sizeof(struct pfvf_def_resp_tlv),
3355 PFVF_STATUS_SUCCESS);
3358 static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
3359 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
3361 u16 length = sizeof(struct pfvf_def_resp_tlv);
3362 u8 status = PFVF_STATUS_SUCCESS;
3364 /* Disable Interrupts for VF */
3365 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3367 /* Reset Permission table */
3368 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3370 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3371 length, status);
3374 static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
3375 struct qed_ptt *p_ptt,
3376 struct qed_vf_info *p_vf)
3378 u16 length = sizeof(struct pfvf_def_resp_tlv);
3379 u8 status = PFVF_STATUS_SUCCESS;
3380 int rc = 0;
3382 qed_iov_vf_cleanup(p_hwfn, p_vf);
3384 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3385 /* Stopping the VF */
3386 rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3387 p_vf->opaque_fid);
3389 if (rc) {
3390 DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
3391 rc);
3392 status = PFVF_STATUS_FAILURE;
3395 p_vf->state = VF_STOPPED;
3398 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3399 length, status);
3402 static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
3403 struct qed_ptt *p_ptt,
3404 struct qed_vf_info *p_vf)
3406 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3407 struct pfvf_read_coal_resp_tlv *p_resp;
3408 struct vfpf_read_coal_req_tlv *req;
3409 u8 status = PFVF_STATUS_FAILURE;
3410 struct qed_vf_queue *p_queue;
3411 struct qed_queue_cid *p_cid;
3412 u16 coal = 0, qid, i;
3413 bool b_is_rx;
3414 int rc = 0;
3416 mbx->offset = (u8 *)mbx->reply_virt;
3417 req = &mbx->req_virt->read_coal_req;
3419 qid = req->qid;
3420 b_is_rx = req->is_rx ? true : false;
3422 if (b_is_rx) {
3423 if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid,
3424 QED_IOV_VALIDATE_Q_ENABLE)) {
3425 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3426 "VF[%d]: Invalid Rx queue_id = %d\n",
3427 p_vf->abs_vf_id, qid);
3428 goto send_resp;
3431 p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3432 rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3433 if (rc)
3434 goto send_resp;
3435 } else {
3436 if (!qed_iov_validate_txq(p_hwfn, p_vf, qid,
3437 QED_IOV_VALIDATE_Q_ENABLE)) {
3438 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3439 "VF[%d]: Invalid Tx queue_id = %d\n",
3440 p_vf->abs_vf_id, qid);
3441 goto send_resp;
3443 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3444 p_queue = &p_vf->vf_queues[qid];
3445 if ((!p_queue->cids[i].p_cid) ||
3446 (!p_queue->cids[i].b_is_tx))
3447 continue;
3449 p_cid = p_queue->cids[i].p_cid;
3451 rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3452 if (rc)
3453 goto send_resp;
3454 break;
3458 status = PFVF_STATUS_SUCCESS;
3460 send_resp:
3461 p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ,
3462 sizeof(*p_resp));
3463 p_resp->coal = coal;
3465 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
3466 sizeof(struct channel_list_end_tlv));
3468 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
3471 static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
3472 struct qed_ptt *p_ptt,
3473 struct qed_vf_info *vf)
3475 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3476 struct vfpf_update_coalesce *req;
3477 u8 status = PFVF_STATUS_FAILURE;
3478 struct qed_queue_cid *p_cid;
3479 u16 rx_coal, tx_coal;
3480 int rc = 0, i;
3481 u16 qid;
3483 req = &mbx->req_virt->update_coalesce;
3485 rx_coal = req->rx_coal;
3486 tx_coal = req->tx_coal;
3487 qid = req->qid;
3489 if (!qed_iov_validate_rxq(p_hwfn, vf, qid,
3490 QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) {
3491 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3492 "VF[%d]: Invalid Rx queue_id = %d\n",
3493 vf->abs_vf_id, qid);
3494 goto out;
3497 if (!qed_iov_validate_txq(p_hwfn, vf, qid,
3498 QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) {
3499 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3500 "VF[%d]: Invalid Tx queue_id = %d\n",
3501 vf->abs_vf_id, qid);
3502 goto out;
3505 DP_VERBOSE(p_hwfn,
3506 QED_MSG_IOV,
3507 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3508 vf->abs_vf_id, rx_coal, tx_coal, qid);
3510 if (rx_coal) {
3511 p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3513 rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3514 if (rc) {
3515 DP_VERBOSE(p_hwfn,
3516 QED_MSG_IOV,
3517 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3518 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3519 goto out;
3521 vf->rx_coal = rx_coal;
3524 if (tx_coal) {
3525 struct qed_vf_queue *p_queue = &vf->vf_queues[qid];
3527 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3528 if (!p_queue->cids[i].p_cid)
3529 continue;
3531 if (!p_queue->cids[i].b_is_tx)
3532 continue;
3534 rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3535 p_queue->cids[i].p_cid);
3537 if (rc) {
3538 DP_VERBOSE(p_hwfn,
3539 QED_MSG_IOV,
3540 "VF[%d]: Unable to set tx queue coalesce\n",
3541 vf->abs_vf_id);
3542 goto out;
3545 vf->tx_coal = tx_coal;
3548 status = PFVF_STATUS_SUCCESS;
3549 out:
3550 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3551 sizeof(struct pfvf_def_resp_tlv), status);
3553 static int
3554 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
3555 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3557 int cnt;
3558 u32 val;
3560 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
3562 for (cnt = 0; cnt < 50; cnt++) {
3563 val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3564 if (!val)
3565 break;
3566 msleep(20);
3568 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
3570 if (cnt == 50) {
3571 DP_ERR(p_hwfn,
3572 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3573 p_vf->abs_vf_id, val);
3574 return -EBUSY;
3577 return 0;
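/* Sample the per-VOQ PBF consumer/producer counters, then poll until
 * each consumer has advanced by at least the initially observed
 * distance to its producer.
 */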
3580 static int
3581 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
3582 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3584 u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
3585 int i, cnt;
3587 /* Read initial consumers & producers */
3588 for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
3589 u32 prod;
3591 cons[i] = qed_rd(p_hwfn, p_ptt,
3592 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3593 i * 0x40);
3594 prod = qed_rd(p_hwfn, p_ptt,
3595 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3596 i * 0x40);
3597 distance[i] = prod - cons[i];
3600 /* Wait for consumers to pass the producers */
3601 i = 0;
3602 for (cnt = 0; cnt < 50; cnt++) {
3603 for (; i < MAX_NUM_VOQS_E4; i++) {
3604 u32 tmp;
3606 tmp = qed_rd(p_hwfn, p_ptt,
3607 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3608 i * 0x40);
3609 if (distance[i] > tmp - cons[i])
3610 break;
3613 if (i == MAX_NUM_VOQS_E4)
3614 break;
3616 msleep(20);
3619 if (cnt == 50) {
3620 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3621 p_vf->abs_vf_id, i);
3622 return -EBUSY;
3625 return 0;
3628 static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
3629 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3631 int rc;
3633 rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3634 if (rc)
3635 return rc;
3637 rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3638 if (rc)
3639 return rc;
3641 return 0;
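/* Execute the FLR cleanup flow for a single VF: poll HW until it
 * quiesces, issue the final cleanup ramrod, re-enable VF access and
 * mark the VF for acking towards the MFW.
 */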
3644 static int
3645 qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
3646 struct qed_ptt *p_ptt,
3647 u16 rel_vf_id, u32 *ack_vfs)
3649 struct qed_vf_info *p_vf;
3650 int rc = 0;
3652 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3653 if (!p_vf)
3654 return 0;
3656 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3657 (1ULL << (rel_vf_id % 64))) {
3658 u16 vfid = p_vf->abs_vf_id;
3660 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3661 "VF[%d] - Handling FLR\n", vfid);
3663 qed_iov_vf_cleanup(p_hwfn, p_vf);
3665 /* If VF isn't active, no need for anything but SW */
3666 if (!p_vf->b_init)
3667 goto cleanup;
3669 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3670 if (rc)
3671 goto cleanup;
3673 rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
3674 if (rc) {
3675 DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
3676 return rc;
3679 /* Workaround to make VF-PF channel ready, as FW
3680 * doesn't do that as a part of FLR.
3681 */
3682 REG_WR(p_hwfn,
3683 GTT_BAR0_MAP_REG_USDM_RAM +
3684 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3686 /* VF_STOPPED has to be set only after final cleanup
3687 * but prior to re-enabling the VF.
3688 */
3689 p_vf->state = VF_STOPPED;
3691 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3692 if (rc) {
3693 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3694 vfid);
3695 return rc;
3697 cleanup:
3698 /* Mark VF for ack and clean pending state */
3699 if (p_vf->state == VF_RESET)
3700 p_vf->state = VF_STOPPED;
3701 ack_vfs[vfid / 32] |= BIT((vfid % 32));
3702 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3703 ~(1ULL << (rel_vf_id % 64));
3704 p_vf->vf_mbx.b_pending_msg = false;
3707 return rc;
3710 static int
3711 qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3713 u32 ack_vfs[VF_MAX_STATIC / 32];
3714 int rc = 0;
3715 u16 i;
3717 memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3719 /* Since BRB <-> PRS interface can't be tested as part of the flr
3720 * polling due to HW limitations, simply sleep a bit. And since
3721 * there's no need to wait per-vf, do it before looping.
3722 */
3723 msleep(100);
3725 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
3726 qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3728 rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3729 return rc;
3732 bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
3734 bool found = false;
3735 u16 i;
3737 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
3738 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3739 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3740 "[%08x,...,%08x]: %08x\n",
3741 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3743 if (!p_hwfn->cdev->p_iov_info) {
3744 DP_NOTICE(p_hwfn, "VF FLR but no IOV\n");
3745 return false;
3748 /* Mark VFs */
3749 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
3750 struct qed_vf_info *p_vf;
3751 u8 vfid;
3753 p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
3754 if (!p_vf)
3755 continue;
3757 vfid = p_vf->abs_vf_id;
3758 if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3759 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3760 u16 rel_vf_id = p_vf->relative_vf_id;
3762 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3763 "VF[%d] [rel %d] got FLR-ed\n",
3764 vfid, rel_vf_id);
3766 p_vf->state = VF_RESET;
3768 /* No need to lock here, since pending_flr should only change
3769 * here and before ACKing the MFW. Since the MFW will not
3770 * trigger an additional attention for VF FLR until we ACK,
3771 * we're safe.
3772 */
3773 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
3774 found = true;
3778 return found;
3781 static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
3782 u16 vfid,
3783 struct qed_mcp_link_params *p_params,
3784 struct qed_mcp_link_state *p_link,
3785 struct qed_mcp_link_capabilities *p_caps)
3787 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
3788 vfid,
3789 false);
3790 struct qed_bulletin_content *p_bulletin;
3792 if (!p_vf)
3793 return;
3795 p_bulletin = p_vf->bulletin.p_virt;
3797 if (p_params)
3798 __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3799 if (p_link)
3800 __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3801 if (p_caps)
3802 __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3805 static int
3806 qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
3807 struct qed_ptt *p_ptt,
3808 struct qed_vf_info *p_vf)
3810 struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt;
3811 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3812 struct vfpf_bulletin_update_mac_tlv *p_req;
3813 u8 status = PFVF_STATUS_SUCCESS;
3814 int rc = 0;
3816 if (!p_vf->p_vf_info.is_trusted_configured) {
3817 DP_VERBOSE(p_hwfn,
3818 QED_MSG_IOV,
3819 "Blocking bulletin update request from untrusted VF[%d]\n",
3820 p_vf->abs_vf_id);
3821 status = PFVF_STATUS_NOT_SUPPORTED;
3822 rc = -EINVAL;
3823 goto send_status;
3826 p_req = &mbx->req_virt->bulletin_update_mac;
3827 ether_addr_copy(p_bulletin->mac, p_req->mac);
3828 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3829 "Updated bulletin of VF[%d] with requested MAC[%pM]\n",
3830 p_vf->abs_vf_id, p_req->mac);
3832 send_status:
3833 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3834 CHANNEL_TLV_BULLETIN_UPDATE_MAC,
3835 sizeof(struct pfvf_def_resp_tlv), status);
3836 return rc;
3839 static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
3840 struct qed_ptt *p_ptt, int vfid)
3842 struct qed_iov_vf_mbx *mbx;
3843 struct qed_vf_info *p_vf;
3845 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3846 if (!p_vf)
3847 return;
3849 mbx = &p_vf->vf_mbx;
3851 /* qed_iov_process_mbx_request */
3852 if (!mbx->b_pending_msg) {
3853 DP_NOTICE(p_hwfn,
3854 "VF[%02x]: Trying to process mailbox message when none is pending\n",
3855 p_vf->abs_vf_id);
3856 return;
3858 mbx->b_pending_msg = false;
3860 mbx->first_tlv = mbx->req_virt->first_tlv;
3862 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3863 "VF[%02x]: Processing mailbox message [type %04x]\n",
3864 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3866 /* check if tlv type is known */
3867 if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3868 !p_vf->b_malicious) {
3869 switch (mbx->first_tlv.tl.type) {
3870 case CHANNEL_TLV_ACQUIRE:
3871 qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3872 break;
3873 case CHANNEL_TLV_VPORT_START:
3874 qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3875 break;
3876 case CHANNEL_TLV_VPORT_TEARDOWN:
3877 qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3878 break;
3879 case CHANNEL_TLV_START_RXQ:
3880 qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3881 break;
3882 case CHANNEL_TLV_START_TXQ:
3883 qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3884 break;
3885 case CHANNEL_TLV_STOP_RXQS:
3886 qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3887 break;
3888 case CHANNEL_TLV_STOP_TXQS:
3889 qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3890 break;
3891 case CHANNEL_TLV_UPDATE_RXQ:
3892 qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3893 break;
3894 case CHANNEL_TLV_VPORT_UPDATE:
3895 qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3896 break;
3897 case CHANNEL_TLV_UCAST_FILTER:
3898 qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3899 break;
3900 case CHANNEL_TLV_CLOSE:
3901 qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3902 break;
3903 case CHANNEL_TLV_INT_CLEANUP:
3904 qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3905 break;
3906 case CHANNEL_TLV_RELEASE:
3907 qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3908 break;
3909 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
3910 qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
3911 break;
3912 case CHANNEL_TLV_COALESCE_UPDATE:
3913 qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
3914 break;
3915 case CHANNEL_TLV_COALESCE_READ:
3916 qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
3917 break;
3918 case CHANNEL_TLV_BULLETIN_UPDATE_MAC:
3919 qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf);
3920 break;
3922 } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3923 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3924 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3925 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3927 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3928 mbx->first_tlv.tl.type,
3929 sizeof(struct pfvf_def_resp_tlv),
3930 PFVF_STATUS_MALICIOUS);
3931 } else {
3932 /* unknown TLV - this may belong to a VF driver from the future
3933 * - a version written after this PF driver, which supports
3934 * features we don't yet know of and therefore can't serve.
3935 * Alternatively, a buggy VF driver may simply be sending
3936 * garbage over the channel.
3937 */
3938 DP_NOTICE(p_hwfn,
3939 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
3940 p_vf->abs_vf_id,
3941 mbx->first_tlv.tl.type,
3942 mbx->first_tlv.tl.length,
3943 mbx->first_tlv.padding, mbx->first_tlv.reply_address);
3945 /* Try replying in case reply address matches the acquisition's
3946 * posted address.
3947 */
3948 if (p_vf->acquire.first_tlv.reply_address &&
3949 (mbx->first_tlv.reply_address ==
3950 p_vf->acquire.first_tlv.reply_address)) {
3951 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3952 mbx->first_tlv.tl.type,
3953 sizeof(struct pfvf_def_resp_tlv),
3954 PFVF_STATUS_NOT_SUPPORTED);
3955 } else {
3956 DP_VERBOSE(p_hwfn,
3957 QED_MSG_IOV,
3958 "VF[%02x]: Can't respond to TLV - no valid reply address\n",
3959 p_vf->abs_vf_id);
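/* Editorial sketch (not part of this driver): the switch above could also
 * be written as a table keyed by TLV type, assuming all handlers shared
 * the (hwfn, ptt, vf) signature; "example_" names are hypothetical:
 */
typedef void (*example_tlv_handler_t)(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *p_vf);

static const example_tlv_handler_t example_tlv_handlers[] = {
	[CHANNEL_TLV_ACQUIRE]		= qed_iov_vf_mbx_acquire,
	[CHANNEL_TLV_VPORT_START]	= qed_iov_vf_mbx_start_vport,
	[CHANNEL_TLV_VPORT_TEARDOWN]	= qed_iov_vf_mbx_stop_vport,
	/* ... one entry per supported CHANNEL_TLV_* value ... */
};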
3964 static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
3966 int i;
3968 memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
3970 qed_for_each_vf(p_hwfn, i) {
3971 struct qed_vf_info *p_vf;
3973 p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
3974 if (p_vf->vf_mbx.b_pending_msg)
3975 events[i / 64] |= 1ULL << (i % 64);
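/* Editorial note: given the three-word dump in qed_handle_vf_msg() below,
 * QED_VF_ARRAY_LENGTH appears to be 3, so events[] covers up to 192
 * relative VF ids; e.g. a pending message on VF 70 sets bit 6 of
 * events[1] (70 = 64 + 6).
 */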
3979 static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
3980 u16 abs_vfid)
3982 u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
3984 if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
3985 DP_VERBOSE(p_hwfn,
3986 QED_MSG_IOV,
3987 "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
3988 abs_vfid);
3989 return NULL;
3992 return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
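/* Editorial note: abs_vfid is PCI-absolute while vfs_array is indexed by
 * the id relative to this PF; e.g. if first_vf_in_pf were 64, an event
 * for abs_vfid 70 would resolve to vfs_array[6]. (Illustrative numbers.)
 */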
3995 static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
3996 u16 abs_vfid, struct regpair *vf_msg)
3998 struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
3999 abs_vfid);
4001 if (!p_vf)
4002 return 0;
4004 /* Record the physical address of the request so that the
4005 * handler can later copy the message from it.
4006 */
4007 p_vf->vf_mbx.pending_req = HILO_64(vf_msg->hi, vf_msg->lo);
4009 /* Mark the event and schedule the workqueue */
4010 p_vf->vf_mbx.b_pending_msg = true;
4011 qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
4013 return 0;
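/* Editorial note: HILO_64() above rebuilds the 64-bit request address
 * from the two 32-bit halves of the event's regpair; conceptually (byte
 * order handling aside):
 *
 *	u64 addr = ((u64)hi << 32) | lo;
 */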
4016 static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
4017 struct malicious_vf_eqe_data *p_data)
4019 struct qed_vf_info *p_vf;
4021 p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
4023 if (!p_vf)
4024 return;
4026 if (!p_vf->b_malicious) {
4027 DP_NOTICE(p_hwfn,
4028 "VF [%d] - Malicious behavior [%02x]\n",
4029 p_vf->abs_vf_id, p_data->err_id);
4031 p_vf->b_malicious = true;
4032 } else {
4033 DP_INFO(p_hwfn,
4034 "VF [%d] - Malicious behavior [%02x]\n",
4035 p_vf->abs_vf_id, p_data->err_id);
4039 static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
4040 union event_ring_data *data, u8 fw_return_code)
4042 switch (opcode) {
4043 case COMMON_EVENT_VF_PF_CHANNEL:
4044 return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
4045 &data->vf_pf_channel.msg_addr);
4046 case COMMON_EVENT_MALICIOUS_VF:
4047 qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
4048 return 0;
4049 default:
4050 DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
4051 opcode);
4052 return -EINVAL;
4056 u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4058 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
4059 u16 i;
4061 if (!p_iov)
4062 goto out;
4064 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
4065 if (qed_iov_is_valid_vfid(p_hwfn, i, true, false))
4066 return i;
4068 out:
4069 return MAX_NUM_VFS;
4072 static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
4073 int vfid)
4075 struct qed_dmae_params params;
4076 struct qed_vf_info *vf_info;
4078 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4079 if (!vf_info)
4080 return -EINVAL;
4082 memset(&params, 0, sizeof(params));
4083 SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1);
4084 SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1);
4085 params.src_vfid = vf_info->abs_vf_id;
4087 if (qed_dmae_host2host(p_hwfn, ptt,
4088 vf_info->vf_mbx.pending_req,
4089 vf_info->vf_mbx.req_phys,
4090 sizeof(union vfpf_tlvs) / 4, &params)) {
4091 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4092 "Failed to copy message from VF 0x%02x\n", vfid);
4094 return -EIO;
4097 return 0;
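/* Editorial note: qed_dmae_host2host() takes its length in 32-bit dwords,
 * hence the "sizeof(union vfpf_tlvs) / 4" above; e.g. a 4 KB request
 * union would be copied as 1024 dwords. (Illustrative size.)
 */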
4100 static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
4101 u8 *mac, int vfid)
4103 struct qed_vf_info *vf_info;
4104 u64 feature;
4106 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4107 if (!vf_info) {
4108 DP_NOTICE(p_hwfn->cdev,
4109 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4110 return;
4113 if (vf_info->b_malicious) {
4114 DP_NOTICE(p_hwfn->cdev,
4115 "Can't set forced MAC to malicious VF [%d]\n", vfid);
4116 return;
4119 if (vf_info->p_vf_info.is_trusted_configured) {
4120 feature = BIT(VFPF_BULLETIN_MAC_ADDR);
4121 /* Trust mode will disable Forced MAC */
4122 vf_info->bulletin.p_virt->valid_bitmap &=
4123 ~BIT(MAC_ADDR_FORCED);
4124 } else {
4125 feature = BIT(MAC_ADDR_FORCED);
4126 /* Forced MAC will disable MAC_ADDR */
4127 vf_info->bulletin.p_virt->valid_bitmap &=
4128 ~BIT(VFPF_BULLETIN_MAC_ADDR);
4131 memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4133 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4135 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
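/* Editorial sketch (hypothetical helper, not in this driver): the
 * bulletin treats MAC_ADDR_FORCED and VFPF_BULLETIN_MAC_ADDR as mutually
 * exclusive, as done inline above and in qed_iov_bulletin_set_mac()
 * below. The invariant could be factored out as:
 */
static void example_select_mac_mode(struct qed_bulletin_content *p_bulletin,
				    bool trusted)
{
	if (trusted) {
		/* Trust mode disables the forced-MAC feature */
		p_bulletin->valid_bitmap |= BIT(VFPF_BULLETIN_MAC_ADDR);
		p_bulletin->valid_bitmap &= ~BIT(MAC_ADDR_FORCED);
	} else {
		/* Forced MAC disables the plain MAC-address feature */
		p_bulletin->valid_bitmap |= BIT(MAC_ADDR_FORCED);
		p_bulletin->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
	}
}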
4138 static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid)
4140 struct qed_vf_info *vf_info;
4141 u64 feature;
4143 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4144 if (!vf_info) {
4145 DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n",
4146 vfid);
4147 return -EINVAL;
4150 if (vf_info->b_malicious) {
4151 DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n",
4152 vfid);
4153 return -EINVAL;
4156 if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) {
4157 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4158 "Can not set MAC, Forced MAC is configured\n");
4159 return -EINVAL;
4162 feature = BIT(VFPF_BULLETIN_MAC_ADDR);
4163 ether_addr_copy(vf_info->bulletin.p_virt->mac, mac);
4165 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4167 if (vf_info->p_vf_info.is_trusted_configured)
4168 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4170 return 0;
4173 static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
4174 u16 pvid, int vfid)
4176 struct qed_vf_info *vf_info;
4177 u64 feature;
4179 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4180 if (!vf_info) {
4181 DP_NOTICE(p_hwfn->cdev,
4182 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4183 return;
4186 if (vf_info->b_malicious) {
4187 DP_NOTICE(p_hwfn->cdev,
4188 "Can't set forced vlan to malicious VF [%d]\n", vfid);
4189 return;
4192 feature = 1 << VLAN_ADDR_FORCED;
4193 vf_info->bulletin.p_virt->pvid = pvid;
4194 if (pvid)
4195 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4196 else
4197 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
4199 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4202 void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
4203 int vfid, u16 vxlan_port, u16 geneve_port)
4205 struct qed_vf_info *vf_info;
4207 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4208 if (!vf_info) {
4209 DP_NOTICE(p_hwfn->cdev,
4210 "Can not set udp ports, invalid vfid [%d]\n", vfid);
4211 return;
4214 if (vf_info->b_malicious) {
4215 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4216 "Can not set udp ports to malicious VF [%d]\n",
4217 vfid);
4218 return;
4221 vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
4222 vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
4225 static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
4227 struct qed_vf_info *p_vf_info;
4229 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4230 if (!p_vf_info)
4231 return false;
4233 return !!p_vf_info->vport_instance;
4236 static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
4238 struct qed_vf_info *p_vf_info;
4240 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4241 if (!p_vf_info)
4242 return true;
4244 return p_vf_info->state == VF_STOPPED;
4247 static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
4249 struct qed_vf_info *vf_info;
4251 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4252 if (!vf_info)
4253 return false;
4255 return vf_info->spoof_chk;
4258 static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
4260 struct qed_vf_info *vf;
4261 int rc = -EINVAL;
4263 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4264 DP_NOTICE(p_hwfn,
4265 "SR-IOV sanity check failed, can't set spoofchk\n");
4266 goto out;
4269 vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4270 if (!vf)
4271 goto out;
4273 if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
4274 /* After VF VPORT start PF will configure spoof check */
4275 vf->req_spoofchk_val = val;
4276 rc = 0;
4277 goto out;
4280 rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
4282 out:
4283 return rc;
4286 static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4288 struct qed_vf_info *p_vf;
4290 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4291 if (!p_vf || !p_vf->bulletin.p_virt)
4292 return NULL;
4294 if (!(p_vf->bulletin.p_virt->valid_bitmap &
4295 BIT(VFPF_BULLETIN_MAC_ADDR)))
4296 return NULL;
4298 return p_vf->bulletin.p_virt->mac;
4301 static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
4302 u16 rel_vf_id)
4304 struct qed_vf_info *p_vf;
4306 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4307 if (!p_vf || !p_vf->bulletin.p_virt)
4308 return NULL;
4310 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
4311 return NULL;
4313 return p_vf->bulletin.p_virt->mac;
4316 static u16
4317 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4319 struct qed_vf_info *p_vf;
4321 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4322 if (!p_vf || !p_vf->bulletin.p_virt)
4323 return 0;
4325 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
4326 return 0;
4328 return p_vf->bulletin.p_virt->pvid;
4331 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
4332 struct qed_ptt *p_ptt, int vfid, int val)
4334 struct qed_vf_info *vf;
4335 u8 abs_vp_id = 0;
4336 u16 rl_id;
4337 int rc;
4339 vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4340 if (!vf)
4341 return -EINVAL;
4343 rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
4344 if (rc)
4345 return rc;
4347 rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */
4348 return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
4351 static int
4352 qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
4354 struct qed_vf_info *vf;
4355 u8 vport_id;
4356 int i;
4358 for_each_hwfn(cdev, i) {
4359 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4361 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4362 DP_NOTICE(p_hwfn,
4363 "SR-IOV sanity check failed, can't set min rate\n");
4364 return -EINVAL;
4368 vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
	if (!vf) {
		DP_NOTICE(cdev, "Getting vf info failed, can't set min rate\n");
		return -EINVAL;
	}
4369 vport_id = vf->vport_id;
4371 return qed_configure_vport_wfq(cdev, vport_id, rate);
4374 static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
4376 struct qed_wfq_data *vf_vp_wfq;
4377 struct qed_vf_info *vf_info;
4379 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4380 if (!vf_info)
4381 return 0;
4383 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
4385 if (vf_vp_wfq->configured)
4386 return vf_vp_wfq->min_speed;
4387 else
4388 return 0;
4391 /**
4392 * qed_schedule_iov - schedules IOV task for VF and PF
4393 * @hwfn: hardware function pointer
4394 * @flag: IOV flag for VF/PF
4395 */
4396 void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
4398 smp_mb__before_atomic();
4399 set_bit(flag, &hwfn->iov_task_flags);
4400 smp_mb__after_atomic();
4401 DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
4402 queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
4405 void qed_vf_start_iov_wq(struct qed_dev *cdev)
4407 int i;
4409 for_each_hwfn(cdev, i)
4410 queue_delayed_work(cdev->hwfns[i].iov_wq,
4411 &cdev->hwfns[i].iov_task, 0);
4414 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
4416 int i, j;
4418 for_each_hwfn(cdev, i)
4419 if (cdev->hwfns[i].iov_wq)
4420 flush_workqueue(cdev->hwfns[i].iov_wq);
4422 /* Mark VFs for disablement */
4423 qed_iov_set_vfs_to_disable(cdev, true);
4425 if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
4426 pci_disable_sriov(cdev->pdev);
4428 if (cdev->recov_in_prog) {
4429 DP_VERBOSE(cdev,
4430 QED_MSG_IOV,
4431 "Skip SRIOV disable operations in the device since a recovery is in progress\n");
4432 goto out;
4435 for_each_hwfn(cdev, i) {
4436 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4437 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
4439 /* Failure to acquire the ptt in 100G creates an odd error
4440 * where the first engine has already released IOV.
4441 */
4442 if (!ptt) {
4443 DP_ERR(hwfn, "Failed to acquire ptt\n");
4444 return -EBUSY;
4447 /* Clean WFQ db and configure equal weight for all vports */
4448 qed_clean_wfq_db(hwfn, ptt);
4450 qed_for_each_vf(hwfn, j) {
4451 int k;
4453 if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
4454 continue;
4456 /* Wait until VF is disabled before releasing */
4457 for (k = 0; k < 100; k++) {
4458 if (!qed_iov_is_vf_stopped(hwfn, j))
4459 msleep(20);
4460 else
4461 break;
4464 if (k < 100)
4465 qed_iov_release_hw_for_vf(&cdev->hwfns[i],
4466 ptt, j);
4467 else
4468 DP_ERR(hwfn,
4469 "Timeout waiting for VF's FLR to end\n");
4472 qed_ptt_release(hwfn, ptt);
4474 out:
4475 qed_iov_set_vfs_to_disable(cdev, false);
4477 return 0;
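/* Editorial sketch: the wait loop above is a bounded poll - at most 100
 * iterations of msleep(20), i.e. roughly two seconds per VF. The same
 * pattern in generic form (hypothetical helper):
 */
static bool example_poll_vf(struct qed_hwfn *hwfn, int vfid,
			    bool (*done)(struct qed_hwfn *hwfn, int vfid))
{
	int k;

	for (k = 0; k < 100; k++) {
		if (done(hwfn, vfid))
			return true;
		msleep(20);
	}

	return false;
}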
4480 static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
4481 u16 vfid,
4482 struct qed_iov_vf_init_params *params)
4484 u16 base, i;
4486 /* Since we have an equal per-VF resource distribution, and we assume
4487 * the PF has acquired the first QED_PF_L2_QUE queues, we start
4488 * assigning to VFs sequentially from there.
4489 */
4490 base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;
4492 params->rel_vf_id = vfid;
4493 for (i = 0; i < params->num_queues; i++) {
4494 params->req_rx_queue[i] = base + i;
4495 params->req_tx_queue[i] = base + i;
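/* Editorial note: with the layout above, VF N gets the contiguous queue
 * range [base, base + num_queues). E.g. if FEAT_NUM(hwfn, QED_PF_L2_QUE)
 * were 16 and each VF got 4 queues, VF 0 would use queue ids 16..19 and
 * VF 1 would use 20..23. (Illustrative numbers.)
 */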
4499 static int qed_sriov_enable(struct qed_dev *cdev, int num)
4501 struct qed_iov_vf_init_params params;
4502 struct qed_hwfn *hwfn;
4503 struct qed_ptt *ptt;
4504 int i, j, rc;
4506 if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
4507 DP_NOTICE(cdev, "Can start at most %d VFs\n",
4508 RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
4509 return -EINVAL;
4512 memset(&params, 0, sizeof(params));
4514 /* Initialize HW for VF access */
4515 for_each_hwfn(cdev, j) {
4516 hwfn = &cdev->hwfns[j];
4517 ptt = qed_ptt_acquire(hwfn);
4519 /* Make sure not to use more than 16 queues per VF */
4520 params.num_queues = min_t(int,
4521 FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
4522 16);
4524 if (!ptt) {
4525 DP_ERR(hwfn, "Failed to acquire ptt\n");
4526 rc = -EBUSY;
4527 goto err;
4530 for (i = 0; i < num; i++) {
4531 if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
4532 continue;
4534 qed_sriov_enable_qid_config(hwfn, i, &params);
4535 rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
4536 if (rc) {
4537 DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
4538 qed_ptt_release(hwfn, ptt);
4539 goto err;
4543 qed_ptt_release(hwfn, ptt);
4546 /* Enable SRIOV PCIe functions */
4547 rc = pci_enable_sriov(cdev->pdev, num);
4548 if (rc) {
4549 DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
4550 goto err;
4553 hwfn = QED_LEADING_HWFN(cdev);
4554 ptt = qed_ptt_acquire(hwfn);
4555 if (!ptt) {
4556 DP_ERR(hwfn, "Failed to acquire ptt\n");
4557 rc = -EBUSY;
4558 goto err;
4561 rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
4562 if (rc)
4563 DP_INFO(cdev, "Failed to update eswitch mode\n");
4564 qed_ptt_release(hwfn, ptt);
4566 return num;
4568 err:
4569 qed_sriov_disable(cdev, false);
4570 return rc;
4573 static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
4575 if (!IS_QED_SRIOV(cdev)) {
4576 DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
4577 return -EOPNOTSUPP;
4580 if (num_vfs_param)
4581 return qed_sriov_enable(cdev, num_vfs_param);
4582 else
4583 return qed_sriov_disable(cdev, true);
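/* Editorial note: qed_sriov_configure() is ultimately driven by the
 * standard PCI sysfs knob, e.g. from userspace (example BDF):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs
 *	echo 0 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs
 */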
4586 static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
4588 int i;
4590 if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4591 DP_VERBOSE(cdev, QED_MSG_IOV,
4592 "Cannot set a VF MAC; Sriov is not enabled\n");
4593 return -EINVAL;
4596 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
4597 DP_VERBOSE(cdev, QED_MSG_IOV,
4598 "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
4599 return -EINVAL;
4602 for_each_hwfn(cdev, i) {
4603 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4604 struct qed_public_vf_info *vf_info;
4606 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4607 if (!vf_info)
4608 continue;
4610 /* Set the MAC, and schedule the IOV task */
4611 if (vf_info->is_trusted_configured)
4612 ether_addr_copy(vf_info->mac, mac);
4613 else
4614 ether_addr_copy(vf_info->forced_mac, mac);
4616 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4619 return 0;
4622 static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
4624 int i;
4626 if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4627 DP_VERBOSE(cdev, QED_MSG_IOV,
4628 "Cannot set a VF MAC; Sriov is not enabled\n");
4629 return -EINVAL;
4632 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
4633 DP_VERBOSE(cdev, QED_MSG_IOV,
4634 "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
4635 return -EINVAL;
4638 for_each_hwfn(cdev, i) {
4639 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4640 struct qed_public_vf_info *vf_info;
4642 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4643 if (!vf_info)
4644 continue;
4646 /* Set the forced vlan, and schedule the IOV task */
4647 vf_info->forced_vlan = vid;
4648 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4651 return 0;
4654 static int qed_get_vf_config(struct qed_dev *cdev,
4655 int vf_id, struct ifla_vf_info *ivi)
4657 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
4658 struct qed_public_vf_info *vf_info;
4659 struct qed_mcp_link_state link;
4660 u32 tx_rate;
4662 /* Sanitize request */
4663 if (IS_VF(cdev))
4664 return -EINVAL;
4666 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
4667 DP_VERBOSE(cdev, QED_MSG_IOV,
4668 "VF index [%d] isn't active\n", vf_id);
4669 return -EINVAL;
4672 vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4674 qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
4676 /* Fill information about VF */
4677 ivi->vf = vf_id;
4679 if (is_valid_ether_addr(vf_info->forced_mac))
4680 ether_addr_copy(ivi->mac, vf_info->forced_mac);
4681 else
4682 ether_addr_copy(ivi->mac, vf_info->mac);
4684 ivi->vlan = vf_info->forced_vlan;
4685 ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
4686 ivi->linkstate = vf_info->link_state;
4687 tx_rate = vf_info->tx_rate;
4688 ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
4689 ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
4691 return 0;
4694 void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
4696 struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
4697 struct qed_mcp_link_capabilities caps;
4698 struct qed_mcp_link_params params;
4699 struct qed_mcp_link_state link;
4700 int i;
4702 if (!hwfn->pf_iov_info)
4703 return;
4705 /* Update bulletin of all future possible VFs with link configuration */
4706 for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
4707 struct qed_public_vf_info *vf_info;
4709 vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
4710 if (!vf_info)
4711 continue;
4713 /* Only hwfn0 is actually interested in the link speed.
4714 * But since only it would receive an MFW indication of link,
4715 * the configuration has to be taken from it - otherwise things
4716 * like rate limiting for hwfn1 VFs would not work.
4717 */
4718 memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
4719 sizeof(params));
4720 memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
4721 memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
4722 sizeof(caps));
4724 /* Modify link according to the VF's configured link state */
4725 switch (vf_info->link_state) {
4726 case IFLA_VF_LINK_STATE_DISABLE:
4727 link.link_up = false;
4728 break;
4729 case IFLA_VF_LINK_STATE_ENABLE:
4730 link.link_up = true;
4731 /* Set speed according to the maximum supported by HW:
4732 * 40G for regular devices and 100G for CMT
4733 * mode devices.
4734 */
4735 link.speed = (hwfn->cdev->num_hwfns > 1) ?
4736 100000 : 40000;
	fallthrough;
4737 default:
4738 /* In auto mode pass PF link image to VF */
4739 break;
4742 if (link.link_up && vf_info->tx_rate) {
4743 struct qed_ptt *ptt;
4744 int rate;
4746 rate = min_t(int, vf_info->tx_rate, link.speed);
4748 ptt = qed_ptt_acquire(hwfn);
4749 if (!ptt) {
4750 DP_NOTICE(hwfn, "Failed to acquire PTT\n");
4751 return;
4754 if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
4755 vf_info->tx_rate = rate;
4756 link.speed = rate;
4759 qed_ptt_release(hwfn, ptt);
4762 qed_iov_set_link(hwfn, i, &params, &link, &caps);
4765 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
4768 static int qed_set_vf_link_state(struct qed_dev *cdev,
4769 int vf_id, int link_state)
4771 int i;
4773 /* Sanitize request */
4774 if (IS_VF(cdev))
4775 return -EINVAL;
4777 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
4778 DP_VERBOSE(cdev, QED_MSG_IOV,
4779 "VF index [%d] isn't active\n", vf_id);
4780 return -EINVAL;
4783 /* Handle configuration of link state */
4784 for_each_hwfn(cdev, i) {
4785 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4786 struct qed_public_vf_info *vf;
4788 vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4789 if (!vf)
4790 continue;
4792 if (vf->link_state == link_state)
4793 continue;
4795 vf->link_state = link_state;
4796 qed_inform_vf_link_state(&cdev->hwfns[i]);
4799 return 0;
4802 static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
4804 int i, rc = -EINVAL;
4806 for_each_hwfn(cdev, i) {
4807 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4809 rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
4810 if (rc)
4811 break;
4814 return rc;
4817 static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
4819 int i;
4821 for_each_hwfn(cdev, i) {
4822 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4823 struct qed_public_vf_info *vf;
4825 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4826 DP_NOTICE(p_hwfn,
4827 "SR-IOV sanity check failed, can't set tx rate\n");
4828 return -EINVAL;
4831 vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
4833 vf->tx_rate = rate;
4835 qed_inform_vf_link_state(p_hwfn);
4838 return 0;
4841 static int qed_set_vf_rate(struct qed_dev *cdev,
4842 int vfid, u32 min_rate, u32 max_rate)
4844 int rc_min = 0, rc_max = 0;
4846 if (max_rate)
4847 rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
4849 if (min_rate)
4850 rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
4852 if (rc_max | rc_min)
4853 return -EINVAL;
4855 return 0;
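/* Editorial note: the min/max rates above are typically configured from
 * userspace via iproute2, e.g. (example device and VF numbers, Mbps):
 *
 *	ip link set dev eth0 vf 0 min_tx_rate 100 max_tx_rate 1000
 */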
4858 static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
4860 int i;
4862 for_each_hwfn(cdev, i) {
4863 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4864 struct qed_public_vf_info *vf;
4866 if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
4867 DP_NOTICE(hwfn,
4868 "SR-IOV sanity check failed, can't set trust\n");
4869 return -EINVAL;
4872 vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
4874 if (vf->is_trusted_request == trust)
4875 return 0;
4876 vf->is_trusted_request = trust;
4878 qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
4881 return 0;
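/* Editorial note: trust is likewise toggled from userspace, e.g.
 * "ip link set dev eth0 vf 0 trust on" (example names), which reaches
 * this function through the .set_trust callback exported at the bottom
 * of this file.
 */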
4884 static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
4886 u64 events[QED_VF_ARRAY_LENGTH];
4887 struct qed_ptt *ptt;
4888 int i;
4890 ptt = qed_ptt_acquire(hwfn);
4891 if (!ptt) {
4892 DP_VERBOSE(hwfn, QED_MSG_IOV,
4893 "Can't acquire PTT; re-scheduling\n");
4894 qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
4895 return;
4898 qed_iov_pf_get_pending_events(hwfn, events);
4900 DP_VERBOSE(hwfn, QED_MSG_IOV,
4901 "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
4902 events[0], events[1], events[2]);
4904 qed_for_each_vf(hwfn, i) {
4905 /* Skip VFs with no pending messages */
4906 if (!(events[i / 64] & (1ULL << (i % 64))))
4907 continue;
4909 DP_VERBOSE(hwfn, QED_MSG_IOV,
4910 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
4911 i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4913 /* Copy VF's message to PF's request buffer for that VF */
4914 if (qed_iov_copy_vf_msg(hwfn, ptt, i))
4915 continue;
4917 qed_iov_process_mbx_req(hwfn, ptt, i);
4920 qed_ptt_release(hwfn, ptt);
4923 static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn,
4924 u8 *mac,
4925 struct qed_public_vf_info *info)
4927 if (info->is_trusted_configured) {
4928 if (is_valid_ether_addr(info->mac) &&
4929 (!mac || !ether_addr_equal(mac, info->mac)))
4930 return true;
4931 } else {
4932 if (is_valid_ether_addr(info->forced_mac) &&
4933 (!mac || !ether_addr_equal(mac, info->forced_mac)))
4934 return true;
4937 return false;
4940 static void qed_set_bulletin_mac(struct qed_hwfn *hwfn,
4941 struct qed_public_vf_info *info,
4942 int vfid)
4944 if (info->is_trusted_configured)
4945 qed_iov_bulletin_set_mac(hwfn, info->mac, vfid);
4946 else
4947 qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid);
4950 static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
4952 int i;
4954 qed_for_each_vf(hwfn, i) {
4955 struct qed_public_vf_info *info;
4956 bool update = false;
4957 u8 *mac;
4959 info = qed_iov_get_public_vf_info(hwfn, i, true);
4960 if (!info)
4961 continue;
4963 /* Update data on bulletin board */
4964 if (info->is_trusted_configured)
4965 mac = qed_iov_bulletin_get_mac(hwfn, i);
4966 else
4967 mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
4969 if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) {
4970 DP_VERBOSE(hwfn,
4971 QED_MSG_IOV,
4972 "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
4974 hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4976 /* Update bulletin board with MAC */
4977 qed_set_bulletin_mac(hwfn, info, i);
4978 update = true;
4981 if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
4982 info->forced_vlan) {
4983 DP_VERBOSE(hwfn,
4984 QED_MSG_IOV,
4985 "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
4986 info->forced_vlan,
4987 i,
4988 hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4989 qed_iov_bulletin_set_forced_vlan(hwfn,
4990 info->forced_vlan, i);
4991 update = true;
4994 if (update)
4995 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
4999 static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
5001 struct qed_ptt *ptt;
5002 int i;
5004 ptt = qed_ptt_acquire(hwfn);
5005 if (!ptt) {
5006 DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
5007 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5008 return;
5011 qed_for_each_vf(hwfn, i)
5012 qed_iov_post_vf_bulletin(hwfn, i, ptt);
5014 qed_ptt_release(hwfn, ptt);
5017 static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
5019 struct qed_public_vf_info *vf_info;
5020 struct qed_vf_info *vf;
5021 u8 *force_mac;
5022 int i;
5024 vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
5025 vf = qed_iov_get_vf_info(hwfn, vf_id, true);
5027 if (!vf_info || !vf)
5028 return;
5030 /* Force MAC converted to generic MAC in case of VF trust on */
5031 if (vf_info->is_trusted_configured &&
5032 (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) {
5033 force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id);
5035 if (force_mac) {
5036 /* Clear existing shadow copy of MAC to have a clean
5037 * slate.
5038 */
5039 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
5040 if (ether_addr_equal(vf->shadow_config.macs[i],
5041 vf_info->mac)) {
5042 eth_zero_addr(vf->shadow_config.macs[i]);
5043 DP_VERBOSE(hwfn, QED_MSG_IOV,
5044 "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
5045 vf_info->mac, vf_id);
5046 break;
5050 ether_addr_copy(vf_info->mac, force_mac);
5051 eth_zero_addr(vf_info->forced_mac);
5052 vf->bulletin.p_virt->valid_bitmap &=
5053 ~BIT(MAC_ADDR_FORCED);
5054 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5058 /* Update shadow copy with VF MAC when trust mode is turned off */
5059 if (!vf_info->is_trusted_configured) {
5060 u8 empty_mac[ETH_ALEN];
5062 eth_zero_addr(empty_mac);
5063 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
5064 if (ether_addr_equal(vf->shadow_config.macs[i],
5065 empty_mac)) {
5066 ether_addr_copy(vf->shadow_config.macs[i],
5067 vf_info->mac);
5068 DP_VERBOSE(hwfn, QED_MSG_IOV,
5069 "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n",
5070 vf_info->mac, vf_id);
5071 break;
5074 /* Clear bulletin when trust mode is turned off,
5075 * to have a clean slate for next (normal) operations.
5076 */
5077 qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id);
5078 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5082 static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
5084 struct qed_sp_vport_update_params params;
5085 struct qed_filter_accept_flags *flags;
5086 struct qed_public_vf_info *vf_info;
5087 struct qed_vf_info *vf;
5088 u8 mask;
5089 int i;
5091 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
5092 flags = &params.accept_flags;
5094 qed_for_each_vf(hwfn, i) {
5095 /* Make sure the currently requested configuration didn't
5096 * flip back, so that we don't end up configuring something
5097 * that is no longer needed.
5098 */
5099 vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
5100 if (vf_info->is_trusted_configured ==
5101 vf_info->is_trusted_request)
5102 continue;
5103 vf_info->is_trusted_configured = vf_info->is_trusted_request;
5105 /* Handle forced MAC mode */
5106 qed_update_mac_for_vf_trust_change(hwfn, i);
5108 /* Validate that the VF has a configured vport */
5109 vf = qed_iov_get_vf_info(hwfn, i, true);
5110 if (!vf->vport_instance)
5111 continue;
5113 memset(&params, 0, sizeof(params));
5114 params.opaque_fid = vf->opaque_fid;
5115 params.vport_id = vf->vport_id;
5117 params.update_ctl_frame_check = 1;
5118 params.mac_chk_en = !vf_info->is_trusted_configured;
5120 if (vf_info->rx_accept_mode & mask) {
5121 flags->update_rx_mode_config = 1;
5122 flags->rx_accept_filter = vf_info->rx_accept_mode;
5125 if (vf_info->tx_accept_mode & mask) {
5126 flags->update_tx_mode_config = 1;
5127 flags->tx_accept_filter = vf_info->tx_accept_mode;
5130 /* Remove if needed; otherwise this would set the mask */
5131 if (!vf_info->is_trusted_configured) {
5132 flags->rx_accept_filter &= ~mask;
5133 flags->tx_accept_filter &= ~mask;
5136 if (flags->update_rx_mode_config ||
5137 flags->update_tx_mode_config ||
5138 params.update_ctl_frame_check)
5139 qed_sp_vport_update(hwfn, &params,
5140 QED_SPQ_MODE_EBLOCK, NULL);
5144 static void qed_iov_pf_task(struct work_struct *work)
5147 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
5148 iov_task.work);
5149 int rc;
5151 if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
5152 return;
5154 if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
5155 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
5157 if (!ptt) {
5158 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
5159 return;
5162 rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
5163 if (rc)
5164 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
5166 qed_ptt_release(hwfn, ptt);
5169 if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
5170 qed_handle_vf_msg(hwfn);
5172 if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
5173 &hwfn->iov_task_flags))
5174 qed_handle_pf_set_vf_unicast(hwfn);
5176 if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
5177 &hwfn->iov_task_flags))
5178 qed_handle_bulletin_post(hwfn);
5180 if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
5181 qed_iov_handle_trust_change(hwfn);
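/* Editorial note: each test_and_clear_bit() above consumes a request bit
 * that qed_schedule_iov() published with set_bit() before kicking the
 * workqueue, so a flag raised while the task is running simply re-queues
 * the work instead of being lost.
 */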
5184 void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
5186 int i;
5188 for_each_hwfn(cdev, i) {
5189 if (!cdev->hwfns[i].iov_wq)
5190 continue;
5192 if (schedule_first) {
5193 qed_schedule_iov(&cdev->hwfns[i],
5194 QED_IOV_WQ_STOP_WQ_FLAG);
5195 cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
5198 flush_workqueue(cdev->hwfns[i].iov_wq);
5199 destroy_workqueue(cdev->hwfns[i].iov_wq);
5203 int qed_iov_wq_start(struct qed_dev *cdev)
5205 char name[NAME_SIZE];
5206 int i;
5208 for_each_hwfn(cdev, i) {
5209 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
5211 /* A PF needs a dedicated workqueue only if it supports IOV;
5212 * VFs always require one.
5213 */
5214 if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
5215 continue;
5217 snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
5218 cdev->pdev->bus->number,
5219 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
5221 p_hwfn->iov_wq = create_singlethread_workqueue(name);
5222 if (!p_hwfn->iov_wq) {
5223 DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
5224 return -ENOMEM;
5227 if (IS_PF(cdev))
5228 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
5229 else
5230 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
5233 return 0;
5236 const struct qed_iov_hv_ops qed_iov_ops_pass = {
5237 .configure = &qed_sriov_configure,
5238 .set_mac = &qed_sriov_pf_set_mac,
5239 .set_vlan = &qed_sriov_pf_set_vlan,
5240 .get_config = &qed_get_vf_config,
5241 .set_link_state = &qed_set_vf_link_state,
5242 .set_spoof = &qed_spoof_configure,
5243 .set_rate = &qed_set_vf_rate,
5244 .set_trust = &qed_set_vf_trust,