/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <net/dcbnl.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV

static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_input *req;
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL);
	if (rc)
		goto exit;

	if (vf)
		req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req->encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl =
		(struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl;
	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	rc = hwrm_req_send(bp, req);
exit:
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
	return rc;
}

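/* Validate a VF-related ndo call: SR-IOV must be enabled and the VF id
 * must be within the range of currently active VFs.
 */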
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.active_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	bool old_setting = false;
	struct bnxt_vf_info *vf;
	u32 func_flags;
	int rc;

	if (bp->hwrm_spec_code < 0x10701)
		return -ENOTSUPP;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	if (setting)
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
	else
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
	/*TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (!rc) {
		req->fid = cpu_to_le16(vf->fw_fid);
		req->flags = cpu_to_le32(func_flags);
		rc = hwrm_req_send(bp, req);
		if (!rc) {
			if (setting)
				vf->flags |= BNXT_VF_SPOOFCHK;
			else
				vf->flags &= ~BNXT_VF_SPOOFCHK;
		}
	}
	return rc;
}

static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		vf->func_qcfg_flags = le16_to_cpu(resp->flags);
	hwrm_req_drop(bp, req);
	return rc;
}

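/* Report whether a VF is trusted. If the firmware manages VF trust
 * (BNXT_FW_CAP_TRUSTED_VF), query the current state via HWRM_FUNC_QCFG;
 * otherwise fall back to the driver-maintained BNXT_VF_TRUST flag.
 */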
bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return !!(vf->flags & BNXT_VF_TRUST);

	bnxt_hwrm_func_qcfg_flags(bp, vf);
	return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
}

static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_cfg_input *req;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return 0;

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(vf->fw_fid);
	if (vf->flags & BNXT_VF_TRUST)
		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
	else
		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
	return hwrm_req_send(bp, req);
}

int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;

	if (bnxt_vf_ndo_prep(bp, vf_id))
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	if (trusted)
		vf->flags |= BNXT_VF_TRUST;
	else
		vf->flags &= ~BNXT_VF_TRUST;

	bnxt_hwrm_set_trusted_vf(bp, vf);
	return 0;
}

int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	if (is_valid_ether_addr(vf->mac_addr))
		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	else
		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan & VLAN_VID_MASK;
	ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
	ivi->trusted = bnxt_is_trusted_vf(bp, vf);
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject bc or mc mac addr, zero mac addr means allow
	 * VF to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	memcpy(vf->mac_addr, mac, ETH_ALEN);

	req->fid = cpu_to_le16(vf->fw_fid);
	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_req_send(bp, req);
}

int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q) &&
	    (vlan_proto != htons(ETH_P_8021AD) ||
	     !(bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP)))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	if (vlan_id >= VLAN_N_VID || qos >= IEEE_8021Q_MAX_PRIORITIES ||
	    (!vlan_id && qos))
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id | (u16)qos << VLAN_PRIO_SHIFT;
	if (vlan_tag == vf->vlan)
		return 0;

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (!rc) {
		req->fid = cpu_to_le16(vf->fw_fid);
		req->dflt_vlan = cpu_to_le16(vlan_tag);
		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
		if (bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP) {
			req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_TPID);
			req->tpid = vlan_proto;
		}
		rc = hwrm_req_send(bp, req);
		if (!rc)
			vf->vlan = vlan_tag;
	}
	return rc;
}

int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (!rc) {
		req->fid = cpu_to_le16(vf->fw_fid);
		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
					   FUNC_CFG_REQ_ENABLES_MIN_BW);
		req->max_bw = cpu_to_le32(max_tx_rate);
		req->min_bw = cpu_to_le32(min_tx_rate);
		rc = hwrm_req_send(bp, req);
		if (!rc) {
			vf->min_tx_rate = min_tx_rate;
			vf->max_tx_rate = max_tx_rate;
		}
	}
	return rc;
}

int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
	}
	return 0;
}

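/* Ask the firmware to release the resources reserved for num_vfs VFs,
 * issuing one HWRM_FUNC_VF_RESC_FREE request per VF id.
 */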
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_vf_resc_free_input *req;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
	if (rc)
		return rc;

	hwrm_req_hold(bp, req);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req->vf_id = cpu_to_le16(i);
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
	}
	hwrm_req_drop(bp, req);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	bp->pf.active_vfs = 0;
	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

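/* Allocate per-VF state in the PF plus the DMA-coherent pages that hold
 * HWRM commands forwarded from the VFs, carving each page into one
 * BNXT_HWRM_REQ_MAX_SIZE slot per VF.
 */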
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VF's */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

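/* Register the VF command buffer pages with the firmware so that VF HWRM
 * requests can be DMAed into them and forwarded to the PF driver.
 */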
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR);
	if (rc)
		return rc;

	req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req->req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req->req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_req_send(bp, req);
}

static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	req->fid = cpu_to_le16(vf->fw_fid);

	if (is_valid_ether_addr(vf->mac_addr)) {
		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
		memcpy(req->dflt_mac_addr, vf->mac_addr, ETH_ALEN);
	}
	if (vf->vlan) {
		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
		req->dflt_vlan = cpu_to_le16(vf->vlan);
	}
	if (vf->max_tx_rate) {
		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
					    FUNC_CFG_REQ_ENABLES_MIN_BW);
		req->max_bw = cpu_to_le32(vf->max_tx_rate);
		req->min_bw = cpu_to_le32(vf->min_tx_rate);
	}
	if (vf->flags & BNXT_VF_TRUST)
		req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);

	return hwrm_req_send(bp, req);
}

static void bnxt_hwrm_roce_sriov_cfg(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_qcaps_output *resp;
	struct hwrm_func_cfg_input *cfg_req;
	struct hwrm_func_qcaps_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
	if (rc)
		return;

	req->fid = cpu_to_le16(0xffff);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc)
		goto err;

	rc = hwrm_req_init(bp, cfg_req, HWRM_FUNC_CFG);
	if (rc)
		goto err;

	cfg_req->fid = cpu_to_le16(0xffff);
	cfg_req->enables2 =
		cpu_to_le32(FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF |
			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF |
			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF |
			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF |
			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF |
			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF);
	cfg_req->roce_max_av_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_av) / num_vfs);
	cfg_req->roce_max_cq_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_cq) / num_vfs);
	cfg_req->roce_max_mrw_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_mrw) / num_vfs);
	cfg_req->roce_max_qp_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_qp) / num_vfs);
	cfg_req->roce_max_srq_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_srq) / num_vfs);
	cfg_req->roce_max_gid_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_gid) / num_vfs);

	rc = hwrm_req_send(bp, cfg_req);

err:
	hwrm_req_drop(bp, req);
	if (rc)
		netdev_err(bp->dev, "RoCE sriov configuration failed\n");
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	struct hwrm_func_vf_resource_cfg_input *req;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc = 0, min = 1;
	u16 vf_msix = 0;
	u16 vf_rss;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG);
	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
		vf_ring_grps = 0;
	} else {
		vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
	}
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
	else
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
	vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;

	req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		min = 0;
		req->min_rsscos_ctx = cpu_to_le16(min);
	}
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		req->min_cmpl_rings = cpu_to_le16(min);
		req->min_tx_rings = cpu_to_le16(min);
		req->min_rx_rings = cpu_to_le16(min);
		req->min_l2_ctxs = cpu_to_le16(min);
		req->min_vnics = cpu_to_le16(min);
		req->min_stat_ctx = cpu_to_le16(min);
		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
			req->min_hw_ring_grps = cpu_to_le16(min);
	} else {
		vf_cp_rings /= num_vfs;
		vf_tx_rings /= num_vfs;
		vf_rx_rings /= num_vfs;
		if ((bp->fw_cap & BNXT_FW_CAP_PRE_RESV_VNICS) &&
		    vf_vnics >= pf->max_vfs) {
			/* Take into account that FW has pre-reserved 1 VNIC for
			 * each pf->max_vfs.
			 */
			vf_vnics = (vf_vnics - pf->max_vfs + num_vfs) / num_vfs;
		} else {
			vf_vnics /= num_vfs;
		}
		vf_stat_ctx /= num_vfs;
		vf_ring_grps /= num_vfs;
		vf_rss /= num_vfs;

		vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
		req->min_cmpl_rings = cpu_to_le16(vf_cp_rings);
		req->min_tx_rings = cpu_to_le16(vf_tx_rings);
		req->min_rx_rings = cpu_to_le16(vf_rx_rings);
		req->min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
		req->min_vnics = cpu_to_le16(vf_vnics);
		req->min_stat_ctx = cpu_to_le16(vf_stat_ctx);
		req->min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
		req->min_rsscos_ctx = cpu_to_le16(vf_rss);
	}
	req->max_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req->max_tx_rings = cpu_to_le16(vf_tx_rings);
	req->max_rx_rings = cpu_to_le16(vf_rx_rings);
	req->max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req->max_vnics = cpu_to_le16(vf_vnics);
	req->max_stat_ctx = cpu_to_le16(vf_stat_ctx);
	req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req->max_rsscos_ctx = cpu_to_le16(vf_rss);
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		req->max_msix = cpu_to_le16(vf_msix / num_vfs);

	hwrm_req_hold(bp, req);
	for (i = 0; i < num_vfs; i++) {
		if (reset)
			__bnxt_set_vf_params(bp, i);

		req->vf_id = cpu_to_le16(pf->first_vf_id + i);
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = pf->first_vf_id + i;
	}

	if (pf->active_vfs) {
		u16 n = pf->active_vfs;

		hw_resc->max_tx_rings -= le16_to_cpu(req->min_tx_rings) * n;
		hw_resc->max_rx_rings -= le16_to_cpu(req->min_rx_rings) * n;
		hw_resc->max_hw_ring_grps -=
			le16_to_cpu(req->min_hw_ring_grps) * n;
		hw_resc->max_cp_rings -= le16_to_cpu(req->min_cmpl_rings) * n;
		hw_resc->max_rsscos_ctxs -=
			le16_to_cpu(req->min_rsscos_ctx) * n;
		hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
		hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			hw_resc->max_nqs -= vf_msix;

		rc = pf->active_vfs;
	}
	hwrm_req_drop(bp, req);
	return rc;
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_cfg_input *req;
	int total_vf_tx_rings = 0;
	u16 vf_ring_grps;
	u32 mtu, i;
	int rc;

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	/* Remaining rings are distributed equally amongst VF's for now */
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
			      num_vfs;
	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
				   FUNC_CFG_REQ_ENABLES_MRU |
				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				   FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
	req->mru = cpu_to_le16(mtu);
	req->admin_mtu = cpu_to_le16(mtu);

	req->num_rsscos_ctxs = cpu_to_le16(1);
	req->num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req->num_tx_rings = cpu_to_le16(vf_tx_rings);
	req->num_rx_rings = cpu_to_le16(vf_rx_rings);
	req->num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req->num_l2_ctxs = cpu_to_le16(4);

	req->num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req->num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	hwrm_req_hold(bp, req);
	for (i = 0; i < num_vfs; i++) {
		int vf_tx_rsvd = vf_tx_rings;

		req->fid = cpu_to_le16(pf->first_vf_id + i);
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req->fid);
		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
					      &vf_tx_rsvd);
		if (rc)
			break;
		total_vf_tx_rings += vf_tx_rsvd;
	}
	hwrm_req_drop(bp, req);
	if (pf->active_vfs) {
		hw_resc->max_tx_rings -= total_vf_tx_rings;
		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
		hw_resc->max_rsscos_ctxs -= num_vfs;
		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		hw_resc->max_vnics -= vf_vnics * num_vfs;
		rc = pf->active_vfs;
	}
	return rc;
}

static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	if (BNXT_NEW_RM(bp))
		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
	else
		return bnxt_hwrm_func_cfg(bp, num_vfs);
}

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	int rc;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		return rc;

	/* Reserve resources for VFs */
	rc = bnxt_func_cfg(bp, *num_vfs, reset);
	if (rc != *num_vfs) {
		if (rc <= 0) {
			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
			*num_vfs = 0;
			return rc;
		}
		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
			    rc);
		*num_vfs = rc;
	}

	if (BNXT_RDMA_SRIOV_EN(bp) && BNXT_ROCE_VF_RESC_CAP(bp))
		bnxt_hwrm_roce_sriov_cfg(bp, *num_vfs);

	return 0;
}

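/* Enable SR-IOV: size the request down to what the PF can spare, allocate
 * and reserve VF resources, enable the VFs in PCI config space, and create
 * VF representors when in switchdev mode.
 */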
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
	int avail_cp, avail_stat;

	/* Check if we can enable requested num of vf's. At a minimum
	 * we require 1 RX 1 TX rings for each VF. In this minimum conf
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
	avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
	avail_cp = min_t(int, avail_cp, avail_stat);

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}
		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
		    avail_cp < min_rx_rings)
			rx_ok = 0;

		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		    avail_cp >= min_tx_rings)
			tx_ok = 1;

		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
		    min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return 0;

	/* Create representors for VFs in switchdev mode */
	devl_lock(bp->dl);
	rc = bnxt_vf_reps_create(bp);
	devl_unlock(bp->dl);
	if (rc) {
		netdev_info(bp->dev, "Cannot enable VFS as representors cannot be created\n");
		goto err_out3;
	}

	return 0;

err_out3:
	/* Disable SR-IOV */
	pci_disable_sriov(bp->pdev);

err_out2:
	/* Free the resources reserved for various VF's */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

	/* Restore the max resources */
	bnxt_hwrm_func_qcaps(bp);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	/* synchronize VF and VF-rep create and destroy */
	devl_lock(bp->dl);
	bnxt_vf_reps_destroy(bp);

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VF's */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}
	devl_unlock(bp->dl);

	bnxt_free_vf_resources(bp);

	/* Reclaim all resources for the PF. */
	rtnl_lock();
	bnxt_restore_pf_fw_resources(bp);
	rtnl_unlock();
}

int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
		rtnl_unlock();
		return 0;
	}
	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if enabled VFs is same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previous existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	struct hwrm_fwd_resp_input *req;
	int rc;

	if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) {
		netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n",
				 msg_size);
		return -EINVAL;
	}

	rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
	if (!rc) {
		/* Set the new target id */
		req->target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_len = cpu_to_le16(msg_size);
		req->encap_resp_addr = encap_resp_addr;
		req->encap_resp_cmpl_ring = encap_resp_cpr;
		memcpy(req->encap_resp, encap_resp, msg_size);

		rc = hwrm_req_send(bp, req);
	}
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	struct hwrm_reject_fwd_resp_input *req;
	int rc;

	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP);
	if (!rc) {
		/* Set the new target id */
		req->target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
		memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);

		rc = hwrm_req_send(bp, req);
	}
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	struct hwrm_exec_fwd_resp_input *req;
	int rc;

	if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP);
	if (!rc) {
		/* Set the new target id */
		req->target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
		memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);

		rc = hwrm_req_send(bp, req);
	}
	if (rc)
		netdev_err(bp->dev, "hwrm_exec_fwd_resp failed. rc:%d\n", rc);
	return rc;
}

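/* Handle a forwarded HWRM_FUNC_VF_CFG request in which the VF tries to
 * configure its own default MAC address.
 */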
static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
	struct hwrm_func_vf_cfg_input *req =
		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

	/* Allow VF to set a valid MAC address, if trust is set to on or
	 * if the PF assigned MAC address is zero
	 */
	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
		bool trust = bnxt_is_trusted_vf(bp, vf);

		if (is_valid_ether_addr(req->dflt_mac_addr) &&
		    (trust || !is_valid_ether_addr(vf->mac_addr) ||
		     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
		}
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
	}
	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
	bool mac_ok = false;

	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

	/* Allow VF to set a valid MAC address, if trust is set to on.
	 * Or VF MAC address must first match MAC address in PF's context.
	 * Otherwise, it must match the VF MAC address if firmware spec >=
	 * 1.2.2
	 */
	if (bnxt_is_trusted_vf(bp, vf)) {
		mac_ok = true;
	} else if (is_valid_ether_addr(vf->mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
			mac_ok = true;
	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
			mac_ok = true;
	} else {
		/* There are two cases:
		 * 1. If firmware spec < 0x10202, VF MAC address is not
		 *    forwarded to the PF and so it doesn't have to match.
		 * 2. Allow VF to modify its own MAC when PF has not assigned
		 *    a valid MAC address and firmware spec >= 0x10202.
		 */
		mac_ok = true;
	}
	if (mac_ok)
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {};
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
			(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->link_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->link_lock);
		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
		/* New SPEEDS2 fields are beyond the legacy structure, so
		 * clear the SPEEDS2_SUPPORTED flag.
		 */
		phy_qcfg_resp.option_flags &=
			~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
		phy_qcfg_resp.valid = 1;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex_cfg =
					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
				phy_qcfg_resp.duplex_state =
					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex_state =
				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

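/* Validate a forwarded VF HWRM request and either execute it on the VF's
 * behalf or reject it, based on the request type.
 */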
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_FUNC_VF_CFG:
		rc = bnxt_vf_configure_mac(bp, vf);
		break;
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO Validate if VF is allowed to change mac address,
		 * mtu, num of rings etc
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

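/* Called from the PF's async event handling: walk the VF event bitmap
 * and process the forwarded command from each VF that has signaled one.
 */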
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VF's and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
	struct hwrm_func_vf_cfg_input *req;
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}

	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
	if (rc)
		goto mac_done;

	req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
	if (!strict)
		hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
	rc = hwrm_req_send(bp, req);
mac_done:
	if (rc && strict) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
		return rc;
	}
	return 0;
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_output *resp;
	struct hwrm_func_qcaps_input *req;
	bool inform_pf = false;

	if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS))
		return;

	req->fid = cpu_to_le16(0xffff);

	resp = hwrm_req_hold(bp, req);
	if (hwrm_req_send(bp, req))
		goto update_vf_mac_exit;

	/* Store MAC address from the firmware. There are 2 cases:
	 * 1. MAC address is valid. It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero. The VF will use a random MAC address by
	 *    default but the stored zero MAC will allow the VF user to change
	 *    the random MAC address using ndo_set_mac_address() if he wants.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) {
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
		/* This means we are now using our own MAC address, let
		 * the PF know about this MAC address.
		 */
		if (!is_valid_ether_addr(bp->vf.mac_addr))
			inform_pf = true;
	}

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		eth_hw_addr_set(bp->dev, bp->vf.mac_addr);
update_vf_mac_exit:
	hwrm_req_drop(bp, req);
	if (inform_pf)
		bnxt_approve_mac(bp, bp->dev->dev_addr, false);
}

#else

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	if (*num_vfs)
		return -EOPNOTSUPP;
	return 0;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
	return 0;
}
#endif