/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <net/dcbnl.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV

static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					   struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_input *req;
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL);
	if (rc)
		goto exit;

	if (vf)
		req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req->encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl =
		(struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl;
	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	rc = hwrm_req_send(bp, req);
exit:
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
	return rc;
}

static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.active_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

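/* .ndo_set_vf_spoofchk handler: toggle firmware source MAC address
 * checking for the given VF via HWRM_FUNC_CFG.
 */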
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	bool old_setting = false;
	struct bnxt_vf_info *vf;
	u32 func_flags;
	int rc;

	if (bp->hwrm_spec_code < 0x10701)
		return -ENOTSUPP;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	if (setting)
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
	else
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
	/*TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (!rc) {
		req->fid = cpu_to_le16(vf->fw_fid);
		req->flags = cpu_to_le32(func_flags);
		rc = hwrm_req_send(bp, req);
		if (!rc) {
			if (setting)
				vf->flags |= BNXT_VF_SPOOFCHK;
			else
				vf->flags &= ~BNXT_VF_SPOOFCHK;
		}
	}
	return rc;
}

static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		vf->func_qcfg_flags = le16_to_cpu(resp->flags);
	hwrm_req_drop(bp, req);
	return rc;
}

bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return !!(vf->flags & BNXT_VF_TRUST);

	bnxt_hwrm_func_qcfg_flags(bp, vf);
	return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
}

static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_cfg_input *req;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return 0;

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(vf->fw_fid);
	if (vf->flags & BNXT_VF_TRUST)
		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
	else
		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
	return hwrm_req_send(bp, req);
}

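/* .ndo_set_vf_trust handler: record the trust setting locally and push
 * it to the firmware when the trusted VF capability is supported.
 */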
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;

	if (bnxt_vf_ndo_prep(bp, vf_id))
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	if (trusted)
		vf->flags |= BNXT_VF_TRUST;
	else
		vf->flags &= ~BNXT_VF_TRUST;

	bnxt_hwrm_set_trusted_vf(bp, vf);
	return 0;
}

int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	if (is_valid_ether_addr(vf->mac_addr))
		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	else
		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan & VLAN_VID_MASK;
	ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
	ivi->trusted = bnxt_is_trusted_vf(bp, vf);
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject bc or mc mac addr, zero mac addr means allow
	 * VF to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	memcpy(vf->mac_addr, mac, ETH_ALEN);

	req->fid = cpu_to_le16(vf->fw_fid);
	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_req_send(bp, req);
}

int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q) &&
	    (vlan_proto != htons(ETH_P_8021AD) ||
	     !(bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP)))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	if (vlan_id >= VLAN_N_VID || qos >= IEEE_8021Q_MAX_PRIORITIES ||
	    (!vlan_id && qos))
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id | (u16)qos << VLAN_PRIO_SHIFT;
	if (vlan_tag == vf->vlan)
		return 0;

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (!rc) {
		req->fid = cpu_to_le16(vf->fw_fid);
		req->dflt_vlan = cpu_to_le16(vlan_tag);
		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
		if (bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP) {
			req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_TPID);
			req->tpid = vlan_proto;
		}
		rc = hwrm_req_send(bp, req);
		if (!rc)
			vf->vlan = vlan_tag;
	}
	return rc;
}

int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (!rc) {
		req->fid = cpu_to_le16(vf->fw_fid);
		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
					   FUNC_CFG_REQ_ENABLES_MIN_BW);
		req->max_bw = cpu_to_le32(max_tx_rate);
		req->min_bw = cpu_to_le32(min_tx_rate);
		rc = hwrm_req_send(bp, req);
		if (!rc) {
			vf->min_tx_rate = min_tx_rate;
			vf->max_tx_rate = max_tx_rate;
		}
	}
	return rc;
}

int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
	}
	return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_vf_resc_free_input *req;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
	if (rc)
		return rc;

	hwrm_req_hold(bp, req);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req->vf_id = cpu_to_le16(i);
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
	}
	hwrm_req_drop(bp, req);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	bp->pf.active_vfs = 0;
	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

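/* Allocate the per-VF info array, the DMA pages used to mirror forwarded
 * VF HWRM requests, and the VF event bitmap.
 */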
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

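/* Register the VF HWRM request buffer pages with the firmware so that VF
 * commands can be forwarded to the PF driver.
 */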
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR);
	if (rc)
		return rc;

	req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req->req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req->req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_req_send(bp, req);
}

static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	req->fid = cpu_to_le16(vf->fw_fid);

	if (is_valid_ether_addr(vf->mac_addr)) {
		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
		memcpy(req->dflt_mac_addr, vf->mac_addr, ETH_ALEN);
	}
	if (vf->vlan) {
		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
		req->dflt_vlan = cpu_to_le16(vf->vlan);
	}
	if (vf->max_tx_rate) {
		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
					    FUNC_CFG_REQ_ENABLES_MIN_BW);
		req->max_bw = cpu_to_le32(vf->max_tx_rate);
		req->min_bw = cpu_to_le32(vf->min_tx_rate);
	}
	if (vf->flags & BNXT_VF_TRUST)
		req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);

	return hwrm_req_send(bp, req);
}

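/* Query the RoCE resource maximums from the firmware and divide them
 * evenly among the VFs being enabled.
 */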
static void bnxt_hwrm_roce_sriov_cfg(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_qcaps_output *resp;
	struct hwrm_func_cfg_input *cfg_req;
	struct hwrm_func_qcaps_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
	if (rc)
		return;

	req->fid = cpu_to_le16(0xffff);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc)
		goto err;

	rc = hwrm_req_init(bp, cfg_req, HWRM_FUNC_CFG);
	if (rc)
		goto err;

	cfg_req->fid = cpu_to_le16(0xffff);
	cfg_req->enables2 =
		cpu_to_le32(FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF |
			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF |
			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF |
			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF |
			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF |
			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF);
	cfg_req->roce_max_av_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_av) / num_vfs);
	cfg_req->roce_max_cq_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_cq) / num_vfs);
	cfg_req->roce_max_mrw_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_mrw) / num_vfs);
	cfg_req->roce_max_qp_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_qp) / num_vfs);
	cfg_req->roce_max_srq_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_srq) / num_vfs);
	cfg_req->roce_max_gid_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_gid) / num_vfs);

	rc = hwrm_req_send(bp, cfg_req);

err:
	hwrm_req_drop(bp, req);
	if (rc)
		netdev_err(bp->dev, "RoCE sriov configuration failed\n");
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	struct hwrm_func_vf_resource_cfg_input *req;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc = 0, min = 1;
	u16 vf_msix = 0;
	u16 vf_rss;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG);
	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
		vf_ring_grps = 0;
	} else {
		vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
	}
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
	else
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
	vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;

	req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		min = 0;
		req->min_rsscos_ctx = cpu_to_le16(min);
	}
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		req->min_cmpl_rings = cpu_to_le16(min);
		req->min_tx_rings = cpu_to_le16(min);
		req->min_rx_rings = cpu_to_le16(min);
		req->min_l2_ctxs = cpu_to_le16(min);
		req->min_vnics = cpu_to_le16(min);
		req->min_stat_ctx = cpu_to_le16(min);
		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
			req->min_hw_ring_grps = cpu_to_le16(min);
	} else {
		vf_cp_rings /= num_vfs;
		vf_tx_rings /= num_vfs;
		vf_rx_rings /= num_vfs;
		if ((bp->fw_cap & BNXT_FW_CAP_PRE_RESV_VNICS) &&
		    vf_vnics >= pf->max_vfs) {
			/* Take into account that FW has pre-reserved 1 VNIC for
			 * each pf->max_vfs.
			 */
			vf_vnics = (vf_vnics - pf->max_vfs + num_vfs) / num_vfs;
		} else {
			vf_vnics /= num_vfs;
		}
		vf_stat_ctx /= num_vfs;
		vf_ring_grps /= num_vfs;
		vf_rss /= num_vfs;

		vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
		req->min_cmpl_rings = cpu_to_le16(vf_cp_rings);
		req->min_tx_rings = cpu_to_le16(vf_tx_rings);
		req->min_rx_rings = cpu_to_le16(vf_rx_rings);
		req->min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
		req->min_vnics = cpu_to_le16(vf_vnics);
		req->min_stat_ctx = cpu_to_le16(vf_stat_ctx);
		req->min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
		req->min_rsscos_ctx = cpu_to_le16(vf_rss);
	}
	req->max_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req->max_tx_rings = cpu_to_le16(vf_tx_rings);
	req->max_rx_rings = cpu_to_le16(vf_rx_rings);
	req->max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req->max_vnics = cpu_to_le16(vf_vnics);
	req->max_stat_ctx = cpu_to_le16(vf_stat_ctx);
	req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req->max_rsscos_ctx = cpu_to_le16(vf_rss);
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		req->max_msix = cpu_to_le16(vf_msix / num_vfs);

	hwrm_req_hold(bp, req);
	for (i = 0; i < num_vfs; i++) {
		if (reset)
			__bnxt_set_vf_params(bp, i);

		req->vf_id = cpu_to_le16(pf->first_vf_id + i);
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = pf->first_vf_id + i;
	}

	if (pf->active_vfs) {
		u16 n = pf->active_vfs;

		hw_resc->max_tx_rings -= le16_to_cpu(req->min_tx_rings) * n;
		hw_resc->max_rx_rings -= le16_to_cpu(req->min_rx_rings) * n;
		hw_resc->max_hw_ring_grps -=
			le16_to_cpu(req->min_hw_ring_grps) * n;
		hw_resc->max_cp_rings -= le16_to_cpu(req->min_cmpl_rings) * n;
		hw_resc->max_rsscos_ctxs -=
			le16_to_cpu(req->min_rsscos_ctx) * n;
		hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
		hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			hw_resc->max_nqs -= vf_msix;

		rc = pf->active_vfs;
	}
	hwrm_req_drop(bp, req);
	return rc;
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_cfg_input *req;
	int total_vf_tx_rings = 0;
	u16 vf_ring_grps;
	u32 mtu, i;
	int rc;

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	/* Remaining rings are distributed equally amongs VF's for now */
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
			      num_vfs;
	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
				   FUNC_CFG_REQ_ENABLES_MRU |
				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				   FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
	req->mru = cpu_to_le16(mtu);
	req->admin_mtu = cpu_to_le16(mtu);

	req->num_rsscos_ctxs = cpu_to_le16(1);
	req->num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req->num_tx_rings = cpu_to_le16(vf_tx_rings);
	req->num_rx_rings = cpu_to_le16(vf_rx_rings);
	req->num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req->num_l2_ctxs = cpu_to_le16(4);

	req->num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req->num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	hwrm_req_hold(bp, req);
	for (i = 0; i < num_vfs; i++) {
		int vf_tx_rsvd = vf_tx_rings;

		req->fid = cpu_to_le16(pf->first_vf_id + i);
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req->fid);
		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
					      &vf_tx_rsvd);
		if (rc)
			break;
		total_vf_tx_rings += vf_tx_rsvd;
	}
	hwrm_req_drop(bp, req);
	if (pf->active_vfs) {
		hw_resc->max_tx_rings -= total_vf_tx_rings;
		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
		hw_resc->max_rsscos_ctxs -= num_vfs;
		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		hw_resc->max_vnics -= vf_vnics * num_vfs;
		rc = pf->active_vfs;
	}
	return rc;
}

static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	if (BNXT_NEW_RM(bp))
		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
	else
		return bnxt_hwrm_func_cfg(bp, num_vfs);
}

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	int rc;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		return rc;

	/* Reserve resources for VFs */
	rc = bnxt_func_cfg(bp, *num_vfs, reset);
	if (rc != *num_vfs) {
		if (rc <= 0) {
			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
			*num_vfs = 0;
			return rc;
		}
		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
			    rc);
		*num_vfs = rc;
	}

	if (BNXT_RDMA_SRIOV_EN(bp) && BNXT_ROCE_VF_RESC_CAP(bp))
		bnxt_hwrm_roce_sriov_cfg(bp, *num_vfs);

	return 0;
}

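/* Work out how many VFs the remaining PF resources can support, reserve
 * the resources, and then enable SR-IOV on the PCI device.
 */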
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
	int avail_cp, avail_stat;

	/* Check if we can enable requested num of vf's. At a mininum
	 * we require 1 RX 1 TX rings for each VF. In this minimum conf
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
	avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
	avail_cp = min_t(int, avail_cp, avail_stat);

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}
		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
		    avail_cp < min_rx_rings)
			rx_ok = 0;

		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		    avail_cp >= min_tx_rings)
			tx_ok = 1;

		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
		    min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return 0;

	/* Create representors for VFs in switchdev mode */
	rc = bnxt_vf_reps_create(bp);
	if (rc) {
		netdev_info(bp->dev, "Cannot enable VFS as representors cannot be created\n");
		goto err_out3;
	}

	return 0;

err_out3:
	/* Disable SR-IOV */
	pci_disable_sriov(bp->pdev);

err_out2:
	/* Free the resources reserved for various VF's */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

	/* Restore the max resources */
	bnxt_hwrm_func_qcaps(bp);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	/* synchronize VF and VF-rep create and destroy */
	bnxt_vf_reps_destroy(bp);

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VF's */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}

	bnxt_free_vf_resources(bp);

	/* Reclaim all resources for the PF. */
	bnxt_restore_pf_fw_resources(bp);
}

int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!netif_running(dev)) {
		netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
		return 0;
	}
	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
		return 0;
	}
	bp->sriov_cfg = true;

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if enabled VFs is same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previous existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	struct hwrm_fwd_resp_input *req;
	int rc;

	if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) {
		netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n",
				 msg_size);
		return -EINVAL;
	}

	rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
	if (!rc) {
		/* Set the new target id */
		req->target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_len = cpu_to_le16(msg_size);
		req->encap_resp_addr = encap_resp_addr;
		req->encap_resp_cmpl_ring = encap_resp_cpr;
		memcpy(req->encap_resp, encap_resp, msg_size);

		rc = hwrm_req_send(bp, req);
	}
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	struct hwrm_reject_fwd_resp_input *req;
	int rc;

	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP);
	if (!rc) {
		/* Set the new target id */
		req->target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
		memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);

		rc = hwrm_req_send(bp, req);
	}
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	struct hwrm_exec_fwd_resp_input *req;
	int rc;

	if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP);
	if (!rc) {
		/* Set the new target id */
		req->target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
		memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);

		rc = hwrm_req_send(bp, req);
	}
	if (rc)
		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
	struct hwrm_func_vf_cfg_input *req =
		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

	/* Allow VF to set a valid MAC address, if trust is set to on or
	 * if the PF assigned MAC address is zero
	 */
	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
		bool trust = bnxt_is_trusted_vf(bp, vf);

		if (is_valid_ether_addr(req->dflt_mac_addr) &&
		    (trust || !is_valid_ether_addr(vf->mac_addr) ||
		     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
		}
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
	}
	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
	bool mac_ok = false;

	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

	/* Allow VF to set a valid MAC address, if trust is set to on.
	 * Or VF MAC address must first match MAC address in PF's context.
	 * Otherwise, it must match the VF MAC address if firmware spec >=
	 * 1.2.2
	 */
	if (bnxt_is_trusted_vf(bp, vf)) {
		mac_ok = true;
	} else if (is_valid_ether_addr(vf->mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
			mac_ok = true;
	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
			mac_ok = true;
	} else {
		/* There are two cases:
		 * 1.If firmware spec < 0x10202,VF MAC address is not forwarded
		 *   to the PF and so it doesn't have to match
		 * 2.Allow VF to modify it's own MAC when PF has not assigned a
		 *   valid MAC address and firmware spec >= 0x10202
		 */
		mac_ok = true;
	}
	if (mac_ok)
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {};
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->link_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->link_lock);
		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
		/* New SPEEDS2 fields are beyond the legacy structure, so
		 * clear the SPEEDS2_SUPPORTED flag.
		 */
		phy_qcfg_resp.option_flags &=
			~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
		phy_qcfg_resp.valid = 1;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex_cfg =
					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
				phy_qcfg_resp.duplex_state =
					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex_state =
				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

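/* Dispatch a forwarded VF HWRM command to the appropriate validation
 * handler before sending it to the firmware on the VF's behalf.
 */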
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_FUNC_VF_CFG:
		rc = bnxt_vf_configure_mac(bp, vf);
		break;
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO Validate if VF is allowed to change mac address,
		 * mtu, num of rings etc
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VF's and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

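/* Called on the VF to ask the PF to approve the MAC address the VF wants
 * to use.  With strict checking, a rejection is returned as an error.
 */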
int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
	struct hwrm_func_vf_cfg_input *req;
	int rc = 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}

	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
	if (rc)
		goto mac_done;

	req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
	if (!strict)
		hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
	rc = hwrm_req_send(bp, req);
mac_done:
	if (rc && strict) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
		return rc;
	}
	return 0;
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_output *resp;
	struct hwrm_func_qcaps_input *req;
	bool inform_pf = false;

	if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS))
		return;

	req->fid = cpu_to_le16(0xffff);

	resp = hwrm_req_hold(bp, req);
	if (hwrm_req_send(bp, req))
		goto update_vf_mac_exit;

	/* Store MAC address from the firmware. There are 2 cases:
	 * 1. MAC address is valid. It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero. The VF will use a random MAC address by
	 *    default but the stored zero MAC will allow the VF user to change
	 *    the random MAC address using ndo_set_mac_address() if he wants.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) {
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
		/* This means we are now using our own MAC address, let
		 * the PF know about this MAC address.
		 */
		if (!is_valid_ether_addr(bp->vf.mac_addr))
			inform_pf = true;
	}

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		eth_hw_addr_set(bp->dev, bp->vf.mac_addr);
update_vf_mac_exit:
	hwrm_req_drop(bp, req);
	if (inform_pf)
		bnxt_approve_mac(bp, bp->dev->dev_addr, false);
}

#else

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	if (*num_vfs)
		return -EOPNOTSUPP;
	return 0;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
	return 0;
}
#endif