/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
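/* Forward an async event completion to one VF, or broadcast it to all VFs
 * when vf is NULL.
 */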
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_input req = {0};
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
	if (vf)
		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req.encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
	return rc;
}
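/* Common sanity checks for the VF ndo callbacks: the PF must be up,
 * SR-IOV must be enabled, and vf_id must be within the active VF range.
 */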
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		netdev_err(bp->dev, "vf ndo called though PF is down\n");
		return -EINVAL;
	}
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.active_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}
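/* ndo_set_vf_spoofchk: toggle firmware source MAC address checking for a VF. */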
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	if (bp->hwrm_spec_code < 0x10701)
		return -ENOTSUPP;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	if (setting)
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
	else
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
	/*TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}
static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}
	vf->func_qcfg_flags = le16_to_cpu(resp->flags);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}
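/* A VF is trusted if the firmware reports it as trusted (when the firmware
 * supports trusted VFs) or if the PF has set the BNXT_VF_TRUST flag.
 */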
static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return !!(vf->flags & BNXT_VF_TRUST);

	bnxt_hwrm_func_qcfg_flags(bp, vf);
	return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
}

static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_cfg_input req = {0};

	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	if (vf->flags & BNXT_VF_TRUST)
		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
	else
		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;

	if (bnxt_vf_ndo_prep(bp, vf_id))
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	if (trusted)
		vf->flags |= BNXT_VF_TRUST;
	else
		vf->flags &= ~BNXT_VF_TRUST;

	bnxt_hwrm_set_trusted_vf(bp, vf);
	return 0;
}
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	if (is_valid_ether_addr(vf->mac_addr))
		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	else
		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	if (vf->flags & BNXT_VF_QOS)
		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
	else
		ivi->qos = 0;
	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
	ivi->trusted = bnxt_is_trusted_vf(bp, vf);
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}
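/* ndo_set_vf_mac: store the administratively assigned MAC and program it as
 * the VF's default MAC address via HWRM_FUNC_CFG.
 */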
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject bc or mc mac addr, zero mac addr means allow
	 * VF to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: needed to implement proper handling of user priority,
	 * currently fail the command if there is valid priority
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}
static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
	}
	return 0;
}
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	bp->pf.active_vfs = 0;
	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}
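/* Allocate the per-VF state array, the VF event bitmap, and the DMA pages
 * used to receive forwarded HWRM requests from the VFs.
 */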
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		/* Allocate one page of VF HWRM request buffers and map it */
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VF's */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}
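/* Register the VF HWRM request buffer pages with the firmware. */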
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
/* Caller holds bp->hwrm_cmd_lock mutex lock */
static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_vf_info *vf;

	vf = &bp->pf.vf[vf_id];
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);

	if (is_valid_ether_addr(vf->mac_addr)) {
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
		memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
	}
	if (vf->vlan) {
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
		req.dflt_vlan = cpu_to_le16(vf->vlan);
	}
	if (vf->max_tx_rate) {
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
		req.max_bw = cpu_to_le32(vf->max_tx_rate);
#ifdef HAVE_IFLA_TX_RATE
		req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
		req.min_bw = cpu_to_le32(vf->min_tx_rate);
#endif
	}
	if (vf->flags & BNXT_VF_TRUST)
		req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);

	_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	struct hwrm_func_vf_resource_cfg_input req = {0};
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc = 0, min = 1;
	u16 vf_msix = 0;
	u16 vf_rss;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
		vf_ring_grps = 0;
	} else {
		vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
	}
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
	else
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
	vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;

	req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		min = 0;
		req.min_rsscos_ctx = cpu_to_le16(min);
	}
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		req.min_cmpl_rings = cpu_to_le16(min);
		req.min_tx_rings = cpu_to_le16(min);
		req.min_rx_rings = cpu_to_le16(min);
		req.min_l2_ctxs = cpu_to_le16(min);
		req.min_vnics = cpu_to_le16(min);
		req.min_stat_ctx = cpu_to_le16(min);
		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			req.min_hw_ring_grps = cpu_to_le16(min);
	} else {
		vf_cp_rings /= num_vfs;
		vf_tx_rings /= num_vfs;
		vf_rx_rings /= num_vfs;
		vf_vnics /= num_vfs;
		vf_stat_ctx /= num_vfs;
		vf_ring_grps /= num_vfs;
		vf_rss /= num_vfs;

		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
		req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
		req.min_vnics = cpu_to_le16(vf_vnics);
		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
		req.min_rsscos_ctx = cpu_to_le16(vf_rss);
	}
	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
	req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req.max_vnics = cpu_to_le16(vf_vnics);
	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.max_rsscos_ctx = cpu_to_le16(vf_rss);
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		req.max_msix = cpu_to_le16(vf_msix / num_vfs);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		if (reset)
			__bnxt_set_vf_params(bp, i);

		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = pf->first_vf_id + i;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (pf->active_vfs) {
		u16 n = pf->active_vfs;

		hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
		hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
		hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
					     n;
		hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
		hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
		hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
		hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			hw_resc->max_irqs -= vf_msix * n;

		rc = pf->active_vfs;
	}
	return rc;
}
/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	int total_vf_tx_rings = 0;
	u16 vf_ring_grps;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	/* Remaining rings are distributed equally amongs VF's for now */
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
			      num_vfs;
	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);

	req.num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		int vf_tx_rsvd = vf_tx_rings;

		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
					      &vf_tx_rsvd);
		if (rc)
			break;
		total_vf_tx_rings += vf_tx_rsvd;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (pf->active_vfs) {
		hw_resc->max_tx_rings -= total_vf_tx_rings;
		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
		hw_resc->max_rsscos_ctxs -= num_vfs;
		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		hw_resc->max_vnics -= vf_vnics * num_vfs;

		rc = pf->active_vfs;
	}
	return rc;
}
static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	if (BNXT_NEW_RM(bp))
		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
	else
		return bnxt_hwrm_func_cfg(bp, num_vfs);
}
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	int rc;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		return rc;

	/* Reserve resources for VFs */
	rc = bnxt_func_cfg(bp, *num_vfs, reset);
	if (rc != *num_vfs) {
		if (rc <= 0) {
			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
			*num_vfs = 0;
			return -ENOMEM;
		}
		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
			    rc);
		*num_vfs = rc;
	}

	bnxt_ulp_sriov_cfg(bp, *num_vfs);
	return 0;
}
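/* Size the VF pool against the available rings, VNICs and RSS contexts,
 * allocate VF resources, configure them in hardware, and enable SR-IOV on
 * the PCI device.
 */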
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
	int avail_cp, avail_stat;

	/* Check if we can enable requested num of vf's. At a mininum
	 * we require 1 RX 1 TX rings for each VF. In this minimum conf
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
	avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
	avail_cp = min_t(int, avail_cp, avail_stat);

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}
		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
		    avail_cp < min_rx_rings)
			rx_ok = 0;

		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		    avail_cp >= min_tx_rings)
			tx_ok = 1;

		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
		    min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for various VF's */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}
void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	/* synchronize VF and VF-rep create and destroy */
	mutex_lock(&bp->sriov_lock);
	bnxt_vf_reps_destroy(bp);

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VF's */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}
	mutex_unlock(&bp->sriov_lock);

	bnxt_free_vf_resources(bp);

	bp->pf.active_vfs = 0;
	/* Reclaim all resources for the PF. */
	rtnl_lock();
	bnxt_restore_pf_fw_resources(bp);
	rtnl_unlock();

	bnxt_ulp_sriov_cfg(bp, 0);
}
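/* PCI sriov_configure entry point: validates the request, tears down any
 * existing VFs, and enables the requested number of VFs.
 */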
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
		rtnl_unlock();
		return 0;
	}
	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if enabled VFs is same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previous existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};

	if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};

	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};

	if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
	return rc;
}
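/* Handle a forwarded HWRM_FUNC_VF_CFG request that may set the VF's own
 * MAC address.
 */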
static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
	struct hwrm_func_vf_cfg_input *req =
		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

	/* Allow VF to set a valid MAC address, if trust is set to on or
	 * if the PF assigned MAC address is zero
	 */
	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
		bool trust = bnxt_is_trusted_vf(bp, vf);

		if (is_valid_ether_addr(req->dflt_mac_addr) &&
		    (trust || !is_valid_ether_addr(vf->mac_addr) ||
		     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
		}
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
	}
	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
	bool mac_ok = false;

	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

	/* Allow VF to set a valid MAC address, if trust is set to on.
	 * Or VF MAC address must first match MAC address in PF's context.
	 * Otherwise, it must match the VF MAC address if firmware spec >=
	 * 1.2.2
	 */
	if (bnxt_is_trusted_vf(bp, vf)) {
		mac_ok = true;
	} else if (is_valid_ether_addr(vf->mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
			mac_ok = true;
	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
			mac_ok = true;
	} else {
		/* There are two cases:
		 * 1.If firmware spec < 0x10202,VF MAC address is not forwarded
		 *   to the PF and so it doesn't have to match
		 * 2.Allow VF to modify it's own MAC when PF has not assigned a
		 *   valid MAC address and firmware spec >= 0x10202
		 */
		mac_ok = true;
	}
	if (mac_ok)
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
		phy_qcfg_resp.valid = 1;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex_cfg =
					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
				phy_qcfg_resp.duplex_state =
					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex_state =
				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_FUNC_VF_CFG:
		rc = bnxt_vf_configure_mac(bp, vf);
		break;
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO Validate if VF is allowed to change mac address,
		 * mtu, num of rings etc
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}
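/* Process pending forwarded HWRM requests from all VFs flagged in the
 * VF event bitmap.
 */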
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VF's and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}
void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store MAC address from the firmware. There are 2 cases:
	 * 1. MAC address is valid. It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero. The VF will use a random MAC address by
	 *    default but the stored zero MAC will allow the VF user to change
	 *    the random MAC address using ndo_set_mac_address() if he wants.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}
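/* Called on the VF side to ask the PF to approve a MAC address change. */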
int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
	if (rc && strict) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
		return rc;
	}
	return 0;
}
#else

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	if (*num_vfs)
		return -EOPNOTSUPP;
	return 0;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
	return 0;
}
#endif