/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"
#ifdef CONFIG_BNXT_SRIOV
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
                                          struct bnxt_vf_info *vf, u16 event_id)
{
        struct hwrm_fwd_async_event_cmpl_input req = {0};
        struct hwrm_async_event_cmpl *async_cmpl;
        int rc = 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
        if (vf)
                req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
        else
                /* broadcast this async event to all VFs */
                req.encap_async_event_target_id = cpu_to_le16(0xffff);
        async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
        async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
        async_cmpl->event_id = cpu_to_le16(event_id);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
                           rc);
        return rc;
}
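
/* Common sanity checks for the VF ndo callbacks below: the PF must be up,
 * SR-IOV must be enabled, and the VF index must be within range.
 */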
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
        if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
                netdev_err(bp->dev, "vf ndo called though PF is down\n");
                return -EINVAL;
        }
        if (!bp->pf.active_vfs) {
                netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
                return -EINVAL;
        }
        if (vf_id >= bp->pf.active_vfs) {
                netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
                return -EINVAL;
        }
        return 0;
}
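
/* ndo_set_vf_spoofchk handler: toggle the firmware source MAC address check
 * for the given VF.
 */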
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        bool old_setting = false;
        u32 func_flags;
        int rc;

        if (bp->hwrm_spec_code < 0x10701)
                return -ENOTSUPP;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        if (vf->flags & BNXT_VF_SPOOFCHK)
                old_setting = true;
        if (old_setting == setting)
                return 0;

        func_flags = vf->func_flags;
        if (setting)
                func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
        else
                func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
        /* TODO: if the driver supports VLAN filter on guest VLAN,
         * the spoof check should also include vlan anti-spoofing
         */
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(func_flags);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                vf->func_flags = func_flags;
                if (setting)
                        vf->flags |= BNXT_VF_SPOOFCHK;
                else
                        vf->flags &= ~BNXT_VF_SPOOFCHK;
        }
        return rc;
}
static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_qcfg_input req = {0};
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc) {
                mutex_unlock(&bp->hwrm_cmd_lock);
                return rc;
        }
        vf->func_qcfg_flags = le16_to_cpu(resp->flags);
        mutex_unlock(&bp->hwrm_cmd_lock);
        return 0;
}
static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
                return !!(vf->flags & BNXT_VF_TRUST);

        bnxt_hwrm_func_qcfg_flags(bp, vf);
        return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
}
static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        struct hwrm_func_cfg_input req = {0};
        int rc;

        if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
                return 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        if (vf->flags & BNXT_VF_TRUST)
                req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
        else
                req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        return rc;
}
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;

        if (bnxt_vf_ndo_prep(bp, vf_id))
                return -EINVAL;

        vf = &bp->pf.vf[vf_id];
        if (trusted)
                vf->flags |= BNXT_VF_TRUST;
        else
                vf->flags &= ~BNXT_VF_TRUST;

        bnxt_hwrm_set_trusted_vf(bp, vf);
        return 0;
}
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
                       struct ifla_vf_info *ivi)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        ivi->vf = vf_id;
        vf = &bp->pf.vf[vf_id];

        if (is_valid_ether_addr(vf->mac_addr))
                memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
        else
                memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
        ivi->max_tx_rate = vf->max_tx_rate;
        ivi->min_tx_rate = vf->min_tx_rate;
        ivi->vlan = vf->vlan;
        if (vf->flags & BNXT_VF_QOS)
                ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
        else
                ivi->qos = 0;
        ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
        ivi->trusted = bnxt_is_trusted_vf(bp, vf);
        if (!(vf->flags & BNXT_VF_LINK_FORCED))
                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
        else if (vf->flags & BNXT_VF_LINK_UP)
                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
        else
                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

        return 0;
}
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;
        /* reject bc or mc mac addr, zero mac addr means allow
         * VF to use its own mac addr
         */
        if (is_multicast_ether_addr(mac)) {
                netdev_err(dev, "Invalid VF ethernet address\n");
                return -EINVAL;
        }
        vf = &bp->pf.vf[vf_id];

        memcpy(vf->mac_addr, mac, ETH_ALEN);
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
                     __be16 vlan_proto)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        u16 vlan_tag;
        int rc;

        if (bp->hwrm_spec_code < 0x10201)
                return -ENOTSUPP;

        if (vlan_proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        /* TODO: needed to implement proper handling of user priority,
         * currently fail the command if there is valid priority
         */
        if (vlan_id > 4095 || qos)
                return -EINVAL;

        vf = &bp->pf.vf[vf_id];
        vlan_tag = vlan_id;
        if (vlan_tag == vf->vlan)
                return 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);
        req.dflt_vlan = cpu_to_le16(vlan_tag);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                vf->vlan = vlan_tag;
        return rc;
}
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
                   int max_tx_rate)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        u32 pf_link_speed;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
        if (max_tx_rate > pf_link_speed) {
                netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
                            max_tx_rate, vf_id);
                return -EINVAL;
        }

        if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
                netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
                            min_tx_rate, vf_id);
                return -EINVAL;
        }
        if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
                return 0;
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
        req.max_bw = cpu_to_le32(max_tx_rate);
        req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
        req.min_bw = cpu_to_le32(min_tx_rate);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                vf->min_tx_rate = min_tx_rate;
                vf->max_tx_rate = max_tx_rate;
        }
        return rc;
}
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];

        vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
        switch (link) {
        case IFLA_VF_LINK_STATE_AUTO:
                vf->flags |= BNXT_VF_LINK_UP;
                break;
        case IFLA_VF_LINK_STATE_DISABLE:
                vf->flags |= BNXT_VF_LINK_FORCED;
                break;
        case IFLA_VF_LINK_STATE_ENABLE:
                vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
                break;
        default:
                netdev_err(bp->dev, "Invalid link option\n");
                rc = -EINVAL;
                break;
        }
        if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
                rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
                        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
        return rc;
}
static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
        int i;
        struct bnxt_vf_info *vf;

        for (i = 0; i < num_vfs; i++) {
                vf = &bp->pf.vf[i];
                memset(vf, 0, sizeof(*vf));
        }
        return 0;
}
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
        int i, rc = 0;
        struct bnxt_pf_info *pf = &bp->pf;
        struct hwrm_func_vf_resc_free_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
                req.vf_id = cpu_to_le16(i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}
static void bnxt_free_vf_resources(struct bnxt *bp)
{
        struct pci_dev *pdev = bp->pdev;
        int i;

        kfree(bp->pf.vf_event_bmap);
        bp->pf.vf_event_bmap = NULL;

        for (i = 0; i < 4; i++) {
                if (bp->pf.hwrm_cmd_req_addr[i]) {
                        dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
                                          bp->pf.hwrm_cmd_req_addr[i],
                                          bp->pf.hwrm_cmd_req_dma_addr[i]);
                        bp->pf.hwrm_cmd_req_addr[i] = NULL;
                }
        }

        bp->pf.active_vfs = 0;
        kfree(bp->pf.vf);
        bp->pf.vf = NULL;
}
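
/* Allocate the per-VF state array and the DMA-coherent pages used as
 * firmware request buffers; each VF gets a BNXT_HWRM_REQ_MAX_SIZE slice
 * of one of these pages.
 */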
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
        struct pci_dev *pdev = bp->pdev;
        u32 nr_pages, size, i, j, k = 0;

        bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
        if (!bp->pf.vf)
                return -ENOMEM;

        bnxt_set_vf_attr(bp, num_vfs);

        size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
        nr_pages = size / BNXT_PAGE_SIZE;
        if (size & (BNXT_PAGE_SIZE - 1))
                nr_pages++;

        for (i = 0; i < nr_pages; i++) {
                bp->pf.hwrm_cmd_req_addr[i] =
                        dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
                                           &bp->pf.hwrm_cmd_req_dma_addr[i],
                                           GFP_KERNEL);

                if (!bp->pf.hwrm_cmd_req_addr[i])
                        return -ENOMEM;

                for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
                        struct bnxt_vf_info *vf = &bp->pf.vf[k];

                        vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
                                                j * BNXT_HWRM_REQ_MAX_SIZE;
                        vf->hwrm_cmd_req_dma_addr =
                                bp->pf.hwrm_cmd_req_dma_addr[i] + j *
                                BNXT_HWRM_REQ_MAX_SIZE;
                        k++;
                }
        }

        bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
        if (!bp->pf.vf_event_bmap)
                return -ENOMEM;

        bp->pf.hwrm_cmd_req_pages = nr_pages;
        return 0;
}
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
        struct hwrm_func_buf_rgtr_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

        req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
        req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
        req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
        req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
        req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
        req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
        req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
/* Caller holds bp->hwrm_cmd_lock mutex lock */
static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt_vf_info *vf;

        vf = &bp->pf.vf[vf_id];
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(vf->func_flags);

        if (is_valid_ether_addr(vf->mac_addr)) {
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
                memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
        }
        if (vf->vlan) {
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
                req.dflt_vlan = cpu_to_le16(vf->vlan);
        }
        if (vf->max_tx_rate) {
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
                req.max_bw = cpu_to_le32(vf->max_tx_rate);
#ifdef HAVE_IFLA_TX_RATE
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
                req.min_bw = cpu_to_le32(vf->min_tx_rate);
#endif
        }
        if (vf->flags & BNXT_VF_TRUST)
                req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);

        _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
        struct hwrm_func_vf_resource_cfg_input req = {0};
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
        u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
        struct bnxt_pf_info *pf = &bp->pf;
        int i, rc = 0, min = 1;
        u16 vf_msix = 0;
        u16 vf_rss;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);

        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
                vf_ring_grps = 0;
        } else {
                vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
        }
        vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
        vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
        else
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
        vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
        vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
        vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;

        req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
        if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                min = 0;
                req.min_rsscos_ctx = cpu_to_le16(min);
        }
        if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
            pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                req.min_cmpl_rings = cpu_to_le16(min);
                req.min_tx_rings = cpu_to_le16(min);
                req.min_rx_rings = cpu_to_le16(min);
                req.min_l2_ctxs = cpu_to_le16(min);
                req.min_vnics = cpu_to_le16(min);
                req.min_stat_ctx = cpu_to_le16(min);
                if (!(bp->flags & BNXT_FLAG_CHIP_P5))
                        req.min_hw_ring_grps = cpu_to_le16(min);
        } else {
                vf_cp_rings /= num_vfs;
                vf_tx_rings /= num_vfs;
                vf_rx_rings /= num_vfs;
                vf_vnics /= num_vfs;
                vf_stat_ctx /= num_vfs;
                vf_ring_grps /= num_vfs;
                vf_rss /= num_vfs;

                req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
                req.min_tx_rings = cpu_to_le16(vf_tx_rings);
                req.min_rx_rings = cpu_to_le16(vf_rx_rings);
                req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
                req.min_vnics = cpu_to_le16(vf_vnics);
                req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
                req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
                req.min_rsscos_ctx = cpu_to_le16(vf_rss);
        }
        req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
        req.max_tx_rings = cpu_to_le16(vf_tx_rings);
        req.max_rx_rings = cpu_to_le16(vf_rx_rings);
        req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
        req.max_vnics = cpu_to_le16(vf_vnics);
        req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
        req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        req.max_rsscos_ctx = cpu_to_le16(vf_rss);
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                req.max_msix = cpu_to_le16(vf_msix / num_vfs);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = 0; i < num_vfs; i++) {
                if (reset)
                        __bnxt_set_vf_params(bp, i);

                req.vf_id = cpu_to_le16(pf->first_vf_id + i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = pf->first_vf_id + i;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        if (pf->active_vfs) {
                u16 n = pf->active_vfs;

                hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
                hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
                hw_resc->max_hw_ring_grps -=
                        le16_to_cpu(req.min_hw_ring_grps) * n;
                hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
                hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
                hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
                hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        hw_resc->max_irqs -= vf_msix * n;

                rc = pf->active_vfs;
        }
        return rc;
}
/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
        u32 rc = 0, mtu, i;
        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        struct hwrm_func_cfg_input req = {0};
        struct bnxt_pf_info *pf = &bp->pf;
        int total_vf_tx_rings = 0;
        u16 vf_ring_grps;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

        /* Remaining rings are distributed equally among VFs for now */
        vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
        vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
                              num_vfs;
        else
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
                              num_vfs;
        vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
        vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
        vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
                                  FUNC_CFG_REQ_ENABLES_MRU |
                                  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
                                  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

        mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        req.mru = cpu_to_le16(mtu);
        req.mtu = cpu_to_le16(mtu);

        req.num_rsscos_ctxs = cpu_to_le16(1);
        req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
        req.num_tx_rings = cpu_to_le16(vf_tx_rings);
        req.num_rx_rings = cpu_to_le16(vf_rx_rings);
        req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        req.num_l2_ctxs = cpu_to_le16(4);

        req.num_vnics = cpu_to_le16(vf_vnics);
        /* FIXME spec currently uses 1 bit for stats ctx */
        req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = 0; i < num_vfs; i++) {
                int vf_tx_rsvd = vf_tx_rings;

                req.fid = cpu_to_le16(pf->first_vf_id + i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = le16_to_cpu(req.fid);
                rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
                                              &vf_tx_rsvd);
                if (rc)
                        break;
                total_vf_tx_rings += vf_tx_rsvd;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        if (pf->active_vfs) {
                hw_resc->max_tx_rings -= total_vf_tx_rings;
                hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
                hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
                hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
                hw_resc->max_rsscos_ctxs -= num_vfs;
                hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
                hw_resc->max_vnics -= vf_vnics * num_vfs;

                rc = pf->active_vfs;
        }
        return rc;
}
static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
        if (BNXT_NEW_RM(bp))
                return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
        else
                return bnxt_hwrm_func_cfg(bp, num_vfs);
}
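
/* Register the VF request buffers with firmware and reserve hardware
 * resources for *num_vfs; on partial success *num_vfs is reduced to the
 * number of VFs that could actually be provisioned.
 */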
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
        int rc;

        /* Register buffers for VFs */
        rc = bnxt_hwrm_func_buf_rgtr(bp);
        if (rc)
                return rc;

        /* Reserve resources for VFs */
        rc = bnxt_func_cfg(bp, *num_vfs, reset);
        if (rc != *num_vfs) {
                if (rc <= 0) {
                        netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
                        *num_vfs = 0;
                        return rc;
                }
                netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
                            rc);
                *num_vfs = rc;
        }

        bnxt_ulp_sriov_cfg(bp, *num_vfs);
        return 0;
}
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
        int rc = 0, vfs_supported;
        int min_rx_rings, min_tx_rings, min_rss_ctxs;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        int tx_ok = 0, rx_ok = 0, rss_ok = 0;
        int avail_cp, avail_stat;

        /* Check if we can enable the requested number of VFs. At a minimum
         * we require 1 RX and 1 TX ring for each VF. In this minimum config,
         * features like TPA will not be available.
         */
        vfs_supported = *num_vfs;

        avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
        avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
        avail_cp = min_t(int, avail_cp, avail_stat);

        while (vfs_supported) {
                min_rx_rings = vfs_supported;
                min_tx_rings = vfs_supported;
                min_rss_ctxs = vfs_supported;

                if (bp->flags & BNXT_FLAG_AGG_RINGS) {
                        if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
                            min_rx_rings)
                                rx_ok = 1;
                } else {
                        if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
                            min_rx_rings)
                                rx_ok = 1;
                }
                if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
                    avail_cp < min_rx_rings)
                        rx_ok = 0;

                if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
                    avail_cp >= min_tx_rings)
                        tx_ok = 1;

                if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
                    min_rss_ctxs)
                        rss_ok = 1;

                if (tx_ok && rx_ok && rss_ok)
                        break;

                vfs_supported--;
        }

        if (!vfs_supported) {
                netdev_err(bp->dev, "Cannot enable VFs as all resources are used by PF\n");
                return -EINVAL;
        }

        if (vfs_supported != *num_vfs) {
                netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
                            *num_vfs, vfs_supported);
                *num_vfs = vfs_supported;
        }

        rc = bnxt_alloc_vf_resources(bp, *num_vfs);
        if (rc)
                goto err_out1;

        rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
        if (rc)
                goto err_out2;

        rc = pci_enable_sriov(bp->pdev, *num_vfs);
        if (rc)
                goto err_out2;

        return 0;

err_out2:
        /* Free the resources reserved for various VFs */
        bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
        bnxt_free_vf_resources(bp);

        return rc;
}
void bnxt_sriov_disable(struct bnxt *bp)
{
        u16 num_vfs = pci_num_vf(bp->pdev);

        if (!num_vfs)
                return;

        /* synchronize VF and VF-rep create and destroy */
        mutex_lock(&bp->sriov_lock);
        bnxt_vf_reps_destroy(bp);

        if (pci_vfs_assigned(bp->pdev)) {
                bnxt_hwrm_fwd_async_event_cmpl(
                        bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
                netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
                            num_vfs);
        } else {
                pci_disable_sriov(bp->pdev);
                /* Free the HW resources reserved for various VFs */
                bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
        }
        mutex_unlock(&bp->sriov_lock);

        bnxt_free_vf_resources(bp);

        bp->pf.active_vfs = 0;
        /* Reclaim all resources for the PF. */
        rtnl_lock();
        bnxt_restore_pf_fw_resources(bp);
        rtnl_unlock();

        bnxt_ulp_sriov_cfg(bp, 0);
}
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(dev);

        if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
                netdev_warn(dev, "Not allowing SRIOV if the irq mode is not MSIX\n");
                return 0;
        }

        rtnl_lock();
        if (!netif_running(dev)) {
                netdev_warn(dev, "Rejecting SRIOV config request since the interface is down\n");
                rtnl_unlock();
                return 0;
        }
        if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
                netdev_warn(dev, "Rejecting SRIOV config request while FW reset is in progress\n");
                rtnl_unlock();
                return 0;
        }
        bp->sriov_cfg = true;
        rtnl_unlock();

        if (pci_vfs_assigned(bp->pdev)) {
                netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
                num_vfs = 0;
                goto sriov_cfg_exit;
        }

        /* Check if enabled VFs is same as requested */
        if (num_vfs && num_vfs == bp->pf.active_vfs)
                goto sriov_cfg_exit;

        /* if there are previous existing VFs, clean them up */
        bnxt_sriov_disable(bp);
        if (!num_vfs)
                goto sriov_cfg_exit;

        bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
        bp->sriov_cfg = false;
        wake_up(&bp->sriov_cfg_wait);

        return num_vfs;
}
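
/* Forward an encapsulated response prepared by the PF back to the VF that
 * originated the request.
 */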
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                              void *encap_resp, __le64 encap_resp_addr,
                              __le16 encap_resp_cpr, u32 msg_size)
{
        int rc = 0;
        struct hwrm_fwd_resp_input req = {0};

        if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_len = cpu_to_le16(msg_size);
        req.encap_resp_addr = encap_resp_addr;
        req.encap_resp_cmpl_ring = encap_resp_cpr;
        memcpy(req.encap_resp, encap_resp, msg_size);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
        return rc;
}
static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                                  u32 msg_size)
{
        int rc = 0;
        struct hwrm_reject_fwd_resp_input req = {0};

        if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
        return rc;
}
static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                                   u32 msg_size)
{
        int rc = 0;
        struct hwrm_exec_fwd_resp_input req = {0};

        if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
        return rc;
}
static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
        struct hwrm_func_vf_cfg_input *req =
                (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

        /* Allow VF to set a valid MAC address, if trust is set to on or
         * if the PF assigned MAC address is zero
         */
        if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
                bool trust = bnxt_is_trusted_vf(bp, vf);

                if (is_valid_ether_addr(req->dflt_mac_addr) &&
                    (trust || !is_valid_ether_addr(vf->mac_addr) ||
                     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
                        ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
                        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
                }
                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
        }
        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
        struct hwrm_cfa_l2_filter_alloc_input *req =
                (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
        bool mac_ok = false;

        if (!is_valid_ether_addr((const u8 *)req->l2_addr))
                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

        /* Allow VF to set a valid MAC address, if trust is set to on.
         * Or VF MAC address must first match MAC address in PF's context.
         * Otherwise, it must match the VF MAC address if firmware spec >=
         * 1.2.2
         */
        if (bnxt_is_trusted_vf(bp, vf)) {
                mac_ok = true;
        } else if (is_valid_ether_addr(vf->mac_addr)) {
                if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
                        mac_ok = true;
        } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
                if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
                        mac_ok = true;
        } else {
                /* There are two cases:
                 * 1. If firmware spec < 0x10202, VF MAC address is not
                 *    forwarded to the PF and so it doesn't have to match.
                 * 2. Allow VF to modify its own MAC when PF has not assigned
                 *    a valid MAC address and firmware spec >= 0x10202.
                 */
                mac_ok = true;
        }
        if (mac_ok)
                return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
        return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        int rc = 0;

        if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
                /* real link */
                rc = bnxt_hwrm_exec_fwd_resp(
                        bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
        } else {
                struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
                struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

                phy_qcfg_req =
                (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
                mutex_lock(&bp->hwrm_cmd_lock);
                memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
                       sizeof(phy_qcfg_resp));
                mutex_unlock(&bp->hwrm_cmd_lock);
                phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
                phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
                phy_qcfg_resp.valid = 1;

                if (vf->flags & BNXT_VF_LINK_UP) {
                        /* if physical link is down, force link up on VF */
                        if (phy_qcfg_resp.link !=
                            PORT_PHY_QCFG_RESP_LINK_LINK) {
                                phy_qcfg_resp.link =
                                        PORT_PHY_QCFG_RESP_LINK_LINK;
                                phy_qcfg_resp.link_speed = cpu_to_le16(
                                        PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
                                phy_qcfg_resp.duplex_cfg =
                                        PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
                                phy_qcfg_resp.duplex_state =
                                        PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
                                phy_qcfg_resp.pause =
                                        (PORT_PHY_QCFG_RESP_PAUSE_TX |
                                         PORT_PHY_QCFG_RESP_PAUSE_RX);
                        }
                } else {
                        /* force link down */
                        phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
                        phy_qcfg_resp.link_speed = 0;
                        phy_qcfg_resp.duplex_state =
                                PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
                        phy_qcfg_resp.pause = 0;
                }
                rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
                                        phy_qcfg_req->resp_addr,
                                        phy_qcfg_req->cmpl_ring,
                                        sizeof(phy_qcfg_resp));
        }
        return rc;
}
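
/* Dispatch a forwarded HWRM request from a VF to the appropriate handler
 * based on the encapsulated request type.
 */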
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        int rc = 0;
        struct input *encap_req = vf->hwrm_cmd_req_addr;
        u32 req_type = le16_to_cpu(encap_req->req_type);

        switch (req_type) {
        case HWRM_FUNC_VF_CFG:
                rc = bnxt_vf_configure_mac(bp, vf);
                break;
        case HWRM_CFA_L2_FILTER_ALLOC:
                rc = bnxt_vf_validate_set_mac(bp, vf);
                break;
        case HWRM_FUNC_CFG:
                /* TODO Validate if VF is allowed to change mac address,
                 * mtu, num of rings etc
                 */
                rc = bnxt_hwrm_exec_fwd_resp(
                        bp, vf, sizeof(struct hwrm_func_cfg_input));
                break;
        case HWRM_PORT_PHY_QCFG:
                rc = bnxt_vf_set_link(bp, vf);
                break;
        default:
                break;
        }
        return rc;
}
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
        u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

        /* Scan through VFs and process commands */
        while (1) {
                vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
                if (vf_id >= active_vfs)
                        break;

                clear_bit(vf_id, bp->pf.vf_event_bmap);
                bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
                i = vf_id + 1;
        }
}
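
/* Called on a VF to refresh the MAC address assigned by the PF, queried
 * from firmware via HWRM_FUNC_QCAPS.
 */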
void bnxt_update_vf_mac(struct bnxt *bp)
{
        struct hwrm_func_qcaps_input req = {0};
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
        req.fid = cpu_to_le16(0xffff);

        mutex_lock(&bp->hwrm_cmd_lock);
        if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
                goto update_vf_mac_exit;

        /* Store MAC address from the firmware. There are 2 cases:
         * 1. MAC address is valid. It is assigned from the PF and we
         *    need to override the current VF MAC address with it.
         * 2. MAC address is zero. The VF will use a random MAC address by
         *    default but the stored zero MAC will allow the VF user to
         *    change the random MAC address using ndo_set_mac_address()
         *    if desired.
         */
        if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
                memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

        /* overwrite netdev dev_addr with admin VF MAC */
        if (is_valid_ether_addr(bp->vf.mac_addr))
                memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
}
int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
        struct hwrm_func_vf_cfg_input req = {0};
        int rc = 0;

        if (!BNXT_VF(bp))
                return 0;

        if (bp->hwrm_spec_code < 0x10202) {
                if (is_valid_ether_addr(bp->vf.mac_addr))
                        rc = -EADDRNOTAVAIL;
                goto mac_done;
        }
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
        req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
        if (rc && strict) {
                rc = -EADDRNOTAVAIL;
                netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
                            mac);
                return rc;
        }
        return 0;
}
#else

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
        return 0;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
        netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
        return 0;
}
#endif