/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
                                          struct bnxt_vf_info *vf, u16 event_id)
{
        struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_fwd_async_event_cmpl_input req = {0};
        struct hwrm_async_event_cmpl *async_cmpl;
        int rc = 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
        if (vf)
                req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
        else
                /* broadcast this async event to all VFs */
                req.encap_async_event_target_id = cpu_to_le16(0xffff);
        async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
        async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
        async_cmpl->event_id = cpu_to_le16(event_id);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

        if (rc) {
                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
                           rc);
                goto fwd_async_event_cmpl_exit;
        }

        if (resp->error_code) {
                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
                           resp->error_code);
                rc = -1;
        }
fwd_async_event_cmpl_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}
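
/* Common sanity checks for the VF ndo callbacks: the PF must be up,
 * SR-IOV must be active and vf_id must be within range.
 */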
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
        if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
                netdev_err(bp->dev, "vf ndo called though PF is down\n");
                return -EINVAL;
        }
        if (!bp->pf.active_vfs) {
                netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
                return -EINVAL;
        }
        if (vf_id >= bp->pf.active_vfs) {
                netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
                return -EINVAL;
        }
        return 0;
}

int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        bool old_setting = false;
        u32 func_flags;
        int rc;

        if (bp->hwrm_spec_code < 0x10701)
                return -ENOTSUPP;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        if (vf->flags & BNXT_VF_SPOOFCHK)
                old_setting = true;
        if (old_setting == setting)
                return 0;

        if (setting)
                func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
        else
                func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
        /*TODO: if the driver supports VLAN filter on guest VLAN,
         * the spoof check should also include vlan anti-spoofing
         */
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(func_flags);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                if (setting)
                        vf->flags |= BNXT_VF_SPOOFCHK;
                else
                        vf->flags &= ~BNXT_VF_SPOOFCHK;
        }
        return rc;
}

int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;

        if (bnxt_vf_ndo_prep(bp, vf_id))
                return -EINVAL;

        vf = &bp->pf.vf[vf_id];
        if (trusted)
                vf->flags |= BNXT_VF_TRUST;
        else
                vf->flags &= ~BNXT_VF_TRUST;

        return 0;
}

int bnxt_get_vf_config(struct net_device *dev, int vf_id,
                       struct ifla_vf_info *ivi)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        ivi->vf = vf_id;
        vf = &bp->pf.vf[vf_id];

        if (is_valid_ether_addr(vf->mac_addr))
                memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
        else
                memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
        ivi->max_tx_rate = vf->max_tx_rate;
        ivi->min_tx_rate = vf->min_tx_rate;
        ivi->vlan = vf->vlan;
        if (vf->flags & BNXT_VF_QOS)
                ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
        else
                ivi->qos = 0;
        ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
        ivi->trusted = !!(vf->flags & BNXT_VF_TRUST);
        if (!(vf->flags & BNXT_VF_LINK_FORCED))
                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
        else if (vf->flags & BNXT_VF_LINK_UP)
                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
        else
                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

        return 0;
}

int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;
        /* reject bc or mc mac addr, zero mac addr means allow
         * VF to use its own mac addr
         */
        if (is_multicast_ether_addr(mac)) {
                netdev_err(dev, "Invalid VF ethernet address\n");
                return -EINVAL;
        }
        vf = &bp->pf.vf[vf_id];

        memcpy(vf->mac_addr, mac, ETH_ALEN);
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
                     __be16 vlan_proto)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        u16 vlan_tag;
        int rc;

        if (bp->hwrm_spec_code < 0x10201)
                return -ENOTSUPP;

        if (vlan_proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        /* TODO: needed to implement proper handling of user priority,
         * currently fail the command if there is valid priority
         */
        if (vlan_id > 4095 || qos)
                return -EINVAL;

        vf = &bp->pf.vf[vf_id];
        vlan_tag = vlan_id;
        if (vlan_tag == vf->vlan)
                return 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.dflt_vlan = cpu_to_le16(vlan_tag);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                vf->vlan = vlan_tag;
        return rc;
}

int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
                   int max_tx_rate)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        u32 pf_link_speed;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
        if (max_tx_rate > pf_link_speed) {
                netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
                            max_tx_rate, vf_id);
                return -EINVAL;
        }

        if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
                netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
                            min_tx_rate, vf_id);
                return -EINVAL;
        }
        if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
                return 0;
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
        req.max_bw = cpu_to_le32(max_tx_rate);
        req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
        req.min_bw = cpu_to_le32(min_tx_rate);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                vf->min_tx_rate = min_tx_rate;
                vf->max_tx_rate = max_tx_rate;
        }
        return rc;
}

int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];

        vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
        switch (link) {
        case IFLA_VF_LINK_STATE_AUTO:
                vf->flags |= BNXT_VF_LINK_UP;
                break;
        case IFLA_VF_LINK_STATE_DISABLE:
                vf->flags |= BNXT_VF_LINK_FORCED;
                break;
        case IFLA_VF_LINK_STATE_ENABLE:
                vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
                break;
        default:
                netdev_err(bp->dev, "Invalid link option\n");
                rc = -EINVAL;
                break;
        }
        if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
                rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
                        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
        return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
        int i;
        struct bnxt_vf_info *vf;

        for (i = 0; i < num_vfs; i++) {
                vf = &bp->pf.vf[i];
                memset(vf, 0, sizeof(*vf));
        }
        return 0;
}
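
/* Ask the firmware to release the resources it reserved for each VF in
 * the range [first_vf_id, first_vf_id + num_vfs).
 */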
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
        int i, rc = 0;
        struct bnxt_pf_info *pf = &bp->pf;
        struct hwrm_func_vf_resc_free_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
                req.vf_id = cpu_to_le16(i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
        struct pci_dev *pdev = bp->pdev;
        int i;

        kfree(bp->pf.vf_event_bmap);
        bp->pf.vf_event_bmap = NULL;

        for (i = 0; i < 4; i++) {
                if (bp->pf.hwrm_cmd_req_addr[i]) {
                        dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
                                          bp->pf.hwrm_cmd_req_addr[i],
                                          bp->pf.hwrm_cmd_req_dma_addr[i]);
                        bp->pf.hwrm_cmd_req_addr[i] = NULL;
                }
        }

        bp->pf.active_vfs = 0;
}

static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
        struct pci_dev *pdev = bp->pdev;
        u32 nr_pages, size, i, j, k = 0;

        bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
        if (!bp->pf.vf)
                return -ENOMEM;

        bnxt_set_vf_attr(bp, num_vfs);

        size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
        nr_pages = size / BNXT_PAGE_SIZE;
        if (size & (BNXT_PAGE_SIZE - 1))
                nr_pages++;

        for (i = 0; i < nr_pages; i++) {
                bp->pf.hwrm_cmd_req_addr[i] =
                        dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
                                           &bp->pf.hwrm_cmd_req_dma_addr[i],
                                           GFP_KERNEL);

                if (!bp->pf.hwrm_cmd_req_addr[i])
                        return -ENOMEM;

                for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
                        struct bnxt_vf_info *vf = &bp->pf.vf[k];

                        vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
                                                j * BNXT_HWRM_REQ_MAX_SIZE;
                        vf->hwrm_cmd_req_dma_addr =
                                bp->pf.hwrm_cmd_req_dma_addr[i] + j *
                                BNXT_HWRM_REQ_MAX_SIZE;
                        k++;
                }
        }

        /* Max 128 VF's */
        bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
        if (!bp->pf.vf_event_bmap)
                return -ENOMEM;

        bp->pf.hwrm_cmd_req_pages = nr_pages;
        return 0;
}

static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
        struct hwrm_func_buf_rgtr_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

        req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
        req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
        req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
        req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
        req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
        req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
        req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
{
        struct hwrm_func_vf_resource_cfg_input req = {0};
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
        u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
        struct bnxt_pf_info *pf = &bp->pf;
        int i, rc = 0, min = 1;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);

        vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
        vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
        else
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
        vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
        vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
        vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

        req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
        req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
        if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                min = 0;
                req.min_rsscos_ctx = cpu_to_le16(min);
        }
        if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
            pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                req.min_cmpl_rings = cpu_to_le16(min);
                req.min_tx_rings = cpu_to_le16(min);
                req.min_rx_rings = cpu_to_le16(min);
                req.min_l2_ctxs = cpu_to_le16(min);
                req.min_vnics = cpu_to_le16(min);
                req.min_stat_ctx = cpu_to_le16(min);
                req.min_hw_ring_grps = cpu_to_le16(min);
        } else {
                vf_cp_rings /= num_vfs;
                vf_tx_rings /= num_vfs;
                vf_rx_rings /= num_vfs;
                vf_vnics /= num_vfs;
                vf_stat_ctx /= num_vfs;
                vf_ring_grps /= num_vfs;

                req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
                req.min_tx_rings = cpu_to_le16(vf_tx_rings);
                req.min_rx_rings = cpu_to_le16(vf_rx_rings);
                req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
                req.min_vnics = cpu_to_le16(vf_vnics);
                req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
                req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        }
        req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
        req.max_tx_rings = cpu_to_le16(vf_tx_rings);
        req.max_rx_rings = cpu_to_le16(vf_rx_rings);
        req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
        req.max_vnics = cpu_to_le16(vf_vnics);
        req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
        req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = 0; i < num_vfs; i++) {
                req.vf_id = cpu_to_le16(pf->first_vf_id + i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc) {
                        rc = -ENOMEM;
                        break;
                }
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = pf->first_vf_id + i;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        if (pf->active_vfs) {
                u16 n = pf->active_vfs;

                hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
                hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
                hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
                                             n;
                hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
                hw_resc->max_rsscos_ctxs -= pf->active_vfs;
                hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
                hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;

                rc = pf->active_vfs;
        }
        return rc;
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        u16 vf_ring_grps, max_stat_ctxs;
        struct hwrm_func_cfg_input req = {0};
        struct bnxt_pf_info *pf = &bp->pf;
        int total_vf_tx_rings = 0;
        u32 mtu, i;
        int rc = 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

        max_stat_ctxs = hw_resc->max_stat_ctxs;

        /* Remaining rings are distributed equally amongst VF's for now */
        vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
                       bp->cp_nr_rings) / num_vfs;
        vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
                              num_vfs;
        else
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
                              num_vfs;
        vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
        vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
        vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
                                  FUNC_CFG_REQ_ENABLES_MRU |
                                  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
                                  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

        mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        req.mru = cpu_to_le16(mtu);
        req.mtu = cpu_to_le16(mtu);

        req.num_rsscos_ctxs = cpu_to_le16(1);
        req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
        req.num_tx_rings = cpu_to_le16(vf_tx_rings);
        req.num_rx_rings = cpu_to_le16(vf_rx_rings);
        req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        req.num_l2_ctxs = cpu_to_le16(4);

        req.num_vnics = cpu_to_le16(vf_vnics);
        /* FIXME spec currently uses 1 bit for stats ctx */
        req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = 0; i < num_vfs; i++) {
                int vf_tx_rsvd = vf_tx_rings;

                req.fid = cpu_to_le16(pf->first_vf_id + i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = le16_to_cpu(req.fid);
                rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
                                              &vf_tx_rsvd);
                if (rc)
                        break;
                total_vf_tx_rings += vf_tx_rsvd;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        if (rc)
                rc = -ENOMEM;
        if (pf->active_vfs) {
                hw_resc->max_tx_rings -= total_vf_tx_rings;
                hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
                hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
                hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
                hw_resc->max_rsscos_ctxs -= num_vfs;
                hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
                hw_resc->max_vnics -= vf_vnics * num_vfs;
                rc = pf->active_vfs;
        }
        return rc;
}

static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
{
        if (BNXT_NEW_RM(bp))
                return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
        else
                return bnxt_hwrm_func_cfg(bp, num_vfs);
}

static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
        int rc = 0, vfs_supported;
        int min_rx_rings, min_tx_rings, min_rss_ctxs;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        int tx_ok = 0, rx_ok = 0, rss_ok = 0;
        int avail_cp, avail_stat;

        /* Check if we can enable requested num of vf's. At a minimum
         * we require 1 RX 1 TX rings for each VF. In this minimum conf
         * features like TPA will not be available.
         */
        vfs_supported = *num_vfs;

        avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
        avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
        avail_cp = min_t(int, avail_cp, avail_stat);

        while (vfs_supported) {
                min_rx_rings = vfs_supported;
                min_tx_rings = vfs_supported;
                min_rss_ctxs = vfs_supported;

                if (bp->flags & BNXT_FLAG_AGG_RINGS) {
                        if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
                            min_rx_rings)
                                rx_ok = 1;
                } else {
                        if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
                            min_rx_rings)
                                rx_ok = 1;
                }
                if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
                    avail_cp < min_rx_rings)
                        rx_ok = 0;

                if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
                    avail_cp >= min_tx_rings)
                        tx_ok = 1;

                if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
                    min_rss_ctxs)
                        rss_ok = 1;

                if (tx_ok && rx_ok && rss_ok)
                        break;

                vfs_supported--;
        }

        if (!vfs_supported) {
                netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
                return -EINVAL;
        }

        if (vfs_supported != *num_vfs) {
                netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
                            *num_vfs, vfs_supported);
                *num_vfs = vfs_supported;
        }

        rc = bnxt_alloc_vf_resources(bp, *num_vfs);
        if (rc)
                goto err_out1;

        /* Reserve resources for VFs */
        rc = bnxt_func_cfg(bp, *num_vfs);
        if (rc != *num_vfs) {
                if (rc <= 0) {
                        netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
                        *num_vfs = 0;
                        goto err_out2;
                }
                netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
                *num_vfs = rc;
        }

        /* Register buffers for VFs */
        rc = bnxt_hwrm_func_buf_rgtr(bp);
        if (rc)
                goto err_out2;

        bnxt_ulp_sriov_cfg(bp, *num_vfs);

        rc = pci_enable_sriov(bp->pdev, *num_vfs);
        if (rc)
                goto err_out2;

        return 0;

err_out2:
        /* Free the resources reserved for various VF's */
        bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
        bnxt_free_vf_resources(bp);

        return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
        u16 num_vfs = pci_num_vf(bp->pdev);

        if (!num_vfs)
                return;

        /* synchronize VF and VF-rep create and destroy */
        mutex_lock(&bp->sriov_lock);
        bnxt_vf_reps_destroy(bp);

        if (pci_vfs_assigned(bp->pdev)) {
                bnxt_hwrm_fwd_async_event_cmpl(
                        bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
                netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
                            num_vfs);
        } else {
                pci_disable_sriov(bp->pdev);
                /* Free the HW resources reserved for various VF's */
                bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
        }
        mutex_unlock(&bp->sriov_lock);

        bnxt_free_vf_resources(bp);

        /* Reclaim all resources for the PF. */
        rtnl_lock();
        bnxt_restore_pf_fw_resources(bp);
        rtnl_unlock();

        bnxt_ulp_sriov_cfg(bp, 0);
}

int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(dev);

        if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
                netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
                return 0;
        }

        rtnl_lock();
        if (!netif_running(dev)) {
                netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
                rtnl_unlock();
                return 0;
        }
        bp->sriov_cfg = true;
        rtnl_unlock();

        if (pci_vfs_assigned(bp->pdev)) {
                netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
                num_vfs = 0;
                goto sriov_cfg_exit;
        }

        /* Check if enabled VFs is same as requested */
        if (num_vfs && num_vfs == bp->pf.active_vfs)
                goto sriov_cfg_exit;

        /* if there are previous existing VFs, clean them up */
        bnxt_sriov_disable(bp);
        if (!num_vfs)
                goto sriov_cfg_exit;

        bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
        bp->sriov_cfg = false;
        wake_up(&bp->sriov_cfg_wait);

        return num_vfs;
}
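
/* Forward an encapsulated HWRM response of msg_size bytes back to the VF
 * that originated the request.
 */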
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                              void *encap_resp, __le64 encap_resp_addr,
                              __le16 encap_resp_cpr, u32 msg_size)
{
        int rc = 0;
        struct hwrm_fwd_resp_input req = {0};
        struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_len = cpu_to_le16(msg_size);
        req.encap_resp_addr = encap_resp_addr;
        req.encap_resp_cmpl_ring = encap_resp_cpr;
        memcpy(req.encap_resp, encap_resp, msg_size);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

        if (rc) {
                netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
                goto fwd_resp_exit;
        }

        if (resp->error_code) {
                netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
                           resp->error_code);
                rc = -1;
        }
fwd_resp_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                                  u32 msg_size)
{
        int rc = 0;
        struct hwrm_reject_fwd_resp_input req = {0};
        struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

        if (rc) {
                netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
                goto fwd_err_resp_exit;
        }

        if (resp->error_code) {
                netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
                           resp->error_code);
                rc = -1;
        }
fwd_err_resp_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                                   u32 msg_size)
{
        int rc = 0;
        struct hwrm_exec_fwd_resp_input req = {0};
        struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

        if (rc) {
                netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
                goto exec_fwd_resp_exit;
        }

        if (resp->error_code) {
                netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
                           resp->error_code);
                rc = -1;
        }
exec_fwd_resp_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
        struct hwrm_func_vf_cfg_input *req =
                (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

        /* Allow VF to set a valid MAC address, if trust is set to on or
         * if the PF assigned MAC address is zero
         */
        if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
                if (is_valid_ether_addr(req->dflt_mac_addr) &&
                    ((vf->flags & BNXT_VF_TRUST) ||
                     !is_valid_ether_addr(vf->mac_addr) ||
                     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
                        ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
                        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
                }
                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
        }
        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
        struct hwrm_cfa_l2_filter_alloc_input *req =
                (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
        bool mac_ok = false;

        if (!is_valid_ether_addr((const u8 *)req->l2_addr))
                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

        /* Allow VF to set a valid MAC address, if trust is set to on.
         * Or VF MAC address must first match MAC address in PF's context.
         * Otherwise, it must match the VF MAC address if firmware spec >=
         * 1.2.2
         */
        if (vf->flags & BNXT_VF_TRUST) {
                mac_ok = true;
        } else if (is_valid_ether_addr(vf->mac_addr)) {
                if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
                        mac_ok = true;
        } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
                if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
                        mac_ok = true;
        } else {
                /* There are two cases:
                 * 1.If firmware spec < 0x10202,VF MAC address is not forwarded
                 *   to the PF and so it doesn't have to match
                 * 2.Allow VF to modify its own MAC when PF has not assigned a
                 *   valid MAC address and firmware spec >= 0x10202
                 */
                mac_ok = true;
        }
        if (mac_ok)
                return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
        return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        int rc = 0;

        if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
                /* real link */
                rc = bnxt_hwrm_exec_fwd_resp(
                        bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
        } else {
                struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
                struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

                phy_qcfg_req =
                (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
                mutex_lock(&bp->hwrm_cmd_lock);
                memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
                       sizeof(phy_qcfg_resp));
                mutex_unlock(&bp->hwrm_cmd_lock);
                phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
                phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
                phy_qcfg_resp.valid = 1;

                if (vf->flags & BNXT_VF_LINK_UP) {
                        /* if physical link is down, force link up on VF */
                        if (phy_qcfg_resp.link !=
                            PORT_PHY_QCFG_RESP_LINK_LINK) {
                                phy_qcfg_resp.link =
                                        PORT_PHY_QCFG_RESP_LINK_LINK;
                                phy_qcfg_resp.link_speed = cpu_to_le16(
                                        PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
                                phy_qcfg_resp.duplex_cfg =
                                        PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
                                phy_qcfg_resp.duplex_state =
                                        PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
                                phy_qcfg_resp.pause =
                                        (PORT_PHY_QCFG_RESP_PAUSE_TX |
                                         PORT_PHY_QCFG_RESP_PAUSE_RX);
                        }
                } else {
                        /* force link down */
                        phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
                        phy_qcfg_resp.link_speed = 0;
                        phy_qcfg_resp.duplex_state =
                                PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
                        phy_qcfg_resp.pause = 0;
                }
                rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
                                        phy_qcfg_req->resp_addr,
                                        phy_qcfg_req->cmpl_ring,
                                        sizeof(phy_qcfg_resp));
        }
        return rc;
}

static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        int rc = 0;
        struct input *encap_req = vf->hwrm_cmd_req_addr;
        u32 req_type = le16_to_cpu(encap_req->req_type);

        switch (req_type) {
        case HWRM_FUNC_VF_CFG:
                rc = bnxt_vf_configure_mac(bp, vf);
                break;
        case HWRM_CFA_L2_FILTER_ALLOC:
                rc = bnxt_vf_validate_set_mac(bp, vf);
                break;
        case HWRM_FUNC_CFG:
                /* TODO Validate if VF is allowed to change mac address,
                 * mtu, num of rings etc
                 */
                rc = bnxt_hwrm_exec_fwd_resp(
                        bp, vf, sizeof(struct hwrm_func_cfg_input));
                break;
        case HWRM_PORT_PHY_QCFG:
                rc = bnxt_vf_set_link(bp, vf);
                break;
        default:
                break;
        }
        return rc;
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
        u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

        /* Scan through VF's and process commands */
        while (1) {
                vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
                if (vf_id >= active_vfs)
                        break;

                clear_bit(vf_id, bp->pf.vf_event_bmap);
                bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
                i = vf_id + 1;
        }
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
        struct hwrm_func_qcaps_input req = {0};
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
        req.fid = cpu_to_le16(0xffff);

        mutex_lock(&bp->hwrm_cmd_lock);
        if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
                goto update_vf_mac_exit;

        /* Store MAC address from the firmware.  There are 2 cases:
         * 1. MAC address is valid.  It is assigned from the PF and we
         *    need to override the current VF MAC address with it.
         * 2. MAC address is zero.  The VF will use a random MAC address by
         *    default but the stored zero MAC will allow the VF user to change
         *    the random MAC address using ndo_set_mac_address() if he wants.
         */
        if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
                memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

        /* overwrite netdev dev_addr with admin VF MAC */
        if (is_valid_ether_addr(bp->vf.mac_addr))
                memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
        struct hwrm_func_vf_cfg_input req = {0};
        int rc = 0;

        if (!BNXT_VF(bp))
                return 0;

        if (bp->hwrm_spec_code < 0x10202) {
                if (is_valid_ether_addr(bp->vf.mac_addr))
                        rc = -EADDRNOTAVAIL;
                goto mac_done;
        }
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
        req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
        if (rc && strict) {
                rc = -EADDRNOTAVAIL;
                netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
                            mac);
                return rc;
        }
        return 0;
}

#else

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
        netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
        return 0;
}
#endif