/*
 * Copyright (c) 2014, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "eswitch.h"

bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;

	return !!sriov->num_vfs;
}

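/* Re-apply the node GUID, port GUID and state policy saved for this VF
 * through a MODIFY_HCA_VPORT_CONTEXT command, selecting only the fields
 * that were actually configured.
 */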
static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	struct mlx5_hca_vport_context *in;
	int err = 0;

	/* Restore sriov guid and policy settings */
	if (sriov->vfs_ctx[vf].node_guid ||
	    sriov->vfs_ctx[vf].port_guid ||
	    sriov->vfs_ctx[vf].policy != MLX5_POLICY_INVALID) {
		in = kzalloc(sizeof(*in), GFP_KERNEL);
		if (!in)
			return -ENOMEM;

		in->node_guid = sriov->vfs_ctx[vf].node_guid;
		in->port_guid = sriov->vfs_ctx[vf].port_guid;
		in->policy = sriov->vfs_ctx[vf].policy;
		in->field_select =
			!!(in->port_guid) * MLX5_HCA_VPORT_SEL_PORT_GUID |
			!!(in->node_guid) * MLX5_HCA_VPORT_SEL_NODE_GUID |
			!!(in->policy) * MLX5_HCA_VPORT_SEL_STATE_POLICY;

		err = mlx5_core_modify_hca_vport_context(dev, 1, 1, vf + 1, in);
		if (err)
			mlx5_core_warn(dev, "modify vport context failed, unable to restore VF %d settings\n", vf);

		kfree(in);
	}

	return err;
}

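/* Enable SRIOV at the device level: set up the legacy eswitch for num_vfs
 * vports, then enable each VF's HCA and, on IB ports, restore its saved
 * GUIDs and policy.
 */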
static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err;
	int vf;

	if (sriov->enabled_vfs) {
		mlx5_core_warn(dev,
			       "failed to enable SRIOV on device, already enabled with %d vfs\n",
			       sriov->enabled_vfs);
		return -EBUSY;
	}

	err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
	if (err) {
		mlx5_core_warn(dev,
			       "failed to enable eswitch SRIOV (%d)\n", err);
		return err;
	}

	for (vf = 0; vf < num_vfs; vf++) {
		err = mlx5_core_enable_hca(dev, vf + 1);
		if (err) {
			mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
			continue;
		}
		sriov->vfs_ctx[vf].enabled = 1;
		sriov->enabled_vfs++;
		if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
			err = sriov_restore_guids(dev, vf);
			if (err) {
				mlx5_core_warn(dev,
					       "failed to restore VF %d settings, err %d\n",
					       vf, err);
				continue;
			}
		}
		mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
	}

	return 0;
}

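/* Disable SRIOV at the device level: disable every VF HCA that is still
 * enabled, tear down the eswitch and wait for the firmware to reclaim
 * the pages given to the VFs.
 */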
static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err;
	int vf;

	if (!sriov->enabled_vfs)
		goto out;

	for (vf = 0; vf < sriov->num_vfs; vf++) {
		if (!sriov->vfs_ctx[vf].enabled)
			continue;
		err = mlx5_core_disable_hca(dev, vf + 1);
		if (err) {
			mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
			continue;
		}
		sriov->vfs_ctx[vf].enabled = 0;
		sriov->enabled_vfs--;
	}

out:
	mlx5_eswitch_disable_sriov(dev->priv.eswitch);

	if (mlx5_wait_for_vf_pages(dev))
		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}

static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	if (pci_num_vf(pdev)) {
		mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
		return -EBUSY;
	}

	err = pci_enable_sriov(pdev, num_vfs);
	if (err)
		mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);

	return err;
}

static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
{
	pci_disable_sriov(pdev);
}

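/* Enable SRIOV end to end: first at the device level, then at the PCI
 * level; roll back the device enablement if the PCI step fails.
 */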
static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err = 0;

	err = mlx5_device_enable_sriov(dev, num_vfs);
	if (err) {
		mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
		return err;
	}

	err = mlx5_pci_enable_sriov(pdev, num_vfs);
	if (err) {
		mlx5_core_warn(dev, "mlx5_pci_enable_sriov failed : %d\n", err);
		mlx5_device_disable_sriov(dev);
		return err;
	}

	sriov->num_vfs = num_vfs;

	return 0;
}

static void mlx5_sriov_disable(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;

	mlx5_pci_disable_sriov(pdev);
	mlx5_device_disable_sriov(dev);
	sriov->num_vfs = 0;
}

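/* PCI sriov_configure() entry point: num_vfs > 0 enables SRIOV,
 * num_vfs == 0 disables it. Only allowed on the PF, and LAG is
 * forbidden while VFs are enabled.
 */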
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
	if (!mlx5_core_is_pf(dev))
		return -EPERM;

	if (num_vfs) {
		int ret;

		ret = mlx5_lag_forbid(dev);
		if (ret && (ret != -ENODEV))
			return ret;
	}

	if (num_vfs) {
		err = mlx5_sriov_enable(pdev, num_vfs);
	} else {
		mlx5_sriov_disable(pdev);
		mlx5_lag_allow(dev);
	}

	return err ? err : num_vfs;
}

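/* Called when the core device is (re)attached: if VFs already exist at
 * the PCI level, re-enable them at the device level.
 */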
int mlx5_sriov_attach(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;

	if (!mlx5_core_is_pf(dev) || !sriov->num_vfs)
		return 0;

	/* If sriov VFs exist in PCI level, enable them in device level */
	return mlx5_device_enable_sriov(dev, sriov->num_vfs);
}

void mlx5_sriov_detach(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_pf(dev))
		return;

	mlx5_device_disable_sriov(dev);
}

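/* Allocate the per-VF context array sized for the maximum number of VFs
 * the PCI device supports, and record how many VFs are currently enabled.
 */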
int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	struct pci_dev *pdev = dev->pdev;
	int total_vfs;

	if (!mlx5_core_is_pf(dev))
		return 0;

	total_vfs = pci_sriov_get_totalvfs(pdev);
	sriov->num_vfs = pci_num_vf(pdev);
	sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
	if (!sriov->vfs_ctx)
		return -ENOMEM;

	return 0;
}

void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;

	if (!mlx5_core_is_pf(dev))
		return;

	kfree(sriov->vfs_ctx);
}