/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
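
/* Translate the device's vport state policy into the rtnetlink VF link-state value. */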
static inline u32 mlx_to_net_policy(enum port_state_policy mlx_policy)
{
	switch (mlx_policy) {
	case MLX5_POLICY_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_POLICY_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	case MLX5_POLICY_FOLLOW:
		return IFLA_VF_LINK_STATE_AUTO;
	default:
		return __IFLA_VF_LINK_STATE_MAX;
	}
}
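
/*
 * Report the VF's current configuration (link state) for the IB device's
 * get_vf_config callback, based on the HCA vport context of vport vf + 1.
 */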
int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u8 port,
			  struct ifla_vf_info *info)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	/* vport 0 is the PF, so VF n is queried as vport n + 1 */
	err = mlx5_query_hca_vport_context(mdev, 1, 1, vf + 1, rep);
	if (err) {
		mlx5_ib_warn(dev, "failed to query port policy for vf %d (%d)\n",
			     vf, err);
		goto free;
	}
	memset(info, 0, sizeof(*info));
	info->linkstate = mlx_to_net_policy(rep->policy);
	if (info->linkstate == __IFLA_VF_LINK_STATE_MAX)
		err = -EINVAL;

free:
	kfree(rep);
	return err;
}
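
/* Translate an rtnetlink VF link-state request into the device's vport state policy. */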
static inline enum port_state_policy net_to_mlx_policy(int policy)
{
	switch (policy) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_POLICY_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_POLICY_UP;
	case IFLA_VF_LINK_STATE_AUTO:
		return MLX5_POLICY_FOLLOW;
	default:
		return MLX5_POLICY_INVALID;
	}
}
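
/*
 * Apply a new link-state policy to the VF's vport and, on success, cache it
 * in the core driver's per-VF context.
 */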
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *in;
	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	in->policy = net_to_mlx_policy(state);
	if (in->policy == MLX5_POLICY_INVALID) {
		err = -EINVAL;
		goto out;
	}
	in->field_select = MLX5_HCA_VPORT_SEL_STATE_POLICY;
	err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
	if (!err)
		vfs_ctx[vf].policy = in->policy;

out:
	kfree(in);
	return err;
}
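
/*
 * Fill ifla_vf_stats with the IB unicast/multicast counters read from the
 * vport counter query output for this VF.
 */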
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	struct mlx5_core_dev *mdev;
	struct mlx5_ib_dev *dev;
	void *out;
	int err;

	dev = to_mdev(device);
	mdev = dev->mdev;

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_core_query_vport_counter(mdev, true, vf, port, out, out_sz);
	if (err)
		goto ex;

	stats->rx_packets = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.packets);
	stats->tx_packets = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.packets);
	stats->rx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.octets);
	stats->tx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.octets);
	stats->multicast = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_multicast.packets);

ex:
	kfree(out);
	return err;
}
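
/* Program the VF's node GUID through the HCA vport context and cache it on success. */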
static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *in;
	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
	in->node_guid = guid;
	err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
	if (!err) {
		vfs_ctx[vf].node_guid = guid;
		vfs_ctx[vf].node_guid_valid = 1;
	}
	kfree(in);
	return err;
}
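
/* Program the VF's port GUID through the HCA vport context and cache it on success. */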
static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *in;
	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID;
	in->port_guid = guid;
	err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
	if (!err) {
		vfs_ctx[vf].port_guid = guid;
		vfs_ctx[vf].port_guid_valid = 1;
	}
	kfree(in);
	return err;
}
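
/* Dispatch a GUID-set request to the node or port GUID helper based on type. */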
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type)
{
	if (type == IFLA_VF_IB_NODE_GUID)
		return set_vf_node_guid(device, vf, port, guid);
	else if (type == IFLA_VF_IB_PORT_GUID)
		return set_vf_port_guid(device, vf, port, guid);

	return -EINVAL;
}
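
/*
 * Return the cached node and port GUIDs for the VF; a GUID that was never
 * set through this interface reads back as zero.
 */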
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;

	node_guid->guid =
		vfs_ctx[vf].node_guid_valid ? vfs_ctx[vf].node_guid : 0;
	port_guid->guid =
		vfs_ctx[vf].port_guid_valid ? vfs_ctx[vf].port_guid : 0;

	return 0;
}