/* drivers/net/ethernet/mellanox/mlx4/intf.c */
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <net/devlink.h>

#include "mlx4.h"
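
/*
 * Per-device, per-interface bookkeeping: one mlx4_device_context is
 * allocated for every (device, registered interface) pair and linked
 * into the device's ctx_list, which is protected by priv->ctx_lock.
 */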
struct mlx4_device_context {
        struct list_head        list;
        struct list_head        bond_list;
        struct mlx4_interface   *intf;
        void                    *context;
};
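
/* Global lists of registered interfaces and devices, guarded by intf_mutex. */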
static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);
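
/*
 * Bind one interface to one device: call the interface's ->add() callback
 * and, if it produced a context, queue it on the device's ctx_list and run
 * the optional ->activate() hook.  Allocation or ->add() failure is silently
 * ignored; the pairing is simply skipped.
 */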
static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
        struct mlx4_device_context *dev_ctx;

        dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
        if (!dev_ctx)
                return;

        dev_ctx->intf    = intf;
        dev_ctx->context = intf->add(&priv->dev);

        if (dev_ctx->context) {
                spin_lock_irq(&priv->ctx_lock);
                list_add_tail(&dev_ctx->list, &priv->ctx_list);
                spin_unlock_irq(&priv->ctx_lock);
                if (intf->activate)
                        intf->activate(&priv->dev, dev_ctx->context);
        } else
                kfree(dev_ctx);
}
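
/*
 * Undo mlx4_add_device() for one (interface, device) pair: unlink the
 * matching context from ctx_list under ctx_lock, then call ->remove()
 * and free the context.
 */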
static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
        struct mlx4_device_context *dev_ctx;

        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
                if (dev_ctx->intf == intf) {
                        spin_lock_irq(&priv->ctx_lock);
                        list_del(&dev_ctx->list);
                        spin_unlock_irq(&priv->ctx_lock);

                        intf->remove(&priv->dev, dev_ctx->context);
                        kfree(dev_ctx);
                        return;
                }
}
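
/*
 * mlx4_register_interface() - register a protocol driver with the mlx4 core.
 * @intf: interface descriptor; the ->add and ->remove callbacks are mandatory.
 *
 * The interface is appended to intf_list and its ->add() callback is invoked
 * for every device already on dev_list.  On multi-function (SRIOV) devices
 * the MLX4_INTFF_BONDING flag is cleared, since HA bonding is disabled there.
 *
 * Illustrative caller sketch (hypothetical names; the real callers are the
 * mlx4_en and mlx4_ib drivers, and only callbacks dereferenced in this file
 * are shown):
 *
 *	static struct mlx4_interface my_intf = {
 *		.add      = my_add,      // returns a per-device context
 *		.remove   = my_remove,
 *		.event    = my_event,    // optional async-event handler
 *		.get_dev  = my_get_dev,  // optional, see mlx4_get_protocol_dev()
 *		.protocol = MLX4_PROT_ETH,
 *	};
 *
 *	err = mlx4_register_interface(&my_intf);
 *
 * Returns 0 on success or -EINVAL if a mandatory callback is missing.
 */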
int mlx4_register_interface(struct mlx4_interface *intf)
{
        struct mlx4_priv *priv;

        if (!intf->add || !intf->remove)
                return -EINVAL;

        mutex_lock(&intf_mutex);

        list_add_tail(&intf->list, &intf_list);
        list_for_each_entry(priv, &dev_list, dev_list) {
                if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
                        mlx4_dbg(&priv->dev,
                                 "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
                        intf->flags &= ~MLX4_INTFF_BONDING;
                }
                mlx4_add_device(intf, priv);
        }

        mutex_unlock(&intf_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_register_interface);
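
/*
 * mlx4_unregister_interface() - reverse of mlx4_register_interface(): detach
 * the interface from every known device, then drop it from intf_list.
 */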
void mlx4_unregister_interface(struct mlx4_interface *intf)
{
        struct mlx4_priv *priv;

        mutex_lock(&intf_mutex);

        list_for_each_entry(priv, &dev_list, dev_list)
                mlx4_remove_device(intf, priv);

        list_del(&intf->list);

        mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
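
/*
 * mlx4_do_bond() - switch the device in or out of bonded (HA) mode.  Requires
 * the PORT_REMAP capability.  All interfaces that opted in with
 * MLX4_INTFF_BONDING are torn down (->remove()) and re-added (->add()) so
 * they come back up with the new bonding state; other interfaces are
 * untouched.
 */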
int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx;
        unsigned long flags;
        int ret;
        LIST_HEAD(bond_list);

        if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
                return -EOPNOTSUPP;

        ret = mlx4_disable_rx_port_check(dev, enable);
        if (ret) {
                mlx4_err(dev, "Failed to %s rx port check\n",
                         enable ? "enable" : "disable");
                return ret;
        }
        if (enable) {
                dev->flags |= MLX4_FLAG_BONDED;
        } else {
                ret = mlx4_virt2phy_port_map(dev, 1, 2);
                if (ret) {
                        mlx4_err(dev, "Failed to reset port map\n");
                        return ret;
                }
                dev->flags &= ~MLX4_FLAG_BONDED;
        }

        spin_lock_irqsave(&priv->ctx_lock, flags);
        list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) {
                if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) {
                        list_add_tail(&dev_ctx->bond_list, &bond_list);
                        list_del(&dev_ctx->list);
                }
        }
        spin_unlock_irqrestore(&priv->ctx_lock, flags);

        list_for_each_entry(dev_ctx, &bond_list, bond_list) {
                dev_ctx->intf->remove(dev, dev_ctx->context);
                dev_ctx->context = dev_ctx->intf->add(dev);

                spin_lock_irqsave(&priv->ctx_lock, flags);
                list_add_tail(&dev_ctx->list, &priv->ctx_list);
                spin_unlock_irqrestore(&priv->ctx_lock, flags);

                mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n",
                         dev_ctx->intf->protocol, enable ?
                         "enabled" : "disabled");
        }
        return 0;
}
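
/*
 * mlx4_dispatch_event() - fan an asynchronous device event out to every
 * registered interface that supplied an ->event() callback.  Runs with
 * ctx_lock held, so the callbacks must not sleep.
 */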
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
                         unsigned long param)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_device_context *dev_ctx;
        unsigned long flags;

        spin_lock_irqsave(&priv->ctx_lock, flags);

        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
                if (dev_ctx->intf->event)
                        dev_ctx->intf->event(dev, dev_ctx->context, type, param);

        spin_unlock_irqrestore(&priv->ctx_lock, flags);
}
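
/*
 * mlx4_register_device() - announce a newly probed device: mark it UP, put it
 * on dev_list, attach every registered interface to it and start the
 * catastrophic-error poller.
 */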
int mlx4_register_device(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_interface *intf;

        mutex_lock(&intf_mutex);

        dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP;
        list_add_tail(&priv->dev_list, &dev_list);
        list_for_each_entry(intf, &intf_list, list)
                mlx4_add_device(intf, priv);

        mutex_unlock(&intf_mutex);
        mlx4_start_catas_poll(dev);

        return 0;
}
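
/*
 * mlx4_unregister_device() - tear down what mlx4_register_device() set up.
 * On a VF that is being removed, the comm channel is inspected first and the
 * device is moved to the error state if the channel has already gone down.
 */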
void mlx4_unregister_device(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_interface *intf;

        if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP))
                return;

        mlx4_stop_catas_poll(dev);
        if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
            mlx4_is_slave(dev)) {
                /* In mlx4_remove_one on a VF */
                u32 slave_read =
                        swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));

                if (mlx4_comm_internal_err(slave_read)) {
                        mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
                                 __func__);
                        mlx4_enter_error_state(dev->persist);
                }
        }
        mutex_lock(&intf_mutex);

        list_for_each_entry(intf, &intf_list, list)
                mlx4_remove_device(intf, priv);

        list_del(&priv->dev_list);
        dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP;

        mutex_unlock(&intf_mutex);
}
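
/*
 * mlx4_get_protocol_dev() - look up the per-protocol private device
 * (typically the protocol driver's net_device or ib_device) that a
 * registered interface exposes for @port via its ->get_dev() callback,
 * or NULL if no such interface is attached.
 */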
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_device_context *dev_ctx;
        unsigned long flags;
        void *result = NULL;

        spin_lock_irqsave(&priv->ctx_lock, flags);

        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
                if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
                        result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
                        break;
                }

        spin_unlock_irqrestore(&priv->ctx_lock, flags);

        return result;
}
EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
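
/*
 * mlx4_get_devlink_port() - return the devlink_port object embedded in the
 * driver's per-port info for @port (ports are 1-based in mlx4).
 */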
struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port)
{
        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];

        return &info->devlink_port;
}
EXPORT_SYMBOL_GPL(mlx4_get_devlink_port);