/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

static LIST_HEAD(intf_list);
static LIST_HEAD(mlx5_dev_list);
/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);

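/* Per-(core device, interface) instance created by mlx5_add_device():
 * ->context is whatever the interface's ->add() callback returned, and
 * ->state carries the MLX5_INTERFACE_ADDED/ATTACHED bits.
 */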
struct mlx5_device_context {
	struct list_head	list;
	struct mlx5_interface  *intf;
	void		       *context;
	unsigned long		state;
};

struct mlx5_delayed_event {
	struct list_head	list;
	struct mlx5_core_dev	*dev;
	enum mlx5_dev_event	event;
	unsigned long		param;
};

enum {
	MLX5_INTERFACE_ADDED,
	MLX5_INTERFACE_ATTACHED,
};

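/* Remember an event that arrived while an interface was still being added;
 * called under priv->ctx_lock, hence the GFP_ATOMIC allocation.
 */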
static void add_delayed_event(struct mlx5_priv *priv,
			      struct mlx5_core_dev *dev,
			      enum mlx5_dev_event event,
			      unsigned long param)
{
	struct mlx5_delayed_event *delayed_event;

	delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC);
	if (!delayed_event) {
		mlx5_core_err(dev, "event %d is missed\n", event);
		return;
	}

	mlx5_core_dbg(dev, "Accumulating event %d\n", event);
	delayed_event->dev = dev;
	delayed_event->event = event;
	delayed_event->param = param;
	list_add_tail(&delayed_event->list, &priv->waiting_events_list);
}

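/* Stop accumulating and replay the queued events to the interface that has
 * just been added (if its ->add() succeeded), then free the queued entries.
 */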
static void delayed_event_release(struct mlx5_device_context *dev_ctx,
				  struct mlx5_priv *priv)
{
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
	struct mlx5_delayed_event *de;
	struct mlx5_delayed_event *n;
	struct list_head temp;

	INIT_LIST_HEAD(&temp);

	spin_lock_irq(&priv->ctx_lock);

	priv->is_accum_events = false;
	list_splice_init(&priv->waiting_events_list, &temp);
	if (!dev_ctx->context)
		goto out;
	list_for_each_entry_safe(de, n, &temp, list)
		dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);

out:
	spin_unlock_irq(&priv->ctx_lock);

	list_for_each_entry_safe(de, n, &temp, list) {
		list_del(&de->list);
		kfree(de);
	}
}

/* Start accumulating events that can arrive after mlx5_ib calls
 * ib_register_device, until that interface is added to the events list.
 */
static void delayed_event_start(struct mlx5_priv *priv)
{
	spin_lock_irq(&priv->ctx_lock);
	priv->is_accum_events = true;
	spin_unlock_irq(&priv->ctx_lock);
}

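/* Instantiate @intf on the core device that owns @priv: call ->add(),
 * record the context on priv->ctx_list, register the interface's page
 * fault handler (if any), and finally replay events that arrived while
 * the interface was being added.
 */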
void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	if (!mlx5_lag_intf_add(intf, priv))
		return;

	dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf = intf;

	delayed_event_start(priv);

	dev_ctx->context = intf->add(dev);
	set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	if (intf->attach)
		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		if (dev_ctx->intf->pfault) {
			if (priv->pfault) {
				mlx5_core_err(dev, "multiple page fault handlers not supported");
			} else {
				priv->pfault_ctx = dev_ctx->context;
				priv->pfault = dev_ctx->intf->pfault;
			}
		}
#endif
		spin_unlock_irq(&priv->ctx_lock);
	}

	delayed_event_release(dev_ctx, priv);

	if (!dev_ctx->context)
		kfree(dev_ctx);
}

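/* Look up the context created for @intf on this device, or NULL if the
 * interface was never added here.
 */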
static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
						   struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf)
			return dev_ctx;

	return NULL;
}

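/* Tear down @intf on this device: drop its page fault handler (waiting for
 * running handlers via SRCU), unlink the context and call ->remove().
 */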
void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	spin_lock_irq(&priv->ctx_lock);
	if (priv->pfault == dev_ctx->intf->pfault)
		priv->pfault = NULL;
	spin_unlock_irq(&priv->ctx_lock);

	synchronize_srcu(&priv->pfault_srcu);
#endif

	spin_lock_irq(&priv->ctx_lock);
	list_del(&dev_ctx->list);
	spin_unlock_irq(&priv->ctx_lock);

	if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
		intf->remove(dev, dev_ctx->context);

	kfree(dev_ctx);
}

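/* Re-activate an interface on a device that was previously detached.
 * Interfaces that provide ->attach()/->detach() are resumed in place;
 * interfaces without them are re-created through a full ->add() call.
 */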
static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;

	delayed_event_start(priv);
	if (intf->attach) {
		if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
			goto out;
		intf->attach(dev, dev_ctx->context);
		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
	} else {
		if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
			goto out;
		dev_ctx->context = intf->add(dev);
		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	}

out:
	delayed_event_release(dev_ctx, priv);
}

void mlx5_attach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_attach_interface(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);
}

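/* Counterpart of mlx5_attach_interface(): quiesce the interface via
 * ->detach() when available, otherwise remove it completely.
 */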
static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = mlx5_get_device(intf, priv);
	if (!dev_ctx)
		return;

	if (intf->detach) {
		if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
			return;
		intf->detach(dev, dev_ctx->context);
		clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
	} else {
		if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
			return;
		intf->remove(dev, dev_ctx->context);
		clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
	}
}

void mlx5_detach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_detach_interface(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);
}

bool mlx5_device_registered(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv;
	bool found = false;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		if (priv == &dev->priv)
			found = true;
	mutex_unlock(&mlx5_intf_mutex);

	return found;
}

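/* Called once a core device (PCI function) finishes probing: put it on the
 * global device list and instantiate every registered interface on it.
 */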
int mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_add_tail(&priv->dev_list, &mlx5_dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);

	return 0;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&mlx5_intf_mutex);
}

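/* mlx5_register_interface - register an upper-layer driver with mlx5_core.
 *
 * Adds @intf to the global interface list and instantiates it on every core
 * device already registered.  ->add() and ->remove() are mandatory;
 * ->attach()/->detach() are optional hooks used for lightweight
 * re-activation instead of a full add/remove cycle.
 *
 * A consumer typically registers from its module init, roughly along the
 * lines of the sketch below (illustrative only; my_add/my_remove/my_event
 * are placeholders, not symbols from this file):
 *
 *	static struct mlx5_interface my_intf = {
 *		.add	  = my_add,
 *		.remove	  = my_remove,
 *		.event	  = my_event,
 *		.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
 *	};
 *
 *	err = mlx5_register_interface(&my_intf);
 */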
int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&mlx5_intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&mlx5_intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&mlx5_intf_mutex);
	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&mlx5_intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

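/* Return the protocol-specific device (typically a net_device or ib_device)
 * exposed via ->get_dev() by the interface registered for @protocol, or
 * NULL if no such interface is bound to @mdev.
 */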
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

/* Must be called with intf_mutex held */
void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
	struct mlx5_interface *intf;

	list_for_each_entry(intf, &intf_list, list)
		if (intf->protocol == protocol) {
			mlx5_add_device(intf, &dev->priv);
			break;
		}
}

/* Must be called with intf_mutex held */
void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
	struct mlx5_interface *intf;

	list_for_each_entry(intf, &intf_list, list)
		if (intf->protocol == protocol) {
			mlx5_remove_device(intf, &dev->priv);
			break;
		}
}

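/* Build a bus/slot identifier for the physical adapter: the PCI functions
 * of a dual-port device share the same id, which lets
 * mlx5_get_next_phys_dev() find a device's sibling (e.g. for LAG).
 */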
static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
{
	return (u16)((dev->pdev->bus->number << 8) |
		     PCI_SLOT(dev->pdev->devfn));
}

/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
	u16 pci_id = mlx5_gen_pci_id(dev);
	struct mlx5_core_dev *res = NULL;
	struct mlx5_core_dev *tmp_dev;
	struct mlx5_priv *priv;

	list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
		tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
		if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
			res = tmp_dev;
			break;
		}
	}

	return res;
}

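/* Dispatch a device event to every interface that is currently added or
 * attached; while a new interface is being set up, events are also queued
 * through add_delayed_event() so it can catch up once its ->add() returns.
 */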
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	if (priv->is_accum_events)
		add_delayed_event(priv, dev, event, param);

	/* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
	 * still in priv->ctx_list. In this case, only notify the dev_ctx if its
	 * ADDED or ATTACHED bit is set.
	 */
	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event &&
		    (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
		     test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

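/* Forward an ODP page fault to the handler registered by the IB interface.
 * pfault_srcu guarantees the handler is not unregistered while it runs.
 */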
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
			  struct mlx5_pagefault *pfault)
{
	struct mlx5_priv *priv = &dev->priv;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&priv->pfault_srcu);
	if (priv->pfault)
		priv->pfault(dev, priv->pfault_ctx, pfault);
	srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
}
#endif

void mlx5_dev_list_lock(void)
{
	mutex_lock(&mlx5_intf_mutex);
}

void mlx5_dev_list_unlock(void)
{
	mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
	return mutex_trylock(&mlx5_intf_mutex);
}