/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"
struct ib_pkey_cache {
	int             table_len;
	u16             table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;
enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID		= 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID		= 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT		= 1UL << 1,
};

enum gid_table_write_action {
	GID_TABLE_WRITE_ACTION_ADD,
	GID_TABLE_WRITE_ACTION_DEL,
	/* MODIFY only updates the GID table. Currently only used by
	 * ib_cache_update.
	 */
	GID_TABLE_WRITE_ACTION_MODIFY
};
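
/*
 * Each GID table entry carries its own rwlock so that readers (e.g.
 * find_gid()) can scan the table without taking the per-table mutex,
 * while a writer marks the entry GID_TABLE_ENTRY_INVALID for the
 * duration of a provider add_gid()/del_gid() call.
 */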
struct ib_gid_table_entry {
	/* This lock protects an entry from being
	 * read and written simultaneously.
	 */
	rwlock_t	    lock;
	unsigned long	    props;
	union ib_gid	    gid;
	struct ib_gid_attr  attr;
	void		   *context;
};

struct ib_gid_table {
	int		     sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Find if this GID already exists.
	 * (b) Find a free space.
	 * (c) Write the new GID.
	 *
	 * Delete requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 *
	 * Add/delete should be carried out atomically.
	 * This is done by locking this mutex from multiple
	 * writers. We don't need this lock for IB, as the MAD
	 * layer replaces all entries. All data_vec entries
	 * are locked by this lock.
	 */
	struct mutex	     lock;
	struct ib_gid_table_entry *data_vec;
};
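
/*
 * write_gid() is the single low-level writer for a GID table slot.  For
 * RoCE ports it first marks the slot invalid, drops the entry lock, and
 * calls the provider's add_gid()/del_gid() hook (which may sleep), then
 * retakes the lock to update the cached copy and netdev references.
 * Callers must hold the table mutex.
 */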
static int write_gid(struct ib_device *ib_dev, u8 port,
		     struct ib_gid_table *table, int ix,
		     const union ib_gid *gid,
		     const struct ib_gid_attr *attr,
		     enum gid_table_write_action action,
		     bool default_gid)
{
	int ret = 0;
	struct net_device *old_net_dev;
	unsigned long flags;

	/* in rdma_cap_roce_gid_table, this function should be protected by a
	 * sleep-able lock.
	 */
	write_lock_irqsave(&table->data_vec[ix].lock, flags);

	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
		write_unlock_irqrestore(&table->data_vec[ix].lock, flags);
		/* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
		 * RoCE providers and thus only updates the cache.
		 */
		if (action == GID_TABLE_WRITE_ACTION_ADD)
			ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
					      &table->data_vec[ix].context);
		else if (action == GID_TABLE_WRITE_ACTION_DEL)
			ret = ib_dev->del_gid(ib_dev, port, ix,
					      &table->data_vec[ix].context);
		write_lock_irqsave(&table->data_vec[ix].lock, flags);
	}

	old_net_dev = table->data_vec[ix].attr.ndev;
	if (old_net_dev && old_net_dev != attr->ndev)
		dev_put(old_net_dev);
	/* if modify_gid failed, just delete the old gid */
	if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
		gid = &zgid;
		attr = &zattr;
		table->data_vec[ix].context = NULL;
	}
	if (default_gid)
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
	if (table->data_vec[ix].attr.ndev &&
	    table->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(table->data_vec[ix].attr.ndev);

	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

	write_unlock_irqrestore(&table->data_vec[ix].lock, flags);

	if (!ret && rdma_cap_roce_gid_table(ib_dev, port)) {
		struct ib_event event;

		event.device		= ib_dev;
		event.element.port_num	= port;
		event.event		= IB_EVENT_GID_CHANGE;

		ib_dispatch_event(&event);
	}

	return ret;
}
static int add_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   const union ib_gid *gid,
		   const struct ib_gid_attr *attr,
		   bool default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
		      struct ib_gid_table *table, int ix,
		      const union ib_gid *gid,
		      const struct ib_gid_attr *attr,
		      bool default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   bool default_gid)
{
	return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
			 GID_TABLE_WRITE_ACTION_DEL, default_gid);
}
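
/*
 * find_gid() returns the first index whose entry matches every field
 * selected by @mask, or -1 if none does.  For example, a caller that
 * wants an unused, non-default slot searches for zgid (a sketch of the
 * idiom used by ib_cache_gid_add() below):
 *
 *	ix = find_gid(table, &zgid, NULL, false,
 *		      GID_ATTR_FIND_MASK_GID |
 *		      GID_ATTR_FIND_MASK_DEFAULT);
 */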
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask)
{
	int i;

	for (i = 0; i < table->sz; i++) {
		unsigned long flags;
		struct ib_gid_attr *attr = &table->data_vec[i].attr;

		read_lock_irqsave(&table->data_vec[i].lock, flags);

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			goto next;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			goto next;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			goto next;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(table->data_vec[i].props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			goto next;

		read_unlock_irqrestore(&table->data_vec[i].lock, flags);
		return i;
next:
		read_unlock_irqrestore(&table->data_vec[i].lock, flags);
	}

	return -1;
}
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}
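
/*
 * ib_cache_gid_add() adds @gid to the first free slot of @port's table
 * (or silently reuses an existing matching entry).  Adding a GID that
 * collides with the port's default GID is rejected; default GIDs are
 * managed only through ib_cache_gid_set_default_gid().
 */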
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	int ret = 0;
	struct net_device *idev;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_NETDEV);
	if (ix >= 0)
		goto out_unlock;

	ix = find_gid(table, &zgid, NULL, false, GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_DEFAULT);
	if (ix < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	add_gid(ib_dev, port, table, ix, gid, attr, false);

out_unlock:
	mutex_unlock(&table->lock);
	return ret;
}
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID	  |
		      GID_ATTR_FIND_MASK_NETDEV	  |
		      GID_ATTR_FIND_MASK_DEFAULT);
	if (ix < 0)
		goto out_unlock;

	del_gid(ib_dev, port, table, ix, false);

out_unlock:
	mutex_unlock(&table->lock);
	return 0;
}
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);

	for (ix = 0; ix < table->sz; ix++)
		if (table->data_vec[ix].attr.ndev == ndev)
			del_gid(ib_dev, port, table, ix, false);

	mutex_unlock(&table->lock);
	return 0;
}
static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned long flags;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	read_lock_irqsave(&table->data_vec[index].lock, flags);
	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID) {
		read_unlock_irqrestore(&table->data_vec[index].lock, flags);
		return -EAGAIN;
	}

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	read_unlock_irqrestore(&table->data_vec[index].lock, flags);
	return 0;
}
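
/*
 * _ib_cache_gid_table_find() walks every port's table and reports the
 * first (port, index) pair matching @mask.  @port and @index may be
 * NULL when the caller only cares whether the GID exists at all.
 */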
static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	u8 p;
	int local_index;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ports_table[p];
		local_index = find_gid(table, gid, val, false, mask);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			return 0;
		}
	}

	return -ENOENT;
}
static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}
int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID;
	struct ib_gid_attr val = {.ndev = ndev};

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev))
		return -ENOENT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	local_index = find_gid(table, gid, &val, false, mask);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		return 0;
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);
/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned int i;
	bool found = false;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;
		unsigned long flags;

		read_lock_irqsave(&table->data_vec[i].lock, flags);
		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			goto next;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			goto next;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context))
			found = true;

next:
		read_unlock_irqrestore(&table->data_vec[i].lock, flags);

		if (found)
			break;
	}

	if (!found)
		return -ENOENT;

	if (index)
		*index = i;

	return 0;
}
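
/*
 * alloc_gid_table() returns a zero-initialized table of @sz entries;
 * every entry therefore starts out as zgid with a cleared attribute,
 * which is exactly what find_gid() treats as a free slot.
 */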
static struct ib_gid_table *alloc_gid_table(int sz)
{
	int i;
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;

	for (i = 0; i < sz; i++)
		rwlock_init(&table->data_vec[i].lock);

	return table;

err_free_table:
	kfree(table);
	return NULL;
}
static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}
static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;

	if (!table)
		return;

	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid)))
			del_gid(ib_dev, port, table, i,
				table->data_vec[i].props &
				GID_TABLE_ENTRY_DEFAULT);
	}
}
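
/*
 * ib_cache_gid_set_default_gid() (re)computes the default GID for
 * @ndev and writes it to the reserved default slot, or clears that
 * slot when @mode is not IB_CACHE_GID_DEFAULT_MODE_SET.  If the slot
 * already holds the same GID and attributes, it is left untouched.
 */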
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  enum ib_cache_gid_default_mode mode)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_table *table;
	int ix;
	union ib_gid current_gid;
	struct ib_gid_attr current_gid_attr = {};

	table = ports_table[port - rdma_start_port(ib_dev)];

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	mutex_lock(&table->lock);
	ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT);

	/* Couldn't find default GID location */
	WARN_ON(ix < 0);

	if (!__ib_cache_gid_get(ib_dev, port, ix,
				&current_gid, &current_gid_attr) &&
	    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
	    !memcmp(&gid, &current_gid, sizeof(gid)) &&
	    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
		goto unlock;

	if ((memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
	     memcmp(&current_gid_attr, &zattr,
		    sizeof(current_gid_attr))) &&
	    del_gid(ib_dev, port, table, ix, true)) {
		pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
			ix, gid.raw);
		goto unlock;
	}

	if (mode == IB_CACHE_GID_DEFAULT_MODE_SET)
		if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
			pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
				gid.raw);

unlock:
	if (current_gid_attr.ndev)
		dev_put(current_gid_attr.ndev);
	mutex_unlock(&table->lock);
}
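
/*
 * On RoCE ports, index 0 is reserved for the default GID: marking the
 * entry GID_TABLE_ENTRY_DEFAULT here keeps ordinary ib_cache_gid_add()
 * calls from claiming it, since their free-slot search requires the
 * DEFAULT bit to be clear.
 */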
static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	if (rdma_protocol_roce(ib_dev, port)) {
		struct ib_gid_table_entry *entry = &table->data_vec[0];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
	}

	return 0;
}
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table **table;
	int err = 0;

	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);

	if (!table) {
		pr_warn("failed to allocate ib gid cache for %s\n",
			ib_dev->name);
		return -ENOMEM;
	}

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table[port] =
			alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table[port]) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table[port]);
		if (err)
			goto rollback_table_setup;
	}

	ib_dev->cache.gid_cache = table;
	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
		release_gid_table(table[port]);
	}

	kfree(table);
	return err;
}
static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		release_gid_table(table[port]);

	kfree(table);
	ib_dev->cache.gid_cache = NULL;
}
static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
}
static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	err = roce_rescan_device(ib_dev);

	if (err) {
		gid_table_cleanup_one(ib_dev);
		gid_table_release_one(ib_dev);
	}

	return err;
}
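
/*
 * A minimal usage sketch for the exported getters below ("dev" and the
 * surrounding error handling are placeholders, not part of this file):
 *
 *	union ib_gid gid;
 *	struct ib_gid_attr attr;
 *
 *	if (!ib_get_cached_gid(dev, rdma_start_port(dev), 0, &gid, &attr) &&
 *	    attr.ndev)
 *		dev_put(attr.ndev);	// drop the reference the cache took
 */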
int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid,
		      struct ib_gid_attr *gid_attr)
{
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	return __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
}
EXPORT_SYMBOL(ib_get_cached_gid);
int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       struct net_device *ndev,
		       u8               *port_num,
		       u16              *index)
{
	return ib_cache_gid_find(device, gid, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);
int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only RoCE GID table supports filter function */
	if (!rdma_cap_roce_gid_table(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);
int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
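
/*
 * P_Key search note: bit 15 of a P_Key is the full-membership bit, so
 * ib_find_cached_pkey() compares only the low 15 bits and prefers a
 * full-member match, falling back to a partial-member index
 * (partial_ix) only when no full member was found.
 */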
int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);
int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8               *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);
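
/*
 * ib_cache_update() re-reads a port's P_Key table, LMC and (for
 * non-RoCE ports) GID table from the device and swaps the new copies
 * in under cache.lock.  RoCE ports skip the GID query because their
 * GID table is maintained through the RoCE GID management code
 * instead of device queries.
 */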
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache {
		int             table_len;
		union ib_gid    table[0];
	}			  *gid_cache = NULL;
	int                        i;
	int                        ret;
	struct ib_gid_table	  *table;
	struct ib_gid_table	 **ports_table = device->cache.gid_cache;
	bool			   use_roce_gid_table =
					rdma_cap_roce_gid_table(device, port);

	if (port < rdma_start_port(device) || port > rdma_end_port(device))
		return;

	table = ports_table[port - rdma_start_port(device)];

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
				    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0; i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
				       ret, device->name, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
	if (!use_roce_gid_table) {
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i, gid_cache->table + i,
				   &zattr, false);
		}
	}

	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}
static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}
static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			queue_work(ib_wq, &work->work);
		}
	}
}
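
/*
 * Device setup/teardown order: ib_cache_setup_one() allocates the
 * per-port caches, primes them with ib_cache_update() and only then
 * registers the event handler, so updates can never race with a
 * half-built cache.  Teardown is split between ib_cache_cleanup_one()
 * (unregister + flush) and ib_cache_release_one() (free memory).
 */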
int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kzalloc(sizeof *device->cache.pkey_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1),
			GFP_KERNEL);
	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (rdma_end_port(device) -
					   rdma_start_port(device) + 1),
					  GFP_KERNEL);
	if (!device->cache.pkey_cache ||
	    !device->cache.lmc_cache) {
		printk(KERN_WARNING "Couldn't allocate cache for %s\n",
		       device->name);
		return -ENOMEM;
	}

	err = gid_table_setup_one(device);
	if (err)
		/* Allocated memory will be cleaned in the release function */
		return err;

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device));

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	err = ib_register_event_handler(&device->cache.event_handler);
	if (err)
		goto err;

	return 0;

err:
	gid_table_cleanup_one(device);
	return err;
}
void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	if (device->cache.pkey_cache)
		for (p = 0;
		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
			kfree(device->cache.pkey_cache[p]);

	gid_table_release_one(device);
	kfree(device->cache.pkey_cache);
	kfree(device->cache.lmc_cache);
}
void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}
void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}