/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
        int             table_len;
        u16             table[0];
};

struct ib_update_work {
        struct work_struct work;
        struct ib_device  *device;
        u8                 port_num;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
        GID_ATTR_FIND_MASK_GID          = 1UL << 0,
        GID_ATTR_FIND_MASK_NETDEV       = 1UL << 1,
        GID_ATTR_FIND_MASK_DEFAULT      = 1UL << 2,
        GID_ATTR_FIND_MASK_GID_TYPE     = 1UL << 3,
};

enum gid_table_entry_props {
        GID_TABLE_ENTRY_INVALID         = 1UL << 0,
        GID_TABLE_ENTRY_DEFAULT         = 1UL << 1,
};

enum gid_table_write_action {
        GID_TABLE_WRITE_ACTION_ADD,
        GID_TABLE_WRITE_ACTION_DEL,
        /* MODIFY only updates the GID table. Currently only used by
         * ib_cache_update().
         */
        GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
        unsigned long       props;
        union ib_gid        gid;
        struct ib_gid_attr  attr;
        void               *context;
};

struct ib_gid_table {
        int                  sz;
        /* In RoCE, adding a GID to the table requires:
         * (a) Find whether this GID already exists.
         * (b) Find a free space.
         * (c) Write the new GID.
         *
         * Deletion requires a different set of operations:
         * (a) Find the GID.
         * (b) Delete it.
         *
         * Add/delete should be carried out atomically.
         * This is done by locking this mutex from multiple
         * writers. We don't need this lock for IB, as the MAD
         * layer replaces all entries. All data_vec entries
         * are locked by this lock.
         */
        struct mutex         lock;
        /* This lock protects the table entries from being
         * read and written simultaneously.
         */
        rwlock_t             rwlock;
        struct ib_gid_table_entry *data_vec;
};

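/*
 * Illustrative writer-side locking sketch (an assumption drawn from the
 * protocol described above and from ib_cache_gid_add() below; "table" is
 * the ib_gid_table of a RoCE port):
 *
 *      mutex_lock(&table->lock);        // serialize writers (find + write)
 *      write_lock_irq(&table->rwlock);  // exclude readers while updating
 *      // ... find_gid() / write_gid() ...
 *      write_unlock_irq(&table->rwlock);
 *      mutex_unlock(&table->lock);
 */
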
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                struct ib_event event;

                event.device            = ib_dev;
                event.element.port_num  = port;
                event.event             = IB_EVENT_GID_CHANGE;

                ib_dispatch_event(&event);
        }
}

static const char * const gid_type_str[] = {
        [IB_GID_TYPE_IB]                = "IB/RoCE v1",
        [IB_GID_TYPE_ROCE_UDP_ENCAP]    = "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
        if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
                return gid_type_str[gid_type];

        return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

int ib_cache_gid_parse_type_str(const char *buf)
{
        unsigned int i;
        size_t len;
        int err = -EINVAL;

        len = strlen(buf);
        if (len == 0)
                return -EINVAL;

        if (buf[len - 1] == '\n')
                len--;

        for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
                if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
                    len == strlen(gid_type_str[i])) {
                        err = i;
                        break;
                }

        return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);

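/*
 * Usage sketch (illustrative only): a sysfs store handler could map the
 * user-visible string back to an ib_gid_type. "buf" is whatever the user
 * wrote, possibly newline-terminated, e.g. "RoCE v2\n":
 *
 *      int gid_type = ib_cache_gid_parse_type_str(buf);
 *
 *      if (gid_type < 0)
 *              return -EINVAL;  // not one of the strings in gid_type_str[]
 */
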
/* This function expects that rwlock will be write locked in all
 * scenarios and that lock will be locked in sleep-able (RoCE)
 * scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
                     struct ib_gid_table *table, int ix,
                     const union ib_gid *gid,
                     const struct ib_gid_attr *attr,
                     enum gid_table_write_action action,
                     bool default_gid)
        __releases(&table->rwlock) __acquires(&table->rwlock)
{
        int ret = 0;
        struct net_device *old_net_dev;

        /* in rdma_cap_roce_gid_table, this function should be protected by a
         * sleep-able lock.
         */

        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
                write_unlock_irq(&table->rwlock);
                /* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
                 * RoCE providers and thus only updates the cache.
                 */
                if (action == GID_TABLE_WRITE_ACTION_ADD)
                        ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
                                              &table->data_vec[ix].context);
                else if (action == GID_TABLE_WRITE_ACTION_DEL)
                        ret = ib_dev->del_gid(ib_dev, port, ix,
                                              &table->data_vec[ix].context);
                write_lock_irq(&table->rwlock);
        }

        old_net_dev = table->data_vec[ix].attr.ndev;
        if (old_net_dev && old_net_dev != attr->ndev)
                dev_put(old_net_dev);
        /* if modify_gid failed, just delete the old gid */
        if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
                gid = &zgid;
                attr = &zattr;
                table->data_vec[ix].context = NULL;
        }
        if (default_gid)
                table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
        memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
        memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
        if (table->data_vec[ix].attr.ndev &&
            table->data_vec[ix].attr.ndev != old_net_dev)
                dev_hold(table->data_vec[ix].attr.ndev);

        table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

        return ret;
}

static int add_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   const union ib_gid *gid,
                   const struct ib_gid_attr *attr,
                   bool default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
                      struct ib_gid_table *table, int ix,
                      const union ib_gid *gid,
                      const struct ib_gid_attr *attr,
                      bool default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   bool default_gid)
{
        return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
                         GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
                    unsigned long mask, int *pempty)
{
        int i = 0;
        int found = -1;
        int empty = pempty ? -1 : 0;

        while (i < table->sz && (found < 0 || empty < 0)) {
                struct ib_gid_table_entry *data = &table->data_vec[i];
                struct ib_gid_attr *attr = &data->attr;
                int curr_index = i;

                i++;

                if (data->props & GID_TABLE_ENTRY_INVALID)
                        continue;

                if (empty < 0)
                        if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
                            !memcmp(attr, &zattr, sizeof(*attr)) &&
                            !data->props)
                                empty = curr_index;

                if (found >= 0)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
                    attr->gid_type != val->gid_type)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID &&
                    memcmp(gid, &data->gid, sizeof(*gid)))
                        continue;

                if (mask & GID_ATTR_FIND_MASK_NETDEV &&
                    attr->ndev != val->ndev)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
                    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
                    default_gid)
                        continue;

                found = curr_index;
        }

        if (pempty)
                *pempty = empty;

        return found;
}

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
        gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        addrconf_ifid_eui48(&gid->raw[8], dev);
}

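/*
 * For reference, the default GID built above is the netdev's IPv6 link-local
 * address: the fe80::/64 prefix plus an EUI-64 interface ID that
 * addrconf_ifid_eui48() derives from the MAC (ff:fe inserted in the middle,
 * universal/local bit flipped). Illustrative example (hypothetical MAC):
 * 00:11:22:33:44:55 yields the GID fe80::0211:22ff:fe33:4455.
 */
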
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;
        int ret = 0;
        struct net_device *idev;
        int empty;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (!memcmp(gid, &zgid, sizeof(*gid)))
                return -EINVAL;

        if (ib_dev->get_netdev) {
                idev = ib_dev->get_netdev(ib_dev, port);
                if (idev && attr->ndev != idev) {
                        union ib_gid default_gid;

                        /* Adding default GIDs is not permitted */
                        make_default_gid(idev, &default_gid);
                        if (!memcmp(gid, &default_gid, sizeof(*gid))) {
                                dev_put(idev);
                                return -EPERM;
                        }
                }
                if (idev)
                        dev_put(idev);
        }

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_GID_TYPE |
                      GID_ATTR_FIND_MASK_NETDEV, &empty);
        if (ix >= 0)
                goto out_unlock;

        if (empty < 0) {
                ret = -ENOSPC;
                goto out_unlock;
        }

        ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
        if (!ret)
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);
        return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;

        table = ports_table[port - rdma_start_port(ib_dev)];

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        ix = find_gid(table, gid, attr, false,
                      GID_ATTR_FIND_MASK_GID      |
                      GID_ATTR_FIND_MASK_GID_TYPE |
                      GID_ATTR_FIND_MASK_NETDEV   |
                      GID_ATTR_FIND_MASK_DEFAULT,
                      NULL);
        if (ix < 0)
                goto out_unlock;

        if (!del_gid(ib_dev, port, table, ix, false))
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);
        return 0;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                     struct net_device *ndev)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;
        bool deleted = false;

        table = ports_table[port - rdma_start_port(ib_dev)];

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        for (ix = 0; ix < table->sz; ix++)
                if (table->data_vec[ix].attr.ndev == ndev)
                        if (!del_gid(ib_dev, port, table, ix, false))
                                deleted = true;

        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);

        return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
                              union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (index < 0 || index >= table->sz)
                return -EINVAL;

        if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
                return -EAGAIN;

        memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
        if (attr) {
                memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
                if (attr->ndev)
                        dev_hold(attr->ndev);
        }

        return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
                                    const union ib_gid *gid,
                                    const struct ib_gid_attr *val,
                                    unsigned long mask,
                                    u8 *port, u16 *index)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        u8 p;
        int local_index;
        unsigned long flags;

        for (p = 0; p < ib_dev->phys_port_cnt; p++) {
                table = ports_table[p];
                read_lock_irqsave(&table->rwlock, flags);
                local_index = find_gid(table, gid, val, false, mask, NULL);
                if (local_index >= 0) {
                        if (index)
                                *index = local_index;
                        if (port)
                                *port = p + rdma_start_port(ib_dev);
                        read_unlock_irqrestore(&table->rwlock, flags);
                        return 0;
                }
                read_unlock_irqrestore(&table->rwlock, flags);
        }

        return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
                             const union ib_gid *gid,
                             enum ib_gid_type gid_type,
                             struct net_device *ndev, u8 *port,
                             u16 *index)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
                                        mask, port, index);
}

int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
                               const union ib_gid *gid,
                               enum ib_gid_type gid_type,
                               u8 port, struct net_device *ndev,
                               u16 *index)
{
        int local_index;
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
        unsigned long flags;

        if (port < rdma_start_port(ib_dev) ||
            port > rdma_end_port(ib_dev))
                return -ENOENT;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        read_lock_irqsave(&table->rwlock, flags);
        local_index = find_gid(table, gid, &val, false, mask, NULL);
        if (local_index >= 0) {
                if (index)
                        *index = local_index;
                read_unlock_irqrestore(&table->rwlock, flags);
                return 0;
        }

        read_unlock_irqrestore(&table->rwlock, flags);
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);

/**
 * ib_find_gid_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
                                       const union ib_gid *gid,
                                       u8 port,
                                       bool (*filter)(const union ib_gid *,
                                                      const struct ib_gid_attr *,
                                                      void *),
                                       void *context,
                                       u16 *index)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        unsigned int i;
        unsigned long flags;
        bool found = false;

        if (port < rdma_start_port(ib_dev) ||
            port > rdma_end_port(ib_dev) ||
            !rdma_protocol_roce(ib_dev, port))
                return -EPROTONOSUPPORT;

        table = ports_table[port - rdma_start_port(ib_dev)];

        read_lock_irqsave(&table->rwlock, flags);
        for (i = 0; i < table->sz; i++) {
                struct ib_gid_attr attr;

                if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
                        continue;

                if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
                        continue;

                memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

                if (filter(gid, &attr, context)) {
                        found = true;
                        if (index)
                                *index = i;
                        break;
                }
        }
        read_unlock_irqrestore(&table->rwlock, flags);

        return found ? 0 : -ENOENT;
}

static struct ib_gid_table *alloc_gid_table(int sz)
{
        struct ib_gid_table *table =
                kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
        if (!table->data_vec)
                goto err_free_table;

        mutex_init(&table->lock);

        table->sz = sz;
        rwlock_init(&table->rwlock);

        return table;

err_free_table:
        kfree(table);
        return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
        if (table) {
                kfree(table->data_vec);
                kfree(table);
        }
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
                                   struct ib_gid_table *table)
{
        int i;
        bool deleted = false;

        if (!table)
                return;

        write_lock_irq(&table->rwlock);
        for (i = 0; i < table->sz; ++i) {
                if (memcmp(&table->data_vec[i].gid, &zgid,
                           sizeof(table->data_vec[i].gid)))
                        if (!del_gid(ib_dev, port, table, i,
                                     table->data_vec[i].props &
                                     GID_ATTR_FIND_MASK_DEFAULT))
                                deleted = true;
        }
        write_unlock_irq(&table->rwlock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                                  struct net_device *ndev,
                                  unsigned long gid_type_mask,
                                  enum ib_cache_gid_default_mode mode)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        union ib_gid gid;
        struct ib_gid_attr gid_attr;
        struct ib_gid_attr zattr_type = zattr;
        struct ib_gid_table *table;
        unsigned int gid_type;

        table = ports_table[port - rdma_start_port(ib_dev)];

        make_default_gid(ndev, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
                int ix;
                union ib_gid current_gid;
                struct ib_gid_attr current_gid_attr = {};

                if (1UL << gid_type & ~gid_type_mask)
                        continue;

                gid_attr.gid_type = gid_type;

                mutex_lock(&table->lock);
                write_lock_irq(&table->rwlock);
                ix = find_gid(table, NULL, &gid_attr, true,
                              GID_ATTR_FIND_MASK_GID_TYPE |
                              GID_ATTR_FIND_MASK_DEFAULT,
                              NULL);

                /* Couldn't find default GID location */
                if (WARN_ON(ix < 0))
                        goto release;

                zattr_type.gid_type = gid_type;

                if (!__ib_cache_gid_get(ib_dev, port, ix,
                                        &current_gid, &current_gid_attr) &&
                    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
                    !memcmp(&gid, &current_gid, sizeof(gid)) &&
                    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
                        goto release;

                if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
                    memcmp(&current_gid_attr, &zattr_type,
                           sizeof(current_gid_attr))) {
                        if (del_gid(ib_dev, port, table, ix, true)) {
                                pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
                                        ix, gid.raw);
                                goto release;
                        } else
                                dispatch_gid_change_event(ib_dev, port);
                }

                if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
                        if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
                                pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
                                        gid.raw);
                        else
                                dispatch_gid_change_event(ib_dev, port);
                }

release:
                if (current_gid_attr.ndev)
                        dev_put(current_gid_attr.ndev);
                write_unlock_irq(&table->rwlock);
                mutex_unlock(&table->lock);
        }
}

static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
                                     struct ib_gid_table *table)
{
        unsigned int i;
        unsigned long roce_gid_type_mask;
        unsigned int num_default_gids;
        unsigned int current_gid = 0;

        roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
        num_default_gids = hweight_long(roce_gid_type_mask);
        for (i = 0; i < num_default_gids && i < table->sz; i++) {
                struct ib_gid_table_entry *entry =
                        &table->data_vec[i];

                entry->props |= GID_TABLE_ENTRY_DEFAULT;
                current_gid = find_next_bit(&roce_gid_type_mask,
                                            BITS_PER_LONG,
                                            current_gid);
                entry->attr.gid_type = current_gid++;
        }

        return 0;
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
        u8 port;
        struct ib_gid_table **table;
        int err = 0;

        table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
        if (!table) {
                pr_warn("failed to allocate ib gid cache for %s\n",
                        ib_dev->name);
                return -ENOMEM;
        }

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                u8 rdma_port = port + rdma_start_port(ib_dev);

                table[port] =
                        alloc_gid_table(
                                ib_dev->port_immutable[rdma_port].gid_tbl_len);
                if (!table[port]) {
                        err = -ENOMEM;
                        goto rollback_table_setup;
                }

                err = gid_table_reserve_default(ib_dev,
                                                port + rdma_start_port(ib_dev),
                                                table[port]);
                if (err)
                        goto rollback_table_setup;
        }

        ib_dev->cache.gid_cache = table;
        return 0;

rollback_table_setup:
        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table[port]);
                release_gid_table(table[port]);
        }

        kfree(table);
        return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
        struct ib_gid_table **table = ib_dev->cache.gid_cache;
        u8 port;

        if (!table)
                return;

        for (port = 0; port < ib_dev->phys_port_cnt; port++)
                release_gid_table(table[port]);

        kfree(table);
        ib_dev->cache.gid_cache = NULL;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
        struct ib_gid_table **table = ib_dev->cache.gid_cache;
        u8 port;

        if (!table)
                return;

        for (port = 0; port < ib_dev->phys_port_cnt; port++)
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table[port]);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
        int err;

        err = _gid_table_setup_one(ib_dev);
        if (err)
                return err;

        err = roce_rescan_device(ib_dev);
        if (err) {
                gid_table_cleanup_one(ib_dev);
                gid_table_release_one(ib_dev);
        }

        return err;
}

int ib_get_cached_gid(struct ib_device *device,
                      u8                port_num,
                      int               index,
                      union ib_gid     *gid,
                      struct ib_gid_attr *gid_attr)
{
        int res;
        unsigned long flags;
        struct ib_gid_table **ports_table = device->cache.gid_cache;
        struct ib_gid_table *table = ports_table[port_num - rdma_start_port(device)];

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&table->rwlock, flags);
        res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
        read_unlock_irqrestore(&table->rwlock, flags);

        return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);

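/*
 * Caller-side sketch (illustrative; port/index values are arbitrary): read
 * the GID at index 0 of port 1. Note that __ib_cache_gid_get() takes a
 * reference on attr.ndev, which the caller must drop when done:
 *
 *      union ib_gid gid;
 *      struct ib_gid_attr attr;
 *
 *      if (!ib_get_cached_gid(device, 1, 0, &gid, &attr) && attr.ndev)
 *              dev_put(attr.ndev);
 */
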
int ib_find_cached_gid(struct ib_device *device,
                       const union ib_gid *gid,
                       enum ib_gid_type gid_type,
                       struct net_device *ndev,
                       u8               *port_num,
                       u16              *index)
{
        return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
                          const union ib_gid *gid,
                          u8 port_num,
                          bool (*filter)(const union ib_gid *gid,
                                         const struct ib_gid_attr *,
                                         void *),
                          void *context, u16 *index)
{
        /* Only RoCE GID table supports filter function */
        if (!rdma_cap_roce_gid_table(device, port_num) && filter)
                return -EPROTONOSUPPORT;

        return ib_cache_gid_find_by_filter(device, gid,
                                           port_num, filter,
                                           context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);

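/*
 * Filter sketch (illustrative; the callback name and matching rule are
 * hypothetical): accept only RoCE v2 entries whose netdev matches a given
 * device, searched via ib_find_gid_by_filter():
 *
 *      static bool my_vlan_filter(const union ib_gid *gid,
 *                                 const struct ib_gid_attr *attr,
 *                                 void *context)
 *      {
 *              return attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
 *                     attr->ndev == context;
 *      }
 *
 *      err = ib_find_gid_by_filter(device, gid, port_num, my_vlan_filter,
 *                                  vlan_ndev, &index);
 *
 * As documented above, the filter runs in atomic context and must not sleep.
 */
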
int ib_get_cached_pkey(struct ib_device *device,
                       u8                port_num,
                       int               index,
                       u16              *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_find_cached_pkey(struct ib_device *device,
                        u8                port_num,
                        u16               pkey,
                        u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else
                                partial_ix = i;
                }

        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

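/*
 * Usage sketch (illustrative): look up the table index of the default
 * partition key. Per the logic above, a full-membership match (bit 15 set)
 * is preferred; a limited-membership entry is returned only if no full
 * match exists:
 *
 *      u16 index;
 *
 *      if (!ib_find_cached_pkey(device, port_num, 0xffff, &index))
 *              pr_debug("default pkey at index %u\n", index);
 */
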
int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8                port_num,
                              u16               pkey,
                              u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
                      u8                port_num,
                      u8               *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

static void ib_cache_update(struct ib_device *device,
                            u8                port)
{
        struct ib_port_attr       *tprops = NULL;
        struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
        struct ib_gid_cache {
                int             table_len;
                union ib_gid    table[0];
        }                         *gid_cache = NULL;
        int                        i;
        int                        ret;
        struct ib_gid_table       *table;
        struct ib_gid_table      **ports_table = device->cache.gid_cache;
        bool                       use_roce_gid_table =
                                        rdma_cap_roce_gid_table(device, port);

        if (port < rdma_start_port(device) || port > rdma_end_port(device))
                return;

        table = ports_table[port - rdma_start_port(device)];

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                pr_warn("ib_query_port failed (%d) for %s\n",
                        ret, device->name);
                goto err;
        }

        pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
                             sizeof *pkey_cache->table, GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        if (!use_roce_gid_table) {
                gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
                            sizeof(*gid_cache->table), GFP_KERNEL);
                if (!gid_cache)
                        goto err;

                gid_cache->table_len = tprops->gid_tbl_len;
        }

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
                                ret, device->name, i);
                        goto err;
                }
        }

        if (!use_roce_gid_table) {
                for (i = 0; i < gid_cache->table_len; ++i) {
                        ret = ib_query_gid(device, port, i,
                                           gid_cache->table + i, NULL);
                        if (ret) {
                                pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
                                        ret, device->name, i);
                                goto err;
                        }
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

        device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
        if (!use_roce_gid_table) {
                write_lock(&table->rwlock);
                for (i = 0; i < gid_cache->table_len; i++) {
                        modify_gid(device, port, table, i, gid_cache->table + i,
                                   &zattr, false);
                }
                write_unlock(&table->rwlock);
        }

        device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

        write_unlock_irq(&device->cache.lock);

        kfree(gid_cache);
        kfree(old_pkey_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(gid_cache);
        kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device, work->port_num);
        kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        queue_work(ib_wq, &work->work);
                }
        }
}

int ib_cache_setup_one(struct ib_device *device)
{
        int p;
        int err;

        rwlock_init(&device->cache.lock);

        device->cache.pkey_cache =
                kzalloc(sizeof *device->cache.pkey_cache *
                        (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
        device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
                                          (rdma_end_port(device) -
                                           rdma_start_port(device) + 1),
                                          GFP_KERNEL);
        if (!device->cache.pkey_cache ||
            !device->cache.lmc_cache) {
                pr_warn("Couldn't allocate cache for %s\n", device->name);
                return -ENOMEM;
        }

        err = gid_table_setup_one(device);
        if (err)
                /* Allocated memory will be cleaned in the release function */
                return err;

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                ib_cache_update(device, p + rdma_start_port(device));

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        err = ib_register_event_handler(&device->cache.event_handler);
        if (err)
                goto err;

        return 0;

err:
        gid_table_cleanup_one(device);
        return err;
}

void ib_cache_release_one(struct ib_device *device)
{
        int p;

        /*
         * The release function frees all the cache elements.
         * This function should be called as part of freeing
         * all the device's resources when the cache could no
         * longer be accessed.
         */
        if (device->cache.pkey_cache)
                for (p = 0;
                     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                        kfree(device->cache.pkey_cache[p]);

        gid_table_release_one(device);
        kfree(device->cache.pkey_cache);
        kfree(device->cache.lmc_cache);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
        /* The cleanup function unregisters the event handler,
         * waits for all in-progress workqueue elements and cleans
         * up the GID cache. This function should be called after
         * the device was removed from the devices list and all
         * clients were removed, so the cache exists but is
         * non-functional and shouldn't be updated anymore.
         */
        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);
        gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
        roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
        roce_gid_mgmt_cleanup();
}