/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"
struct ib_pkey_cache {
        int             table_len;
        u16             table[0];
};

struct ib_update_work {
        struct work_struct work;
        struct ib_device  *device;
        u8                 port_num;
        bool               enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;
enum gid_attr_find_mask {
        GID_ATTR_FIND_MASK_GID          = 1UL << 0,
        GID_ATTR_FIND_MASK_NETDEV       = 1UL << 1,
        GID_ATTR_FIND_MASK_DEFAULT      = 1UL << 2,
        GID_ATTR_FIND_MASK_GID_TYPE     = 1UL << 3,
};

enum gid_table_entry_props {
        GID_TABLE_ENTRY_INVALID         = 1UL << 0,
        GID_TABLE_ENTRY_DEFAULT         = 1UL << 1,
};

enum gid_table_write_action {
        GID_TABLE_WRITE_ACTION_ADD,
        GID_TABLE_WRITE_ACTION_DEL,
        /* MODIFY only updates the GID table. Currently only used by
         * ib_cache_update.
         */
        GID_TABLE_WRITE_ACTION_MODIFY
};
struct ib_gid_table_entry {
        unsigned long       props;
        union ib_gid        gid;
        struct ib_gid_attr  attr;
        void               *context;
};

struct ib_gid_table {
        int                  sz;
        /* In RoCE, adding a GID to the table requires:
         * (a) Find if this GID already exists.
         * (b) Find a free space.
         * (c) Write the new GID.
         *
         * Delete requires different set of operations:
         * (a) Find the GID.
         * (b) Delete it.
         *
         * Add/delete should be carried out atomically.
         * This is done by locking this mutex from multiple
         * writers. We don't need this lock for IB, as the MAD
         * layer replaces all entries. All data_vec entries
         * are locked by this lock.
         */
        struct mutex         lock;
        /* This lock protects the table entries from being
         * read and written simultaneously.
         */
        rwlock_t             rwlock;
        struct ib_gid_table_entry *data_vec;
};
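
/*
 * Informational sketch (not from the original source): per the comments
 * above, a RoCE writer takes both locks while a reader only takes the
 * rwlock, roughly:
 *
 *      mutex_lock(&table->lock);        // serializes add/del writers
 *      write_lock_irq(&table->rwlock);  // excludes concurrent readers
 *      ... modify table->data_vec[ix] ...
 *      write_unlock_irq(&table->rwlock);
 *      mutex_unlock(&table->lock);
 */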
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                struct ib_event event;

                event.device            = ib_dev;
                event.element.port_num  = port;
                event.event             = IB_EVENT_GID_CHANGE;

                ib_dispatch_event(&event);
        }
}
static const char * const gid_type_str[] = {
        [IB_GID_TYPE_IB]                = "IB/RoCE v1",
        [IB_GID_TYPE_ROCE_UDP_ENCAP]    = "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
        if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
                return gid_type_str[gid_type];

        return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);
int ib_cache_gid_parse_type_str(const char *buf)
{
        unsigned int i;
        size_t len;
        int err = -EINVAL;

        len = strlen(buf);
        if (len == 0)
                return -EINVAL;

        if (buf[len - 1] == '\n')
                len--;

        for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
                if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
                    len == strlen(gid_type_str[i])) {
                        err = i;
                        break;
                }

        return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
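
/*
 * Informational note: the parser above is aimed at user-supplied strings
 * (hence the trailing-newline trim); e.g. a buffer of "RoCE v2\n" matches
 * gid_type_str[IB_GID_TYPE_ROCE_UDP_ENCAP] and that index is returned,
 * while an unrecognized string yields -EINVAL.
 */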
/* This function expects that rwlock will be write locked in all
 * scenarios and that lock will be locked in sleep-able (RoCE)
 * scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
                     struct ib_gid_table *table, int ix,
                     const union ib_gid *gid,
                     const struct ib_gid_attr *attr,
                     enum gid_table_write_action action,
                     bool  default_gid)
        __releases(&table->rwlock) __acquires(&table->rwlock)
{
        int ret = 0;
        struct net_device *old_net_dev;
        enum ib_gid_type old_gid_type;

        /* in rdma_cap_roce_gid_table, this function should be protected by a
         * sleep-able lock.
         */

        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
                write_unlock_irq(&table->rwlock);
                /* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
                 * RoCE providers and thus only updates the cache.
                 */
                if (action == GID_TABLE_WRITE_ACTION_ADD)
                        ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
                                              &table->data_vec[ix].context);
                else if (action == GID_TABLE_WRITE_ACTION_DEL)
                        ret = ib_dev->del_gid(ib_dev, port, ix,
                                              &table->data_vec[ix].context);
                write_lock_irq(&table->rwlock);
        }

        old_net_dev = table->data_vec[ix].attr.ndev;
        old_gid_type = table->data_vec[ix].attr.gid_type;
        if (old_net_dev && old_net_dev != attr->ndev)
                dev_put(old_net_dev);

        /* if modify_gid failed, just delete the old gid */
        if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
                gid = &zgid;
                attr = &zattr;
                table->data_vec[ix].context = NULL;
        }

        memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
        memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
        if (default_gid) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
                if (action == GID_TABLE_WRITE_ACTION_DEL)
                        table->data_vec[ix].attr.gid_type = old_gid_type;
        }
        if (table->data_vec[ix].attr.ndev &&
            table->data_vec[ix].attr.ndev != old_net_dev)
                dev_hold(table->data_vec[ix].attr.ndev);

        table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

        return ret;
}
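
/*
 * Informational note: write_gid() flags the entry GID_TABLE_ENTRY_INVALID
 * before dropping the rwlock around the provider's sleep-able
 * add_gid/del_gid callbacks, so readers holding the read lock skip the
 * slot rather than observe a half-updated entry.
 */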
static int add_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   const union ib_gid *gid,
                   const struct ib_gid_attr *attr,
                   bool  default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_ADD, default_gid);
}
static int modify_gid(struct ib_device *ib_dev, u8 port,
                      struct ib_gid_table *table, int ix,
                      const union ib_gid *gid,
                      const struct ib_gid_attr *attr,
                      bool  default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}
static int del_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   bool  default_gid)
{
        return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
                         GID_TABLE_WRITE_ACTION_DEL, default_gid);
}
/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
                    unsigned long mask, int *pempty)
{
        int i = 0;
        int found = -1;
        int empty = pempty ? -1 : 0;

        while (i < table->sz && (found < 0 || empty < 0)) {
                struct ib_gid_table_entry *data = &table->data_vec[i];
                struct ib_gid_attr *attr = &data->attr;
                int curr_index = i;

                i++;

                if (data->props & GID_TABLE_ENTRY_INVALID)
                        continue;

                if (empty < 0)
                        if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
                            !memcmp(attr, &zattr, sizeof(*attr)) &&
                            !data->props)
                                empty = curr_index;

                if (found >= 0)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
                    attr->gid_type != val->gid_type)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID &&
                    memcmp(gid, &data->gid, sizeof(*gid)))
                        continue;

                if (mask & GID_ATTR_FIND_MASK_NETDEV &&
                    attr->ndev != val->ndev)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
                    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
                    default_gid)
                        continue;

                found = curr_index;
        }

        if (pempty)
                *pempty = empty;

        return found;
}
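
/*
 * Informational usage sketch: callers OR together GID_ATTR_FIND_MASK_*
 * bits to pick which fields of @val must match; e.g. a lookup by GID
 * value and type that ignores the bound netdev:
 *
 *      ix = find_gid(table, gid, &val, false,
 *                    GID_ATTR_FIND_MASK_GID |
 *                    GID_ATTR_FIND_MASK_GID_TYPE, NULL);
 */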
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
        gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        addrconf_ifid_eui48(&gid->raw[8], dev);
}
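
/*
 * Informational note: the default GID is the IPv6 link-local address of
 * the attached netdev: the fe80::/64 subnet prefix combined with the
 * interface identifier addrconf_ifid_eui48() derives from the 6-byte MAC
 * (modified EUI-64: ff:fe inserted in the middle and the universal/local
 * bit flipped).
 */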
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table *table;
        int ix;
        int ret = 0;
        struct net_device *idev;
        int empty;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        if (!memcmp(gid, &zgid, sizeof(*gid)))
                return -EINVAL;

        if (ib_dev->get_netdev) {
                idev = ib_dev->get_netdev(ib_dev, port);
                if (idev && attr->ndev != idev) {
                        union ib_gid default_gid;

                        /* Adding default GIDs is not permitted */
                        make_default_gid(idev, &default_gid);
                        if (!memcmp(gid, &default_gid, sizeof(*gid))) {
                                dev_put(idev);
                                return -EPERM;
                        }
                }
                if (idev)
                        dev_put(idev);
        }

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_GID_TYPE |
                      GID_ATTR_FIND_MASK_NETDEV, &empty);
        if (ix >= 0)
                goto out_unlock;

        if (empty < 0) {
                ret = -ENOSPC;
                goto out_unlock;
        }

        ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
        if (!ret)
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);
        return ret;
}
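
/*
 * Informational note: ib_cache_gid_add() behaves as find-or-insert; a
 * slot already matching on GID value, type and netdev is treated as
 * success, otherwise the first free slot reported by find_gid() is
 * populated and a GID change event is dispatched.
 */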
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table *table;
        int ix;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        ix = find_gid(table, gid, attr, false,
                      GID_ATTR_FIND_MASK_GID      |
                      GID_ATTR_FIND_MASK_GID_TYPE |
                      GID_ATTR_FIND_MASK_NETDEV   |
                      GID_ATTR_FIND_MASK_DEFAULT,
                      NULL);
        if (ix < 0)
                goto out_unlock;

        if (!del_gid(ib_dev, port, table, ix, false))
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);
        return 0;
}
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                     struct net_device *ndev)
{
        struct ib_gid_table *table;
        int ix;
        bool deleted = false;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        for (ix = 0; ix < table->sz; ix++)
                if (table->data_vec[ix].attr.ndev == ndev)
                        if (!del_gid(ib_dev, port, table, ix,
                                     !!(table->data_vec[ix].props &
                                        GID_TABLE_ENTRY_DEFAULT)))
                                deleted = true;

        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);

        return 0;
}
static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
                              union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table *table;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        if (index < 0 || index >= table->sz)
                return -EINVAL;

        if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
                return -EAGAIN;

        memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
        if (attr) {
                memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
                if (attr->ndev)
                        dev_hold(attr->ndev);
        }

        return 0;
}
static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
                                    const union ib_gid *gid,
                                    const struct ib_gid_attr *val,
                                    unsigned long mask,
                                    u8 *port, u16 *index)
{
        struct ib_gid_table *table;
        u8 p;
        int local_index;
        unsigned long flags;

        for (p = 0; p < ib_dev->phys_port_cnt; p++) {
                table = ib_dev->cache.ports[p].gid;
                read_lock_irqsave(&table->rwlock, flags);
                local_index = find_gid(table, gid, val, false, mask, NULL);
                if (local_index >= 0) {
                        if (index)
                                *index = local_index;
                        if (port)
                                *port = p + rdma_start_port(ib_dev);
                        read_unlock_irqrestore(&table->rwlock, flags);
                        return 0;
                }
                read_unlock_irqrestore(&table->rwlock, flags);
        }

        return -ENOENT;
}
static int ib_cache_gid_find(struct ib_device *ib_dev,
                             const union ib_gid *gid,
                             enum ib_gid_type gid_type,
                             struct net_device *ndev, u8 *port,
                             u16 *index)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
                                        mask, port, index);
}
int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
                               const union ib_gid *gid,
                               enum ib_gid_type gid_type,
                               u8 port, struct net_device *ndev,
                               u16 *index)
{
        int local_index;
        struct ib_gid_table *table;
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
        unsigned long flags;

        if (!rdma_is_port_valid(ib_dev, port))
                return -ENOENT;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        read_lock_irqsave(&table->rwlock, flags);
        local_index = find_gid(table, gid, &val, false, mask, NULL);
        if (local_index >= 0) {
                if (index)
                        *index = local_index;
                read_unlock_irqrestore(&table->rwlock, flags);
                return 0;
        }

        read_unlock_irqrestore(&table->rwlock, flags);
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);
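
/*
 * Informational usage sketch (the surrounding values are hypothetical):
 * resolving the table index of a RoCE v2 GID on a known port, matching
 * any netdev:
 *
 *      u16 ix;
 *
 *      if (!ib_find_cached_gid_by_port(ib_dev, &some_gid,
 *                                      IB_GID_TYPE_ROCE_UDP_ENCAP,
 *                                      1, NULL, &ix))
 *              ... ix is valid here ...
 */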
/**
 * ib_find_gid_by_filter - Returns the GID table index where a specified
 *   GID value occurs
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Private data passed through to @filter.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
                                       const union ib_gid *gid,
                                       u8 port,
                                       bool (*filter)(const union ib_gid *,
                                                      const struct ib_gid_attr *,
                                                      void *),
                                       void *context,
                                       u16 *index)
{
        struct ib_gid_table *table;
        unsigned int i;
        unsigned long flags;
        bool found = false;

        if (!rdma_is_port_valid(ib_dev, port) ||
            !rdma_protocol_roce(ib_dev, port))
                return -EPROTONOSUPPORT;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        read_lock_irqsave(&table->rwlock, flags);
        for (i = 0; i < table->sz; i++) {
                struct ib_gid_attr attr;

                if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
                        continue;

                if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
                        continue;

                memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

                if (filter(gid, &attr, context)) {
                        found = true;
                        if (index)
                                *index = i;
                        break;
                }
        }
        read_unlock_irqrestore(&table->rwlock, flags);

        if (!found)
                return -ENOENT;
        return 0;
}
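
/*
 * Informational sketch of a filter callback (this example is
 * hypothetical): the filter runs under the table's read lock in atomic
 * context, so it must not sleep; it receives a stable copy of the
 * entry's attributes and the caller's context pointer:
 *
 *      static bool gid_ndev_filter(const union ib_gid *gid,
 *                                  const struct ib_gid_attr *attr,
 *                                  void *context)
 *      {
 *              return attr->ndev == context;
 *      }
 */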
static struct ib_gid_table *alloc_gid_table(int sz)
{
        struct ib_gid_table *table =
                kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
        if (!table->data_vec)
                goto err_free_table;

        mutex_init(&table->lock);

        table->sz = sz;
        rwlock_init(&table->rwlock);

        return table;

err_free_table:
        kfree(table);
        return NULL;
}
static void release_gid_table(struct ib_gid_table *table)
{
        if (table) {
                kfree(table->data_vec);
                kfree(table);
        }
}
static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
                                   struct ib_gid_table *table)
{
        int i;
        bool deleted = false;

        if (!table)
                return;

        write_lock_irq(&table->rwlock);
        for (i = 0; i < table->sz; ++i) {
                if (memcmp(&table->data_vec[i].gid, &zgid,
                           sizeof(table->data_vec[i].gid)))
                        if (!del_gid(ib_dev, port, table, i,
                                     !!(table->data_vec[i].props &
                                        GID_TABLE_ENTRY_DEFAULT)))
                                deleted = true;
        }
        write_unlock_irq(&table->rwlock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);
}
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                                  struct net_device *ndev,
                                  unsigned long gid_type_mask,
                                  enum ib_cache_gid_default_mode mode)
{
        union ib_gid gid;
        struct ib_gid_attr gid_attr;
        struct ib_gid_attr zattr_type = zattr;
        struct ib_gid_table *table;
        unsigned int gid_type;

        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

        make_default_gid(ndev, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
                int ix;
                union ib_gid current_gid;
                struct ib_gid_attr current_gid_attr = {};

                if (1UL << gid_type & ~gid_type_mask)
                        continue;

                gid_attr.gid_type = gid_type;

                mutex_lock(&table->lock);
                write_lock_irq(&table->rwlock);
                ix = find_gid(table, NULL, &gid_attr, true,
                              GID_ATTR_FIND_MASK_GID_TYPE |
                              GID_ATTR_FIND_MASK_DEFAULT,
                              NULL);

                /* Couldn't find default GID location */
                if (WARN_ON(ix < 0))
                        goto release;

                zattr_type.gid_type = gid_type;

                if (!__ib_cache_gid_get(ib_dev, port, ix,
                                        &current_gid, &current_gid_attr) &&
                    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
                    !memcmp(&gid, &current_gid, sizeof(gid)) &&
                    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
                        goto release;

                if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
                    memcmp(&current_gid_attr, &zattr_type,
                           sizeof(current_gid_attr))) {
                        if (del_gid(ib_dev, port, table, ix, true)) {
                                pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
                                        ix, gid.raw);
                                goto release;
                        }
                        dispatch_gid_change_event(ib_dev, port);
                }

                if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
                        if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
                                pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
                                        gid.raw);
                        else
                                dispatch_gid_change_event(ib_dev, port);
                }

release:
                if (current_gid_attr.ndev)
                        dev_put(current_gid_attr.ndev);
                write_unlock_irq(&table->rwlock);
                mutex_unlock(&table->lock);
        }
}
static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
                                     struct ib_gid_table *table)
{
        unsigned int i;
        unsigned long roce_gid_type_mask;
        unsigned int num_default_gids;
        unsigned int current_gid = 0;

        roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
        num_default_gids = hweight_long(roce_gid_type_mask);
        for (i = 0; i < num_default_gids && i < table->sz; i++) {
                struct ib_gid_table_entry *entry =
                        &table->data_vec[i];

                entry->props |= GID_TABLE_ENTRY_DEFAULT;
                current_gid = find_next_bit(&roce_gid_type_mask,
                                            BITS_PER_LONG,
                                            current_gid);
                entry->attr.gid_type = current_gid++;
        }

        return 0;
}
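
/*
 * Informational note: one leading table slot is reserved per RoCE GID
 * type the port supports, so a port supporting both RoCE v1 and RoCE v2
 * pins entries 0 and 1 as default GIDs before any dynamic entries are
 * added.
 */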
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
        u8 port;
        struct ib_gid_table *table;
        int err = 0;

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                u8 rdma_port = port + rdma_start_port(ib_dev);

                table = alloc_gid_table(
                                ib_dev->port_immutable[rdma_port].gid_tbl_len);
                if (!table) {
                        err = -ENOMEM;
                        goto rollback_table_setup;
                }

                err = gid_table_reserve_default(ib_dev,
                                                port + rdma_start_port(ib_dev),
                                                table);
                if (err)
                        goto rollback_table_setup;
                ib_dev->cache.ports[port].gid = table;
        }

        return 0;

rollback_table_setup:
        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                table = ib_dev->cache.ports[port].gid;

                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table);
                release_gid_table(table);
        }

        return err;
}
static void gid_table_release_one(struct ib_device *ib_dev)
{
        struct ib_gid_table *table;
        u8 port;

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                table = ib_dev->cache.ports[port].gid;
                release_gid_table(table);
                ib_dev->cache.ports[port].gid = NULL;
        }
}
static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
        struct ib_gid_table *table;
        u8 port;

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                table = ib_dev->cache.ports[port].gid;
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table);
        }
}
static int gid_table_setup_one(struct ib_device *ib_dev)
{
        int err;

        err = _gid_table_setup_one(ib_dev);
        if (err)
                return err;

        rdma_roce_rescan_device(ib_dev);

        return err;
}
int ib_get_cached_gid(struct ib_device *device,
                      u8                port_num,
                      int               index,
                      union ib_gid     *gid,
                      struct ib_gid_attr *gid_attr)
{
        int res;
        unsigned long flags;
        struct ib_gid_table *table;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        table = device->cache.ports[port_num - rdma_start_port(device)].gid;
        read_lock_irqsave(&table->rwlock, flags);
        res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
        read_unlock_irqrestore(&table->rwlock, flags);

        return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);
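
/*
 * Informational usage sketch (the port/index values are arbitrary): when
 * a gid_attr is requested and the entry is bound to a netdev,
 * __ib_cache_gid_get() takes a reference on it, which the caller must
 * release:
 *
 *      union ib_gid gid;
 *      struct ib_gid_attr attr;
 *
 *      if (!ib_get_cached_gid(device, 1, 0, &gid, &attr)) {
 *              ... use gid and attr ...
 *              if (attr.ndev)
 *                      dev_put(attr.ndev);
 *      }
 */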
int ib_find_cached_gid(struct ib_device *device,
                       const union ib_gid *gid,
                       enum ib_gid_type gid_type,
                       struct net_device *ndev,
                       u8               *port_num,
                       u16              *index)
{
        return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);
int ib_find_gid_by_filter(struct ib_device *device,
                          const union ib_gid *gid,
                          u8 port_num,
                          bool (*filter)(const union ib_gid *gid,
                                         const struct ib_gid_attr *,
                                         void *),
                          void *context, u16 *index)
{
        /* Only RoCE GID table supports filter function */
        if (!rdma_cap_roce_gid_table(device, port_num) && filter)
                return -EPROTONOSUPPORT;

        return ib_cache_gid_find_by_filter(device, gid,
                                           port_num, filter,
                                           context, index);
}
int ib_get_cached_pkey(struct ib_device *device,
                       u8                port_num,
                       int               index,
                       u16              *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
int ib_get_cached_subnet_prefix(struct ib_device *device,
                                u8                port_num,
                                u64              *sn_pfx)
{
        unsigned long flags;
        int p;

        if (port_num < rdma_start_port(device) ||
            port_num > rdma_end_port(device))
                return -EINVAL;

        p = port_num - rdma_start_port(device);
        read_lock_irqsave(&device->cache.lock, flags);
        *sn_pfx = device->cache.ports[p].subnet_prefix;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);
int ib_find_cached_pkey(struct ib_device *device,
                        u8                port_num,
                        u16               pkey,
                        u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else
                                partial_ix = i;
                }

        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
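
/*
 * Informational note: in a P_Key the low 15 bits carry the key value and
 * bit 15 is the full-membership flag, so the loop above compares keys
 * with the 0x7fff mask while preferring a full-member entry (0x8000 set)
 * over a limited-member match recorded in partial_ix.
 */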
int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8                port_num,
                              u16               pkey,
                              u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);
int ib_get_cached_lmc(struct ib_device *device,
                      u8                port_num,
                      u8               *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);
int ib_get_cached_port_state(struct ib_device   *device,
                             u8                  port_num,
                             enum ib_port_state *port_state)
{
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *port_state = device->cache.ports[port_num
                - rdma_start_port(device)].port_state;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);
static void ib_cache_update(struct ib_device *device,
                            u8                port,
                            bool              enforce_security)
{
        struct ib_port_attr       *tprops = NULL;
        struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
        struct ib_gid_cache {
                int             table_len;
                union ib_gid    table[0];
        }                         *gid_cache = NULL;
        int                        i;
        int                        ret;
        struct ib_gid_table       *table;
        bool                       use_roce_gid_table =
                                        rdma_cap_roce_gid_table(device, port);

        if (!rdma_is_port_valid(device, port))
                return;

        table = device->cache.ports[port - rdma_start_port(device)].gid;

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                pr_warn("ib_query_port failed (%d) for %s\n",
                        ret, device->name);
                goto err;
        }

        pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
                             sizeof *pkey_cache->table, GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        if (!use_roce_gid_table) {
                gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
                                    sizeof(*gid_cache->table), GFP_KERNEL);
                if (!gid_cache)
                        goto err;

                gid_cache->table_len = tprops->gid_tbl_len;
        }

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
                                ret, device->name, i);
                        goto err;
                }
        }

        if (!use_roce_gid_table) {
                for (i = 0; i < gid_cache->table_len; ++i) {
                        ret = ib_query_gid(device, port, i,
                                           gid_cache->table + i, NULL);
                        if (ret) {
                                pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
                                        ret, device->name, i);
                                goto err;
                        }
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.ports[port -
                rdma_start_port(device)].pkey;

        device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
        if (!use_roce_gid_table) {
                write_lock(&table->rwlock);
                for (i = 0; i < gid_cache->table_len; i++) {
                        modify_gid(device, port, table, i, gid_cache->table + i,
                                   &zattr, false);
                }
                write_unlock(&table->rwlock);
        }

        device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
        device->cache.ports[port - rdma_start_port(device)].port_state =
                tprops->state;

        device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
                                                        tprops->subnet_prefix;
        write_unlock_irq(&device->cache.lock);

        if (enforce_security)
                ib_security_cache_change(device,
                                         port,
                                         tprops->subnet_prefix);

        kfree(gid_cache);
        kfree(old_pkey_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(gid_cache);
        kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device,
                        work->port_num,
                        work->enforce_security);
        kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        if (event->event == IB_EVENT_PKEY_CHANGE ||
                            event->event == IB_EVENT_GID_CHANGE)
                                work->enforce_security = true;
                        else
                                work->enforce_security = false;

                        queue_work(ib_wq, &work->work);
                }
        }
}
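
/*
 * Informational note: ib_cache_event() may run in atomic context, hence
 * the GFP_ATOMIC allocation; the actual refresh, which queries the
 * device and takes sleeping locks, is deferred to ib_wq via
 * ib_cache_task().
 */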
int ib_cache_setup_one(struct ib_device *device)
{
        int p;
        int err;

        rwlock_init(&device->cache.lock);

        device->cache.ports =
                kzalloc(sizeof(*device->cache.ports) *
                        (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
        if (!device->cache.ports)
                return -ENOMEM;

        err = gid_table_setup_one(device);
        if (err) {
                kfree(device->cache.ports);
                device->cache.ports = NULL;
                return err;
        }

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                ib_cache_update(device, p + rdma_start_port(device), true);

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        ib_register_event_handler(&device->cache.event_handler);
        return 0;
}
void ib_cache_release_one(struct ib_device *device)
{
        int p;

        /*
         * The release function frees all the cache elements.
         * This function should be called as part of freeing
         * all the device's resources when the cache could no
         * longer be accessed.
         */
        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                kfree(device->cache.ports[p].pkey);

        gid_table_release_one(device);
        kfree(device->cache.ports);
}
void ib_cache_cleanup_one(struct ib_device *device)
{
        /* The cleanup function unregisters the event handler,
         * waits for all in-progress workqueue elements and cleans
         * up the GID cache. This function should be called after
         * the device was removed from the devices list and all
         * clients were removed, so the cache exists but is
         * non-functional and shouldn't be updated anymore.
         */
        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);
        gid_table_cleanup_one(device);
}
void __init ib_cache_setup(void)
{
        roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
        roce_gid_mgmt_cleanup();
}