/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"
struct ib_pkey_cache {
        int             table_len;
        u16             table[0];
};
struct ib_update_work {
        struct work_struct work;
        struct ib_device  *device;
        u8                 port_num;
};
union ib_gid zgid;

static const struct ib_gid_attr zattr;
enum gid_attr_find_mask {
        GID_ATTR_FIND_MASK_GID          = 1UL << 0,
        GID_ATTR_FIND_MASK_NETDEV       = 1UL << 1,
        GID_ATTR_FIND_MASK_DEFAULT      = 1UL << 2,
        GID_ATTR_FIND_MASK_GID_TYPE     = 1UL << 3,
};
enum gid_table_entry_props {
        GID_TABLE_ENTRY_INVALID         = 1UL << 0,
        GID_TABLE_ENTRY_DEFAULT         = 1UL << 1,
};
enum gid_table_write_action {
        GID_TABLE_WRITE_ACTION_ADD,
        GID_TABLE_WRITE_ACTION_DEL,
        /* MODIFY only updates the GID table. Currently only used by
         * ib_cache_update.
         */
        GID_TABLE_WRITE_ACTION_MODIFY
};
struct ib_gid_table_entry {
        unsigned long       props;
        union ib_gid        gid;
        struct ib_gid_attr  attr;
        void               *context;
};
struct ib_gid_table {
        int                  sz;
        /* In RoCE, adding a GID to the table requires:
         * (a) Find if this GID already exists.
         * (b) Find a free space.
         * (c) Write the new GID.
         *
         * Delete requires a different set of operations:
         * (a) Find the GID.
         * (b) Delete it.
         *
         * Add/delete should be carried out atomically.
         * This is done by locking this mutex from multiple
         * writers. We don't need this lock for IB, as the MAD
         * layer replaces all entries. All data_vec entries
         * are locked by this lock.
         */
        struct mutex         lock;
        /* This lock protects the table entries from being
         * read and written simultaneously.
         */
        rwlock_t             rwlock;
        struct ib_gid_table_entry *data_vec;
};
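/*
 * Illustrative sketch (not part of the driver): the two-level locking
 * protocol a RoCE writer is expected to follow with the structure above.
 * The real writers are ib_cache_gid_add()/ib_cache_gid_del() below; this
 * helper exists only to make the locking comment concrete.
 */
static inline void example_gid_table_write_locking(struct ib_gid_table *table)
{
        mutex_lock(&table->lock);       /* serializes sleepable writers */
        write_lock_irq(&table->rwlock); /* excludes concurrent readers */

        /* ... find_gid()/write_gid() style updates would happen here ... */

        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);
}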
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                struct ib_event event;

                event.device            = ib_dev;
                event.element.port_num  = port;
                event.event             = IB_EVENT_GID_CHANGE;

                ib_dispatch_event(&event);
        }
}
static const char * const gid_type_str[] = {
        [IB_GID_TYPE_IB]                = "IB/RoCE v1",
        [IB_GID_TYPE_ROCE_UDP_ENCAP]    = "RoCE v2",
};
const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
        if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
                return gid_type_str[gid_type];

        return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);
int ib_cache_gid_parse_type_str(const char *buf)
{
        unsigned int i;
        size_t len;
        int err = -EINVAL;

        len = strlen(buf);
        if (len == 0)
                return -EINVAL;

        /* tolerate a trailing newline, e.g. from a sysfs write */
        if (buf[len - 1] == '\n')
                len--;

        for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
                if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
                    len == strlen(gid_type_str[i])) {
                        err = i;
                        break;
                }

        return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
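/*
 * Illustrative sketch: round-tripping a GID type through the two helpers
 * above, as a sysfs store handler might. The literal buffer contents are
 * an assumption for the example.
 */
static inline int example_gid_type_round_trip(void)
{
        int gid_type = ib_cache_gid_parse_type_str("RoCE v2\n");

        if (gid_type < 0)
                return gid_type;        /* -EINVAL: unrecognized type string */

        /* gid_type is now IB_GID_TYPE_ROCE_UDP_ENCAP; print its name */
        pr_debug("parsed GID type: %s\n", ib_cache_gid_type_str(gid_type));
        return 0;
}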
/* This function expects that rwlock will be write locked in all
 * scenarios and that lock will be locked in sleep-able (RoCE)
 * scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
                     struct ib_gid_table *table, int ix,
                     const union ib_gid *gid,
                     const struct ib_gid_attr *attr,
                     enum gid_table_write_action action,
                     bool  default_gid)
        __releases(&table->rwlock) __acquires(&table->rwlock)
{
        int ret = 0;
        struct net_device *old_net_dev;
        enum ib_gid_type old_gid_type;

        /* in rdma_cap_roce_gid_table, this function should be protected by a
         * sleep-able lock.
         */
        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
                write_unlock_irq(&table->rwlock);
                /* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
                 * RoCE providers and thus only updates the cache.
                 */
                if (action == GID_TABLE_WRITE_ACTION_ADD)
                        ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
                                              &table->data_vec[ix].context);
                else if (action == GID_TABLE_WRITE_ACTION_DEL)
                        ret = ib_dev->del_gid(ib_dev, port, ix,
                                              &table->data_vec[ix].context);
                write_lock_irq(&table->rwlock);
        }
        old_net_dev = table->data_vec[ix].attr.ndev;
        old_gid_type = table->data_vec[ix].attr.gid_type;
        if (old_net_dev && old_net_dev != attr->ndev)
                dev_put(old_net_dev);
        /* if modify_gid failed, just delete the old gid */
        if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
                gid = &zgid;
                attr = &zattr;
                table->data_vec[ix].context = NULL;
        }

        memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
        memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
        if (default_gid) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
                if (action == GID_TABLE_WRITE_ACTION_DEL)
                        table->data_vec[ix].attr.gid_type = old_gid_type;
        }
        if (table->data_vec[ix].attr.ndev &&
            table->data_vec[ix].attr.ndev != old_net_dev)
                dev_hold(table->data_vec[ix].attr.ndev);

        table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

        return ret;
}
static int add_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   const union ib_gid *gid,
                   const struct ib_gid_attr *attr,
                   bool  default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_ADD, default_gid);
}
static int modify_gid(struct ib_device *ib_dev, u8 port,
                      struct ib_gid_table *table, int ix,
                      const union ib_gid *gid,
                      const struct ib_gid_attr *attr,
                      bool  default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}
static int del_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   bool  default_gid)
{
        return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
                         GID_TABLE_WRITE_ACTION_DEL, default_gid);
}
/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
                    unsigned long mask, int *pempty)
{
        int i = 0;
        int found = -1;
        int empty = pempty ? -1 : 0;

        while (i < table->sz && (found < 0 || empty < 0)) {
                struct ib_gid_table_entry *data = &table->data_vec[i];
                struct ib_gid_attr *attr = &data->attr;
                int curr_index = i;

                i++;

                if (data->props & GID_TABLE_ENTRY_INVALID)
                        continue;

                if (empty < 0)
                        if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
                            !memcmp(attr, &zattr, sizeof(*attr)) &&
                            !data->props)
                                empty = curr_index;

                if (found >= 0)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
                    attr->gid_type != val->gid_type)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID &&
                    memcmp(gid, &data->gid, sizeof(*gid)))
                        continue;

                if (mask & GID_ATTR_FIND_MASK_NETDEV &&
                    attr->ndev != val->ndev)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
                    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
                    default_gid)
                        continue;

                found = curr_index;
        }

        if (pempty)
                *pempty = empty;

        return found;
}
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
        gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        addrconf_ifid_eui48(&gid->raw[8], dev);
}
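/*
 * Worked example (illustrative): for a netdev with MAC 02:00:0a:0b:0c:0d,
 * addrconf_ifid_eui48() builds the modified EUI-64 interface id by
 * flipping the universal/local bit and inserting ff:fe in the middle, so
 * the default GID above becomes fe80:0000:0000:0000:0000:0aff:fe0b:0c0d.
 */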
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;
        int ret = 0;
        struct net_device *idev;
        int empty;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (!memcmp(gid, &zgid, sizeof(*gid)))
                return -EINVAL;

        if (ib_dev->get_netdev) {
                idev = ib_dev->get_netdev(ib_dev, port);
                if (idev && attr->ndev != idev) {
                        union ib_gid default_gid;

                        /* Adding default GIDs is not permitted */
                        make_default_gid(idev, &default_gid);
                        if (!memcmp(gid, &default_gid, sizeof(*gid))) {
                                dev_put(idev);
                                return -EPERM;
                        }
                }
                if (idev)
                        dev_put(idev);
        }

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_GID_TYPE |
                      GID_ATTR_FIND_MASK_NETDEV, &empty);
        if (ix >= 0)
                goto out_unlock;

        if (empty < 0) {
                ret = -ENOSPC;
                goto out_unlock;
        }

        ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
        if (!ret)
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);
        return ret;
}
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;

        table = ports_table[port - rdma_start_port(ib_dev)];

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        ix = find_gid(table, gid, attr, false,
                      GID_ATTR_FIND_MASK_GID      |
                      GID_ATTR_FIND_MASK_GID_TYPE |
                      GID_ATTR_FIND_MASK_NETDEV   |
                      GID_ATTR_FIND_MASK_DEFAULT,
                      NULL);
        if (ix < 0)
                goto out_unlock;

        if (!del_gid(ib_dev, port, table, ix, false))
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);
        return 0;
}
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                     struct net_device *ndev)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;
        bool deleted = false;

        table = ports_table[port - rdma_start_port(ib_dev)];

        mutex_lock(&table->lock);
        write_lock_irq(&table->rwlock);

        for (ix = 0; ix < table->sz; ix++)
                if (table->data_vec[ix].attr.ndev == ndev)
                        if (!del_gid(ib_dev, port, table, ix,
                                     !!(table->data_vec[ix].props &
                                        GID_TABLE_ENTRY_DEFAULT)))
                                deleted = true;

        write_unlock_irq(&table->rwlock);
        mutex_unlock(&table->lock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);

        return 0;
}
static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
                              union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (index < 0 || index >= table->sz)
                return -EINVAL;

        if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
                return -EAGAIN;

        memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
        if (attr) {
                memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
                if (attr->ndev)
                        dev_hold(attr->ndev);
        }

        return 0;
}
static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
                                    const union ib_gid *gid,
                                    const struct ib_gid_attr *val,
                                    unsigned long mask,
                                    u8 *port, u16 *index)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        u8 p;
        int local_index;
        unsigned long flags;

        for (p = 0; p < ib_dev->phys_port_cnt; p++) {
                table = ports_table[p];
                read_lock_irqsave(&table->rwlock, flags);
                local_index = find_gid(table, gid, val, false, mask, NULL);
                if (local_index >= 0) {
                        if (index)
                                *index = local_index;
                        if (port)
                                *port = p + rdma_start_port(ib_dev);
                        read_unlock_irqrestore(&table->rwlock, flags);
                        return 0;
                }
                read_unlock_irqrestore(&table->rwlock, flags);
        }

        return -ENOENT;
}
static int ib_cache_gid_find(struct ib_device *ib_dev,
                             const union ib_gid *gid,
                             enum ib_gid_type gid_type,
                             struct net_device *ndev, u8 *port,
                             u16 *index)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
                                        mask, port, index);
}
int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
                               const union ib_gid *gid,
                               enum ib_gid_type gid_type,
                               u8 port, struct net_device *ndev,
                               u16 *index)
{
        int local_index;
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
        unsigned long flags;

        if (port < rdma_start_port(ib_dev) ||
            port > rdma_end_port(ib_dev))
                return -ENOENT;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        read_lock_irqsave(&table->rwlock, flags);
        local_index = find_gid(table, gid, &val, false, mask, NULL);
        if (local_index >= 0) {
                if (index)
                        *index = local_index;
                read_unlock_irqrestore(&table->rwlock, flags);
                return 0;
        }

        read_unlock_irqrestore(&table->rwlock, flags);
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);
/**
 * ib_find_gid_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
                                       const union ib_gid *gid,
                                       u8 port,
                                       bool (*filter)(const union ib_gid *,
                                                      const struct ib_gid_attr *,
                                                      void *),
                                       void *context,
                                       u16 *index)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        unsigned int i;
        unsigned long flags;
        bool found = false;

        if (!ports_table)
                return -EOPNOTSUPP;

        if (port < rdma_start_port(ib_dev) ||
            port > rdma_end_port(ib_dev) ||
            !rdma_protocol_roce(ib_dev, port))
                return -EPROTONOSUPPORT;

        table = ports_table[port - rdma_start_port(ib_dev)];

        read_lock_irqsave(&table->rwlock, flags);
        for (i = 0; i < table->sz; i++) {
                struct ib_gid_attr attr;

                if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
                        goto next;

                if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
                        goto next;

                memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

                if (filter(gid, &attr, context))
                        found = true;

next:
                if (found)
                        break;
        }
        read_unlock_irqrestore(&table->rwlock, flags);

        if (!found)
                return -ENOENT;

        if (index)
                *index = i;
        return 0;
}
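/*
 * Illustrative sketch: a filter callback suitable for the function above
 * (reached via ib_find_gid_by_filter()). The name and matching policy are
 * hypothetical; the callback runs under the table rwlock in atomic
 * context, so it must not sleep.
 */
static bool __maybe_unused example_roce_v2_ndev_filter(const union ib_gid *gid,
                                                       const struct ib_gid_attr *attr,
                                                       void *context)
{
        struct net_device *wanted_ndev = context;

        /* match only RoCE v2 entries bound to the caller's netdev */
        return attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
               attr->ndev == wanted_ndev;
}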
static struct ib_gid_table *alloc_gid_table(int sz)
{
        struct ib_gid_table *table =
                kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
        if (!table->data_vec)
                goto err_free_table;

        mutex_init(&table->lock);

        table->sz = sz;
        rwlock_init(&table->rwlock);

        return table;

err_free_table:
        kfree(table);
        return NULL;
}
static void release_gid_table(struct ib_gid_table *table)
{
        if (table) {
                kfree(table->data_vec);
                kfree(table);
        }
}
static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
                                   struct ib_gid_table *table)
{
        int i;
        bool deleted = false;

        if (!table)
                return;

        write_lock_irq(&table->rwlock);
        for (i = 0; i < table->sz; ++i) {
                if (memcmp(&table->data_vec[i].gid, &zgid,
                           sizeof(table->data_vec[i].gid)))
                        if (!del_gid(ib_dev, port, table, i,
                                     !!(table->data_vec[i].props &
                                        GID_TABLE_ENTRY_DEFAULT)))
                                deleted = true;
        }
        write_unlock_irq(&table->rwlock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);
}
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                                  struct net_device *ndev,
                                  unsigned long gid_type_mask,
                                  enum ib_cache_gid_default_mode mode)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        union ib_gid gid;
        struct ib_gid_attr gid_attr;
        struct ib_gid_attr zattr_type = zattr;
        struct ib_gid_table *table;
        unsigned int gid_type;

        table = ports_table[port - rdma_start_port(ib_dev)];

        make_default_gid(ndev, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
                int ix;
                union ib_gid current_gid;
                struct ib_gid_attr current_gid_attr = {};

                if (1UL << gid_type & ~gid_type_mask)
                        continue;

                gid_attr.gid_type = gid_type;

                mutex_lock(&table->lock);
                write_lock_irq(&table->rwlock);
                ix = find_gid(table, NULL, &gid_attr, true,
                              GID_ATTR_FIND_MASK_GID_TYPE |
                              GID_ATTR_FIND_MASK_DEFAULT,
                              NULL);

                /* Couldn't find default GID location */
                if (WARN_ON(ix < 0))
                        goto release;

                zattr_type.gid_type = gid_type;

                if (!__ib_cache_gid_get(ib_dev, port, ix,
                                        &current_gid, &current_gid_attr) &&
                    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
                    !memcmp(&gid, &current_gid, sizeof(gid)) &&
                    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
                        goto release;

                if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
                    memcmp(&current_gid_attr, &zattr_type,
                           sizeof(current_gid_attr))) {
                        if (del_gid(ib_dev, port, table, ix, true)) {
                                pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
                                        ix, gid.raw);
                                goto release;
                        }
                        dispatch_gid_change_event(ib_dev, port);
                }

                if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
                        if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
                                pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
                                        gid.raw);
                        else
                                dispatch_gid_change_event(ib_dev, port);
                }

release:
                if (current_gid_attr.ndev)
                        dev_put(current_gid_attr.ndev);
                write_unlock_irq(&table->rwlock);
                mutex_unlock(&table->lock);
        }
}
static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
                                     struct ib_gid_table *table)
{
        unsigned int i;
        unsigned long roce_gid_type_mask;
        unsigned int num_default_gids;
        unsigned int current_gid = 0;

        roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
        num_default_gids = hweight_long(roce_gid_type_mask);
        for (i = 0; i < num_default_gids && i < table->sz; i++) {
                struct ib_gid_table_entry *entry =
                        &table->data_vec[i];

                entry->props |= GID_TABLE_ENTRY_DEFAULT;
                current_gid = find_next_bit(&roce_gid_type_mask,
                                            BITS_PER_LONG,
                                            current_gid);
                entry->attr.gid_type = current_gid++;
        }

        return 0;
}
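/*
 * Worked example (illustrative): if a port supports both RoCE v1 and
 * RoCE v2, roce_gid_type_mask_support() returns 0b11, hweight_long()
 * reserves two default entries, and find_next_bit() walks the mask so
 * that entry 0 gets gid_type 0 (IB/RoCE v1) and entry 1 gets gid_type 1
 * (RoCE v2).
 */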
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
        u8 port;
        struct ib_gid_table **table;
        int err = 0;

        table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);

        if (!table) {
                pr_warn("failed to allocate ib gid cache for %s\n",
                        ib_dev->name);
                return -ENOMEM;
        }

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                u8 rdma_port = port + rdma_start_port(ib_dev);

                table[port] =
                        alloc_gid_table(
                                ib_dev->port_immutable[rdma_port].gid_tbl_len);
                if (!table[port]) {
                        err = -ENOMEM;
                        goto rollback_table_setup;
                }

                err = gid_table_reserve_default(ib_dev,
                                                port + rdma_start_port(ib_dev),
                                                table[port]);
                if (err)
                        goto rollback_table_setup;
        }

        ib_dev->cache.gid_cache = table;
        return 0;

rollback_table_setup:
        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table[port]);
                release_gid_table(table[port]);
        }

        kfree(table);
        return err;
}
static void gid_table_release_one(struct ib_device *ib_dev)
{
        struct ib_gid_table **table = ib_dev->cache.gid_cache;
        u8 port;

        if (!table)
                return;

        for (port = 0; port < ib_dev->phys_port_cnt; port++)
                release_gid_table(table[port]);

        kfree(table);
        ib_dev->cache.gid_cache = NULL;
}
static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
        struct ib_gid_table **table = ib_dev->cache.gid_cache;
        u8 port;

        if (!table)
                return;

        for (port = 0; port < ib_dev->phys_port_cnt; port++)
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table[port]);
}
static int gid_table_setup_one(struct ib_device *ib_dev)
{
        int err;

        err = _gid_table_setup_one(ib_dev);
        if (err)
                return err;

        err = roce_rescan_device(ib_dev);
        if (err) {
                gid_table_cleanup_one(ib_dev);
                gid_table_release_one(ib_dev);
        }

        return err;
}
int ib_get_cached_gid(struct ib_device *device,
                      u8                port_num,
                      int               index,
                      union ib_gid     *gid,
                      struct ib_gid_attr *gid_attr)
{
        int res;
        unsigned long flags;
        struct ib_gid_table **ports_table = device->cache.gid_cache;
        struct ib_gid_table *table;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        /* index the per-port table only after validating port_num */
        table = ports_table[port_num - rdma_start_port(device)];

        read_lock_irqsave(&table->rwlock, flags);
        res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
        read_unlock_irqrestore(&table->rwlock, flags);

        return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);
int ib_find_cached_gid(struct ib_device *device,
                       const union ib_gid *gid,
                       enum ib_gid_type gid_type,
                       struct net_device *ndev,
                       u8               *port_num,
                       u16              *index)
{
        return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);
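/*
 * Illustrative sketch: resolving which port and table slot hold a given
 * GID. The device and GID arguments are assumptions of the example; a
 * NULL ndev leaves the match unconstrained by netdev.
 */
static inline int example_resolve_gid(struct ib_device *device,
                                      const union ib_gid *gid)
{
        u8 port_num;
        u16 index;
        int ret;

        ret = ib_find_cached_gid(device, gid, IB_GID_TYPE_IB, NULL,
                                 &port_num, &index);
        if (!ret)
                pr_debug("GID found at port %u, index %u\n", port_num, index);
        return ret;
}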
int ib_find_gid_by_filter(struct ib_device *device,
                          const union ib_gid *gid,
                          u8 port_num,
                          bool (*filter)(const union ib_gid *gid,
                                         const struct ib_gid_attr *,
                                         void *),
                          void *context, u16 *index)
{
        /* Only RoCE GID table supports filter function */
        if (!rdma_cap_roce_gid_table(device, port_num) && filter)
                return -EPROTONOSUPPORT;

        return ib_cache_gid_find_by_filter(device, gid,
                                           port_num, filter,
                                           context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);
int ib_get_cached_pkey(struct ib_device *device,
                       u8                port_num,
                       int               index,
                       u16              *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
int ib_find_cached_pkey(struct ib_device *device,
                        u8                port_num,
                        u16               pkey,
                        u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        /* 0x7fff masks off the membership bit when comparing partitions */
        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else
                                partial_ix = i;
                }

        /* fall back to a limited-member match if no full member was found */
        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
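/*
 * Illustrative sketch: bit 15 of a P_Key is the full-membership flag and
 * the low 15 bits identify the partition, so the lookup above prefers a
 * full-member entry (e.g. 0xffff) and only falls back to a limited-member
 * one (e.g. 0x7fff) when no full member exists. The helper name is
 * hypothetical.
 */
static inline int example_find_default_pkey(struct ib_device *device,
                                            u8 port_num)
{
        u16 index;

        /* matches 0xffff (full) or, failing that, 0x7fff (limited) */
        return ib_find_cached_pkey(device, port_num, 0xffff, &index);
}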
int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8                port_num,
                              u16               pkey,
                              u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);
int ib_get_cached_lmc(struct ib_device *device,
                      u8                port_num,
                      u8               *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);
static void ib_cache_update(struct ib_device *device,
                            u8                port)
{
        struct ib_port_attr       *tprops = NULL;
        struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
        struct ib_gid_cache {
                int             table_len;
                union ib_gid    table[0];
        }                         *gid_cache = NULL;
        int                        i;
        int                        ret;
        struct ib_gid_table       *table;
        struct ib_gid_table      **ports_table = device->cache.gid_cache;
        bool                       use_roce_gid_table =
                                        rdma_cap_roce_gid_table(device, port);

        if (port < rdma_start_port(device) || port > rdma_end_port(device))
                return;

        table = ports_table[port - rdma_start_port(device)];

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                pr_warn("ib_query_port failed (%d) for %s\n",
                        ret, device->name);
                goto err;
        }

        pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
                             sizeof *pkey_cache->table, GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        if (!use_roce_gid_table) {
                gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
                            sizeof(*gid_cache->table), GFP_KERNEL);
                if (!gid_cache)
                        goto err;

                gid_cache->table_len = tprops->gid_tbl_len;
        }

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
                                ret, device->name, i);
                        goto err;
                }
        }

        if (!use_roce_gid_table) {
                for (i = 0; i < gid_cache->table_len; ++i) {
                        ret = ib_query_gid(device, port, i,
                                           gid_cache->table + i, NULL);
                        if (ret) {
                                pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
                                        ret, device->name, i);
                                goto err;
                        }
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

        device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
        if (!use_roce_gid_table) {
                write_lock(&table->rwlock);
                for (i = 0; i < gid_cache->table_len; i++) {
                        modify_gid(device, port, table, i, gid_cache->table + i,
                                   &zattr, false);
                }
                write_unlock(&table->rwlock);
        }

        device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

        write_unlock_irq(&device->cache.lock);

        kfree(gid_cache);
        kfree(old_pkey_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(gid_cache);
        kfree(tprops);
}
static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device, work->port_num);
        kfree(work);
}
static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        queue_work(ib_wq, &work->work);
                }
        }
}
int ib_cache_setup_one(struct ib_device *device)
{
        int p;
        int err;

        rwlock_init(&device->cache.lock);

        device->cache.pkey_cache =
                kzalloc(sizeof *device->cache.pkey_cache *
                        (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
        device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
                                          (rdma_end_port(device) -
                                           rdma_start_port(device) + 1),
                                          GFP_KERNEL);
        if (!device->cache.pkey_cache ||
            !device->cache.lmc_cache) {
                pr_warn("Couldn't allocate cache for %s\n", device->name);
                return -ENOMEM;
        }

        err = gid_table_setup_one(device);
        if (err)
                /* Allocated memory will be cleaned in the release function */
                return err;

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                ib_cache_update(device, p + rdma_start_port(device));

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        err = ib_register_event_handler(&device->cache.event_handler);
        if (err)
                goto err;

        return 0;

err:
        gid_table_cleanup_one(device);
        return err;
}
void ib_cache_release_one(struct ib_device *device)
{
        int p;

        /*
         * The release function frees all the cache elements.
         * This function should be called as part of freeing
         * all the device's resources when the cache could no
         * longer be accessed.
         */
        if (device->cache.pkey_cache)
                for (p = 0;
                     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                        kfree(device->cache.pkey_cache[p]);

        gid_table_release_one(device);
        kfree(device->cache.pkey_cache);
        kfree(device->cache.lmc_cache);
}
void ib_cache_cleanup_one(struct ib_device *device)
{
        /* The cleanup function unregisters the event handler,
         * waits for all in-progress workqueue elements and cleans
         * up the GID cache. This function should be called after
         * the device was removed from the devices list and all
         * clients were removed, so the cache exists but is
         * non-functional and shouldn't be updated anymore.
         */
        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);
        gid_table_cleanup_one(device);
}
void __init ib_cache_setup(void)
{
        roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
        roce_gid_mgmt_cleanup();
}