/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"
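/*
 * Per-port caches of the P_Key and GID tables, plus the LMC, are kept
 * in device->cache and protected by device->cache.lock.  Readers take
 * the lock in IRQ-save mode; updaters build fresh tables, swap them in
 * under the write lock, and free the old tables afterwards.
 */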
struct ib_pkey_cache {
        int             table_len;
        u16             table[0];
};

struct ib_gid_cache {
        int             table_len;
        union ib_gid    table[0];
};

struct ib_update_work {
        struct work_struct work;
        struct ib_device  *device;
        u8                 port_num;
};
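/*
 * Switches expose only the management port, port 0; CAs and routers
 * number their physical ports 1..phys_port_cnt.
 */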
static inline int start_port(struct ib_device *device)
{
        return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}
static inline int end_port(struct ib_device *device)
{
        return (device->node_type == RDMA_NODE_IB_SWITCH) ?
                0 : device->phys_port_cnt;
}
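/**
 * ib_get_cached_gid - Return an entry from a port's cached GID table
 * @device: The device to query.
 * @port_num: The port number of the device to query.
 * @index: The index into the cached GID table to query.
 * @gid: The GID value found at the specified index.
 *
 * Fetches the GID from the local software cache rather than querying
 * the device, so it is cheap enough to call from hot paths.
 */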
int ib_get_cached_gid(struct ib_device *device,
                      u8                port_num,
                      int               index,
                      union ib_gid     *gid)
{
        struct ib_gid_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (port_num < start_port(device) || port_num > end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.gid_cache[port_num - start_port(device)];

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *gid = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_gid);
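/**
 * ib_find_cached_gid - Return the port and index of a cached GID
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID was found.
 * @index: The index into the cached GID table where the GID was found.
 *
 * Scans every port's cached GID table for @gid; @index may be NULL if
 * the caller only needs the port number.
 */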
int ib_find_cached_gid(struct ib_device *device,
                       union ib_gid     *gid,
                       u8               *port_num,
                       u16              *index)
{
        struct ib_gid_cache *cache;
        unsigned long flags;
        int p, i;
        int ret = -ENOENT;

        *port_num = -1;
        if (index)
                *index = -1;

        read_lock_irqsave(&device->cache.lock, flags);

        for (p = 0; p <= end_port(device) - start_port(device); ++p) {
                cache = device->cache.gid_cache[p];
                for (i = 0; i < cache->table_len; ++i) {
                        if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
                                *port_num = p + start_port(device);
                                if (index)
                                        *index = i;
                                ret = 0;
                                goto found;
                        }
                }
        }
found:
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_gid);
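/**
 * ib_get_cached_pkey - Return an entry from a port's cached P_Key table
 * @device: The device to query.
 * @port_num: The port number of the device to query.
 * @index: The index into the cached P_Key table to query.
 * @pkey: The P_Key value found at the specified index.
 */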
int ib_get_cached_pkey(struct ib_device *device,
                       u8                port_num,
                       int               index,
                       u16              *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (port_num < start_port(device) || port_num > end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - start_port(device)];

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
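/**
 * ib_find_cached_pkey - Return the index of a matching cached P_Key
 * @device: The device to query.
 * @port_num: The port number of the device to search.
 * @pkey: The P_Key value to search for.
 * @index: The index into the cached P_Key table where the P_Key was found.
 *
 * Bit 15 of a P_Key is the full-membership bit, so entries are compared
 * on the low 15 bits only; a full-membership match is preferred, with a
 * partial-membership match returned as a fallback.
 */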
int ib_find_cached_pkey(struct ib_device *device,
                        u8                port_num,
                        u16               pkey,
                        u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (port_num < start_port(device) || port_num > end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else
                                partial_ix = i;
                }

        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
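/**
 * ib_find_exact_cached_pkey - Return the index of an exactly matching
 *   cached P_Key, including the membership bit
 * @device: The device to query.
 * @port_num: The port number of the device to search.
 * @pkey: The P_Key value to search for.
 * @index: The index into the cached P_Key table where the P_Key was found.
 */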
int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8                port_num,
                              u16               pkey,
                              u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (port_num < start_port(device) || port_num > end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);
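/**
 * ib_get_cached_lmc - Return the cached LMC (LID mask count) of a port
 * @device: The device to query.
 * @port_num: The port number of the device to query.
 * @lmc: The LMC value for the specified port.
 */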
int ib_get_cached_lmc(struct ib_device *device,
                      u8                port_num,
                      u8               *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (port_num < start_port(device) || port_num > end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.lmc_cache[port_num - start_port(device)];
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);
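/*
 * Re-read one port's P_Key and GID tables and its LMC from the device,
 * then swap the new tables into place under the write lock so readers
 * never see a half-updated cache.
 */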
static void ib_cache_update(struct ib_device *device,
                            u8                port)
{
        struct ib_port_attr  *tprops = NULL;
        struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
        struct ib_gid_cache  *gid_cache = NULL, *old_gid_cache;
        int                   i;
        int                   ret;

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
                       ret, device->name);
                goto err;
        }

        pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
                             sizeof *pkey_cache->table, GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
                            sizeof *gid_cache->table, GFP_KERNEL);
        if (!gid_cache)
                goto err;

        gid_cache->table_len = tprops->gid_tbl_len;

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
                               ret, device->name, i);
                        goto err;
                }
        }

        for (i = 0; i < gid_cache->table_len; ++i) {
                ret = ib_query_gid(device, port, i, gid_cache->table + i);
                if (ret) {
                        printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
                               ret, device->name, i);
                        goto err;
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
        old_gid_cache  = device->cache.gid_cache [port - start_port(device)];

        device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
        device->cache.gid_cache [port - start_port(device)] = gid_cache;

        device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;

        write_unlock_irq(&device->cache.lock);

        kfree(old_pkey_cache);
        kfree(old_gid_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(gid_cache);
        kfree(tprops);
}
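/* Workqueue callback: refresh one port's cache, then free the request. */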
static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device, work->port_num);
        kfree(work);
}
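/*
 * Event handlers may be called in atomic context, so allocate the
 * update request with GFP_ATOMIC and defer the actual (sleeping)
 * cache refresh to ib_wq.
 */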
static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        queue_work(ib_wq, &work->work);
                }
        }
}
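/*
 * Allocate the per-port cache arrays for a newly registered device,
 * populate them with an initial update, and register for the events
 * that can invalidate the cache.
 */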
static void ib_cache_setup_one(struct ib_device *device)
{
        int p;

        rwlock_init(&device->cache.lock);

        device->cache.pkey_cache =
                kmalloc(sizeof *device->cache.pkey_cache *
                        (end_port(device) - start_port(device) + 1), GFP_KERNEL);
        device->cache.gid_cache =
                kmalloc(sizeof *device->cache.gid_cache *
                        (end_port(device) - start_port(device) + 1), GFP_KERNEL);

        device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
                                          (end_port(device) -
                                           start_port(device) + 1),
                                          GFP_KERNEL);

        if (!device->cache.pkey_cache || !device->cache.gid_cache ||
            !device->cache.lmc_cache) {
                printk(KERN_WARNING "Couldn't allocate cache "
                       "for %s\n", device->name);
                goto err;
        }

        for (p = 0; p <= end_port(device) - start_port(device); ++p) {
                device->cache.pkey_cache[p] = NULL;
                device->cache.gid_cache [p] = NULL;
                ib_cache_update(device, p + start_port(device));
        }

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        if (ib_register_event_handler(&device->cache.event_handler))
                goto err_cache;

        return;

err_cache:
        for (p = 0; p <= end_port(device) - start_port(device); ++p) {
                kfree(device->cache.pkey_cache[p]);
                kfree(device->cache.gid_cache[p]);
        }

err:
        kfree(device->cache.pkey_cache);
        kfree(device->cache.gid_cache);
        kfree(device->cache.lmc_cache);
}
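/*
 * Tear-down mirror of ib_cache_setup_one(): stop receiving events,
 * drain any in-flight updates, then free all per-port tables.
 */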
static void ib_cache_cleanup_one(struct ib_device *device)
{
        int p;

        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);

        for (p = 0; p <= end_port(device) - start_port(device); ++p) {
                kfree(device->cache.pkey_cache[p]);
                kfree(device->cache.gid_cache[p]);
        }

        kfree(device->cache.pkey_cache);
        kfree(device->cache.gid_cache);
        kfree(device->cache.lmc_cache);
}
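/*
 * The cache is implemented as an ib_client so that setup and cleanup
 * run automatically for every IB device as it is added or removed.
 */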
static struct ib_client cache_client = {
        .name   = "cache",
        .add    = ib_cache_setup_one,
        .remove = ib_cache_cleanup_one
};
int __init ib_cache_setup(void)
{
        return ib_register_client(&cache_client);
}
void __exit ib_cache_cleanup(void)
{
        ib_unregister_client(&cache_client);
}