/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"
struct ib_pkey_cache {
	int             table_len;
	u16             table[0];
};

struct ib_gid_cache {
	int             table_len;
	union ib_gid    table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
};
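
/*
 * Return the cached copy of the GID at @index in @port_num's GID table.
 * Reads under the cache read lock; returns -EINVAL for a bad port or
 * index.
 */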
int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid)
{
	struct ib_gid_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.gid_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*gid = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_gid);
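
/*
 * Search every port's cached GID table for @gid, returning the first
 * matching port (and optionally the table index) without touching the
 * hardware.
 */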
int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       u8                *port_num,
		       u16               *index)
{
	struct ib_gid_cache *cache;
	unsigned long flags;
	int p, i;
	int ret = -ENOENT;

	*port_num = -1;
	if (index)
		*index = -1;

	read_lock_irqsave(&device->cache.lock, flags);

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
		cache = device->cache.gid_cache[p];
		for (i = 0; i < cache->table_len; ++i) {
			if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
				*port_num = p + rdma_start_port(device);
				if (index)
					*index = i;
				ret = 0;
				goto found;
			}
		}
	}
found:
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_gid);
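
/*
 * Return the cached P_Key at @index in @port_num's P_Key table;
 * returns -EINVAL for a bad port or index.
 */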
int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
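
/*
 * Find @pkey in @port_num's cached P_Key table.  Only the low 15 bits
 * are compared; the top bit distinguishes full membership (set) from
 * partial membership (clear).  A full-member match is preferred, but a
 * partial-member entry is returned if that is all the table holds.
 */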
int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
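
/*
 * Like ib_find_cached_pkey(), but require an exact 16-bit match,
 * membership bit included.
 */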
int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);
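
/*
 * Return the cached LID mask control (LMC) value for @port_num.
 */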
int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8               *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);
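
/*
 * Re-read one port's P_Key table, GID table, and LMC from the device,
 * then swap the freshly built tables in under the write lock so that
 * readers never observe a half-updated cache.
 */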
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr  *tprops = NULL;
	struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache  *gid_cache = NULL, *old_gid_cache;
	int                   i;
	int                   ret;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
			    sizeof *gid_cache->table, GFP_KERNEL);
	if (!gid_cache)
		goto err;

	gid_cache->table_len = tprops->gid_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	for (i = 0; i < gid_cache->table_len; ++i) {
		ret = ib_query_gid(device, port, i, gid_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
	old_gid_cache  = device->cache.gid_cache [port - rdma_start_port(device)];

	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - rdma_start_port(device)] = gid_cache;

	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(old_pkey_cache);
	kfree(old_gid_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}
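
/*
 * Workqueue callback: perform the deferred cache update and free the
 * work item allocated by ib_cache_event().
 */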
static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}
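
/*
 * Event handler: on any event that can change a port's tables or LMC,
 * queue a cache update.  The handler may run in atomic context, so the
 * work item is allocated with GFP_ATOMIC and the actual device queries
 * are deferred to ib_wq.
 */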
static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			queue_work(ib_wq, &work->work);
		}
	}
}
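
/*
 * Allocate the per-device cache arrays, populate them for every port,
 * and register for the events that require refreshing them.
 */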
static void ib_cache_setup_one(struct ib_device *device)
{
	int p;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kmalloc(sizeof *device->cache.pkey_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	device->cache.gid_cache =
		kmalloc(sizeof *device->cache.gid_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (rdma_end_port(device) -
					   rdma_start_port(device) + 1),
					  GFP_KERNEL);

	if (!device->cache.pkey_cache || !device->cache.gid_cache ||
	    !device->cache.lmc_cache) {
		printk(KERN_WARNING "Couldn't allocate cache for %s\n",
		       device->name);
		goto err;
	}

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
		device->cache.pkey_cache[p] = NULL;
		device->cache.gid_cache[p] = NULL;
		ib_cache_update(device, p + rdma_start_port(device));
	}

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	if (ib_register_event_handler(&device->cache.event_handler))
		goto err_cache;

	return;

err_cache:
	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}

err:
	kfree(device->cache.pkey_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.lmc_cache);
}
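
/*
 * Tear-down path: stop receiving events, drain any updates still in
 * flight on ib_wq, then free every per-port table and the arrays
 * themselves.
 */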
static void ib_cache_cleanup_one(struct ib_device *device)
{
	int p;

	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}

	kfree(device->cache.pkey_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.lmc_cache);
}
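
/*
 * Registering as an ib_client means the cache is set up and torn down
 * automatically as devices come and go.
 */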
static struct ib_client cache_client = {
	.name   = "cache",
	.add    = ib_cache_setup_one,
	.remove = ib_cache_cleanup_one
};

int __init ib_cache_setup(void)
{
	return ib_register_client(&cache_client);
}

void __exit ib_cache_cleanup(void)
{
	ib_unregister_client(&cache_client);
}