/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"
/* info about object pools
 * note that mr and mw share a single index space
 * so that one can map an lkey to the correct type of object
 */
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "rxe-uc",
		.size		= sizeof(struct rxe_ucontext),
	},
	[RXE_TYPE_PD] = {
		.name		= "rxe-pd",
		.size		= sizeof(struct rxe_pd),
	},
	[RXE_TYPE_AH] = {
		.name		= "rxe-ah",
		.size		= sizeof(struct rxe_ah),
		.flags		= RXE_POOL_ATOMIC,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "rxe-srq",
		.size		= sizeof(struct rxe_srq),
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
	},
	[RXE_TYPE_QP] = {
		.name		= "rxe-qp",
		.size		= sizeof(struct rxe_qp),
		.cleanup	= rxe_qp_cleanup,
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
	},
	[RXE_TYPE_CQ] = {
		.name		= "rxe-cq",
		.size		= sizeof(struct rxe_cq),
		.cleanup	= rxe_cq_cleanup,
	},
	[RXE_TYPE_MR] = {
		.name		= "rxe-mr",
		.size		= sizeof(struct rxe_mem),
		.cleanup	= rxe_mem_cleanup,
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.min_index	= RXE_MIN_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name		= "rxe-mw",
		.size		= sizeof(struct rxe_mem),
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MW_INDEX,
		.min_index	= RXE_MIN_MW_INDEX,
	},
	[RXE_TYPE_MC_GRP] = {
		.name		= "rxe-mc_grp",
		.size		= sizeof(struct rxe_mc_grp),
		.cleanup	= rxe_mc_cleanup,
		.flags		= RXE_POOL_KEY,
		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
		.key_size	= sizeof(union ib_gid),
	},
	[RXE_TYPE_MC_ELEM] = {
		.name		= "rxe-mc_elem",
		.size		= sizeof(struct rxe_mc_elem),
		.flags		= RXE_POOL_ATOMIC,
	},
};
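/* helpers to fetch the per-type info (name, slab cache, type enum)
 * for a pool or for an object allocated from one
 */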
static inline char *pool_name(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].name;
}
static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].cache;
}
static inline enum rxe_elem_type rxe_type(void *arg)
{
	struct rxe_pool_entry *elem = arg;

	return elem->pool->type;
}
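/* create one slab cache per object type; sizes are rounded up to
 * RXE_POOL_ALIGN so objects from each cache are suitably aligned
 */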
int rxe_cache_init(void)
{
	int err;
	int i;
	size_t size;
	struct rxe_type_info *type;

	for (i = 0; i < RXE_NUM_TYPES; i++) {
		type = &rxe_type_info[i];
		size = ALIGN(type->size, RXE_POOL_ALIGN);
		type->cache = kmem_cache_create(type->name, size,
						RXE_POOL_ALIGN,
						RXE_POOL_CACHE_FLAGS, NULL);
		if (!type->cache) {
			pr_err("Unable to init kmem cache for %s\n",
			       type->name);
			err = -ENOMEM;
			goto err1;
		}
	}

	return 0;

err1:
	/* unwind: destroy only the caches created so far */
	while (--i >= 0) {
		type = &rxe_type_info[i];
		kmem_cache_destroy(type->cache);
		type->cache = NULL;
	}

	return err;
}
void rxe_cache_exit(void)
{
	int i;
	struct rxe_type_info *type;

	for (i = 0; i < RXE_NUM_TYPES; i++) {
		type = &rxe_type_info[i];
		kmem_cache_destroy(type->cache);
		type->cache = NULL;
	}
}
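/* size the free-index bitmap for an indexed pool; fails if the
 * [min, max] index range cannot cover max_elem objects
 */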
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;
	size_t size;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->max_index = max;
	pool->min_index = min;

	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->table = kmalloc(size, GFP_KERNEL);
	if (!pool->table) {
		pr_warn("no memory for bit table\n");
		err = -ENOMEM;
		goto out;
	}

	pool->table_size = size;
	bitmap_zero(pool->table, max - min + 1);

out:
	return err;
}
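/* initialize a pool of objects of the given type; copies the
 * per-type flags, cleanup hook and key/index parameters from
 * rxe_type_info and sets up the index bitmap if needed
 */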
int rxe_pool_init(
	struct rxe_dev		*rxe,
	struct rxe_pool		*pool,
	enum rxe_elem_type	type,
	unsigned int		max_elem)
{
	int			err = 0;
	size_t			size = rxe_type_info[type].size;

	memset(pool, 0, sizeof(*pool));

	pool->rxe		= rxe;
	pool->type		= type;
	pool->max_elem		= max_elem;
	pool->elem_size		= ALIGN(size, RXE_POOL_ALIGN);
	pool->flags		= rxe_type_info[type].flags;
	pool->tree		= RB_ROOT;
	pool->cleanup		= rxe_type_info[type].cleanup;

	atomic_set(&pool->num_elem, 0);

	kref_init(&pool->ref_cnt);

	spin_lock_init(&pool->pool_lock);

	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
		err = rxe_pool_init_index(pool,
					  rxe_type_info[type].max_index,
					  rxe_type_info[type].min_index);
		if (err)
			goto out;
	}

	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
		pool->key_offset = rxe_type_info[type].key_offset;
		pool->key_size = rxe_type_info[type].key_size;
	}

	pool->state = rxe_pool_valid;

out:
	return err;
}
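/* the pool itself is reference counted; the last rxe_pool_put()
 * (from rxe_pool_cleanup() or a late rxe_elem_release()) frees the
 * index bitmap
 */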
static void rxe_pool_release(struct kref *kref)
{
	struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);

	pool->state = rxe_pool_invalid;
	kfree(pool->table);
}
static void rxe_pool_put(struct rxe_pool *pool)
{
	kref_put(&pool->ref_cnt, rxe_pool_release);
}
int rxe_pool_cleanup(struct rxe_pool *pool)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	pool->state = rxe_pool_invalid;
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool_name(pool));
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	rxe_pool_put(pool);

	return 0;
}
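/* allocate a free index, scanning the bitmap round-robin from the
 * last allocation so recently freed indices are not reused
 * immediately
 */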
static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->max_index - pool->min_index + 1;

	index = find_next_zero_bit(pool->table, range, pool->last);
	if (index >= range)
		index = find_first_zero_bit(pool->table, range);

	set_bit(index, pool->table);
	pool->last = index;
	return index + pool->min_index;
}
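/* insert an element into the pool's red-black tree ordered by
 * index; duplicates are rejected with a warning
 */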
static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");
			goto out;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}
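/* insert an element into the pool's red-black tree ordered by
 * memcmp() of the key bytes stored at key_offset in each object
 */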
static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     (u8 *)new + pool->key_offset, pool->key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");
			goto out;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}
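/* copy the key into the object and index it in the tree; e.g. the
 * multicast code stores an mgid here so rxe_pool_get_key() can find
 * the group later
 */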
void rxe_add_key(void *arg, void *key)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
	insert_key(pool, elem);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
void rxe_drop_key(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	rb_erase(&elem->node, &pool->tree);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
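/* assign a free index to the object and insert it in the tree;
 * rxe_drop_index() below releases both the bit and the tree node
 */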
void rxe_add_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	elem->index = alloc_index(pool);
	insert_index(pool, elem);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
void rxe_drop_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	clear_bit(elem->index - pool->min_index, pool->table);
	rb_erase(&elem->node, &pool->tree);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
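/* allocate one object from a pool. A minimal usage sketch follows
 * (hypothetical caller; pool and helper names such as rxe->qp_pool
 * and rxe_drop_ref() are assumed from the wider driver, and error
 * handling is elided), showing the pairing with rxe_elem_release():
 *
 *	struct rxe_qp *qp = rxe_alloc(&rxe->qp_pool);
 *	if (qp) {
 *		rxe_add_index(qp);	// only for RXE_POOL_INDEX pools
 *		...
 *		rxe_drop_index(qp);
 *		rxe_drop_ref(qp);	// kref_put -> rxe_elem_release
 *	}
 */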
void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_pool_entry *elem;
	unsigned long flags;

	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));

	spin_lock_irqsave(&pool->pool_lock, flags);
	if (pool->state != rxe_pool_valid) {
		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return NULL;
	}
	kref_get(&pool->ref_cnt);
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	kref_get(&pool->rxe->ref_cnt);

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_put;

	elem = kmem_cache_zalloc(pool_cache(pool),
				 (pool->flags & RXE_POOL_ATOMIC) ?
				 GFP_ATOMIC : GFP_KERNEL);
	if (!elem)
		goto out_put;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return elem;

out_put:
	/* undo the element count and the pool and device references */
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	rxe_pool_put(pool);
	return NULL;
}
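/* kref release callback: runs the per-type cleanup hook (if any),
 * returns the object to its slab cache and drops the pool and
 * device references taken in rxe_alloc()
 */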
void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;

	if (pool->cleanup)
		pool->cleanup(elem);

	kmem_cache_free(pool_cache(pool), elem);
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	rxe_pool_put(pool);
}
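/* look up an object by index, e.g. to map the QPN in an incoming
 * packet to its rxe_qp; takes a reference on the object which the
 * caller must drop when done
 */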
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != rxe_pool_valid)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	spin_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? (void *)elem : NULL;
}
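/* look up an object by key (for rxe this is the 16-byte mgid of a
 * multicast group); takes a reference on the object which the
 * caller must drop when done
 */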
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	int cmp;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != rxe_pool_valid)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     key, pool->key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	spin_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? (void *)elem : NULL;
}