/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "rxe.h"
#include "rxe_loc.h"

/* info about object pools
 * note that mr and mw share a single index space
 * so that one can map an lkey to the correct type of object
 */
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "rxe-uc",
		.size		= sizeof(struct rxe_ucontext),
	},
	[RXE_TYPE_PD] = {
		.name		= "rxe-pd",
		.size		= sizeof(struct rxe_pd),
	},
	[RXE_TYPE_AH] = {
		.name		= "rxe-ah",
		.size		= sizeof(struct rxe_ah),
		.flags		= RXE_POOL_ATOMIC,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "rxe-srq",
		.size		= sizeof(struct rxe_srq),
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
	},
	[RXE_TYPE_QP] = {
		.name		= "rxe-qp",
		.size		= sizeof(struct rxe_qp),
		.cleanup	= rxe_qp_cleanup,
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
	},
	[RXE_TYPE_CQ] = {
		.name		= "rxe-cq",
		.size		= sizeof(struct rxe_cq),
		.cleanup	= rxe_cq_cleanup,
	},
	[RXE_TYPE_MR] = {
		.name		= "rxe-mr",
		.size		= sizeof(struct rxe_mem),
		.cleanup	= rxe_mem_cleanup,
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.min_index	= RXE_MIN_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name		= "rxe-mw",
		.size		= sizeof(struct rxe_mem),
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MW_INDEX,
		.min_index	= RXE_MIN_MW_INDEX,
	},
	[RXE_TYPE_MC_GRP] = {
		.name		= "rxe-mc_grp",
		.size		= sizeof(struct rxe_mc_grp),
		.cleanup	= rxe_mc_cleanup,
		.flags		= RXE_POOL_KEY,
		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
		.key_size	= sizeof(union ib_gid),
	},
	[RXE_TYPE_MC_ELEM] = {
		.name		= "rxe-mc_elem",
		.size		= sizeof(struct rxe_mc_elem),
		.flags		= RXE_POOL_ATOMIC,
	},
};
static inline const char *pool_name(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].name;
}

static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].cache;
}
int rxe_cache_init(void)
{
	int err;
	int i;
	size_t size;
	struct rxe_type_info *type;

	for (i = 0; i < RXE_NUM_TYPES; i++) {
		type = &rxe_type_info[i];
		size = ALIGN(type->size, RXE_POOL_ALIGN);
		type->cache = kmem_cache_create(type->name, size,
						RXE_POOL_ALIGN,
						RXE_POOL_CACHE_FLAGS, NULL);
		if (!type->cache) {
			pr_err("Unable to init kmem cache for %s\n",
			       type->name);
			err = -ENOMEM;
			goto err1;
		}
	}

	return 0;

err1:
	while (--i >= 0) {
		type = &rxe_type_info[i];
		kmem_cache_destroy(type->cache);
		type->cache = NULL;
	}

	return err;
}
void rxe_cache_exit(void)
{
	int i;
	struct rxe_type_info *type;

	for (i = 0; i < RXE_NUM_TYPES; i++) {
		type = &rxe_type_info[i];
		kmem_cache_destroy(type->cache);
		type->cache = NULL;
	}
}
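
/* Set up the index bitmap for an indexed pool: one bit per index in
 * [min, max]. Fails if the index range cannot cover max_elem objects.
 */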
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;
	size_t size;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->max_index = max;
	pool->min_index = min;

	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->table = kmalloc(size, GFP_KERNEL);
	if (!pool->table) {
		err = -ENOMEM;
		goto out;
	}

	pool->table_size = size;
	bitmap_zero(pool->table, max - min + 1);

out:
	return err;
}
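
/* Initialize a pool of objects of the given type, copying the static
 * per-type parameters (size, flags, cleanup hook, index range, key layout)
 * from rxe_type_info[] into the pool.
 */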
int rxe_pool_init(
	struct rxe_dev		*rxe,
	struct rxe_pool		*pool,
	enum rxe_elem_type	type,
	unsigned int		max_elem)
{
	int			err = 0;
	size_t			size = rxe_type_info[type].size;

	memset(pool, 0, sizeof(*pool));

	pool->rxe		= rxe;
	pool->type		= type;
	pool->max_elem		= max_elem;
	pool->elem_size		= ALIGN(size, RXE_POOL_ALIGN);
	pool->flags		= rxe_type_info[type].flags;
	pool->tree		= RB_ROOT;
	pool->cleanup		= rxe_type_info[type].cleanup;

	atomic_set(&pool->num_elem, 0);

	kref_init(&pool->ref_cnt);

	spin_lock_init(&pool->pool_lock);

	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
		err = rxe_pool_init_index(pool,
					  rxe_type_info[type].max_index,
					  rxe_type_info[type].min_index);
		if (err)
			goto out;
	}

	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
		pool->key_offset = rxe_type_info[type].key_offset;
		pool->key_size = rxe_type_info[type].key_size;
	}

	pool->state = rxe_pool_valid;

out:
	return err;
}
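
/* Pool reference counting: the pool holds one reference from init and each
 * allocated element holds another, so the index table is freed only once
 * the last reference is dropped.
 */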
static void rxe_pool_release(struct kref *kref)
{
	struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);

	pool->state = rxe_pool_invalid;
	kfree(pool->table);
}

static void rxe_pool_put(struct rxe_pool *pool)
{
	kref_put(&pool->ref_cnt, rxe_pool_release);
}
int rxe_pool_cleanup(struct rxe_pool *pool)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	pool->state = rxe_pool_invalid;
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool_name(pool));
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	rxe_pool_put(pool);

	return 0;
}
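
/* Allocate the next free index, scanning the bitmap round-robin from the
 * last index handed out so indices are not reused immediately. Caller must
 * hold pool->pool_lock.
 */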
static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->max_index - pool->min_index + 1;

	index = find_next_zero_bit(pool->table, range, pool->last);
	if (index >= range)
		index = find_first_zero_bit(pool->table, range);

	WARN_ON_ONCE(index >= range);
	set_bit(index, pool->table);
	pool->last = index;
	return index + pool->min_index;
}
static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");
			goto out;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}
static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     (u8 *)new + pool->key_offset, pool->key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");
			goto out;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}
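
/* The add/drop helpers below are the caller-facing entry points; they take
 * pool->pool_lock and then update the index bitmap and/or the rb-tree.
 */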
void rxe_add_key(void *arg, void *key)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
	insert_key(pool, elem);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
void rxe_drop_key(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	rb_erase(&elem->node, &pool->tree);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
void rxe_add_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	elem->index = alloc_index(pool);
	insert_index(pool, elem);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
void rxe_drop_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	clear_bit(elem->index - pool->min_index, pool->table);
	rb_erase(&elem->node, &pool->tree);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
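
/* Allocate a new pool element. Takes a reference on the pool and on the
 * rxe device; fails if the pool is no longer valid or already holds
 * max_elem objects.
 */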
void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_pool_entry *elem;
	unsigned long flags;

	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));

	spin_lock_irqsave(&pool->pool_lock, flags);
	if (pool->state != rxe_pool_valid) {
		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return NULL;
	}
	kref_get(&pool->ref_cnt);
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	kref_get(&pool->rxe->ref_cnt);

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_put;

	elem = kmem_cache_zalloc(pool_cache(pool),
				 (pool->flags & RXE_POOL_ATOMIC) ?
				 GFP_ATOMIC : GFP_KERNEL);
	if (!elem)
		goto out_put;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return elem;

out_put:
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	rxe_pool_put(pool);
	return NULL;
}
void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;

	if (pool->cleanup)
		pool->cleanup(elem);

	kmem_cache_free(pool_cache(pool), elem);
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	rxe_pool_put(pool);
}
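
/* Look up an element by its index; on success the element is returned with
 * an extra reference held, otherwise NULL.
 */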
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != rxe_pool_valid)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	spin_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? elem : NULL;
}
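
/* Look up an element by key (memcmp over key_size bytes at key_offset);
 * on success the element is returned with an extra reference held,
 * otherwise NULL.
 */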
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	int cmp;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != rxe_pool_valid)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     key, pool->key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	spin_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? elem : NULL;
}
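
/* Illustrative usage sketch (not part of the original file): how a caller
 * would typically drive this API, assuming an object type that embeds a
 * struct rxe_pool_entry as its first member so the object pointer can be
 * passed to the index helpers directly. Names such as rxe->qp_pool, qpn
 * and the pelem field are assumptions made for the example only.
 *
 *	struct rxe_qp *qp;
 *
 *	qp = rxe_alloc(&rxe->qp_pool);		// takes pool + device refs
 *	if (!qp)
 *		return -ENOMEM;
 *	rxe_add_index(qp);			// assign and publish an index
 *
 *	qp = rxe_pool_get_index(&rxe->qp_pool, qpn);	// lookup adds a ref
 *
 *	rxe_drop_index(qp);			// on destroy, unpublish
 *	kref_put(&qp->pelem.ref_cnt, rxe_elem_release);	// drop last ref
 */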