drivers/infiniband/sw/rxe/rxe_pool.c
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"
/* info about object pools
 * note that mr and mw share a single index space
 * so that one can map an lkey to the correct type of object
 */
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "rxe-uc",
		.size		= sizeof(struct rxe_ucontext),
	},
	[RXE_TYPE_PD] = {
		.name		= "rxe-pd",
		.size		= sizeof(struct rxe_pd),
	},
	[RXE_TYPE_AH] = {
		.name		= "rxe-ah",
		.size		= sizeof(struct rxe_ah),
		.flags		= RXE_POOL_ATOMIC,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "rxe-srq",
		.size		= sizeof(struct rxe_srq),
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
	},
	[RXE_TYPE_QP] = {
		.name		= "rxe-qp",
		.size		= sizeof(struct rxe_qp),
		.cleanup	= rxe_qp_cleanup,
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
	},
	[RXE_TYPE_CQ] = {
		.name		= "rxe-cq",
		.size		= sizeof(struct rxe_cq),
		.cleanup	= rxe_cq_cleanup,
	},
	[RXE_TYPE_MR] = {
		.name		= "rxe-mr",
		.size		= sizeof(struct rxe_mem),
		.cleanup	= rxe_mem_cleanup,
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.min_index	= RXE_MIN_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name		= "rxe-mw",
		.size		= sizeof(struct rxe_mem),
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MW_INDEX,
		.min_index	= RXE_MIN_MW_INDEX,
	},
	[RXE_TYPE_MC_GRP] = {
		.name		= "rxe-mc_grp",
		.size		= sizeof(struct rxe_mc_grp),
		.cleanup	= rxe_mc_cleanup,
		.flags		= RXE_POOL_KEY,
		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
		.key_size	= sizeof(union ib_gid),
	},
	[RXE_TYPE_MC_ELEM] = {
		.name		= "rxe-mc_elem",
		.size		= sizeof(struct rxe_mc_elem),
		.flags		= RXE_POOL_ATOMIC,
	},
};
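
/* Summary of how the per-type flags above are used in this file:
 *
 *	RXE_POOL_ATOMIC	- objects may be allocated from atomic context,
 *			  so rxe_alloc() uses GFP_ATOMIC
 *	RXE_POOL_INDEX	- objects are assigned an index from a bitmap and
 *			  linked into an rb-tree, see rxe_add_index() and
 *			  rxe_pool_get_index()
 *	RXE_POOL_KEY	- objects are linked into an rb-tree ordered by a
 *			  key of key_size bytes at key_offset, see
 *			  rxe_add_key() and rxe_pool_get_key()
 */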
static inline const char *pool_name(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].name;
}

static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].cache;
}
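
/* Create one slab cache per object type.  Intended to be called once when
 * the driver loads, with rxe_cache_exit() as its counterpart on unload
 * (the caller context is an assumption; it is not visible in this file).
 */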
int rxe_cache_init(void)
{
	int err;
	int i;
	size_t size;
	struct rxe_type_info *type;

	for (i = 0; i < RXE_NUM_TYPES; i++) {
		type = &rxe_type_info[i];
		size = ALIGN(type->size, RXE_POOL_ALIGN);
		type->cache = kmem_cache_create(type->name, size,
						RXE_POOL_ALIGN,
						RXE_POOL_CACHE_FLAGS, NULL);
		if (!type->cache) {
			pr_err("Unable to init kmem cache for %s\n",
			       type->name);
			err = -ENOMEM;
			goto err1;
		}
	}

	return 0;

err1:
	/* unwind the caches created so far; re-fetch each entry so we do
	 * not keep operating on the one that just failed
	 */
	while (--i >= 0) {
		type = &rxe_type_info[i];
		kmem_cache_destroy(type->cache);
		type->cache = NULL;
	}

	return err;
}
void rxe_cache_exit(void)
{
	int i;
	struct rxe_type_info *type;

	for (i = 0; i < RXE_NUM_TYPES; i++) {
		type = &rxe_type_info[i];
		kmem_cache_destroy(type->cache);
		type->cache = NULL;
	}
}
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;
	size_t size;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->max_index = max;
	pool->min_index = min;

	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->table = kmalloc(size, GFP_KERNEL);
	if (!pool->table) {
		err = -ENOMEM;
		goto out;
	}

	pool->table_size = size;
	bitmap_zero(pool->table, max - min + 1);

out:
	return err;
}
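
/* Initialize a pool of at most max_elem objects of the given type.  The
 * struct rxe_pool itself is supplied by the caller (in this driver the
 * pools are embedded in struct rxe_dev); index and key support are set up
 * here according to the type's flags.
 */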
int rxe_pool_init(
	struct rxe_dev		*rxe,
	struct rxe_pool		*pool,
	enum rxe_elem_type	type,
	unsigned int		max_elem)
{
	int err = 0;
	size_t size = rxe_type_info[type].size;

	memset(pool, 0, sizeof(*pool));

	pool->rxe = rxe;
	pool->type = type;
	pool->max_elem = max_elem;
	pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
	pool->flags = rxe_type_info[type].flags;
	pool->tree = RB_ROOT;
	pool->cleanup = rxe_type_info[type].cleanup;

	atomic_set(&pool->num_elem, 0);

	kref_init(&pool->ref_cnt);

	spin_lock_init(&pool->pool_lock);

	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
		err = rxe_pool_init_index(pool,
					  rxe_type_info[type].max_index,
					  rxe_type_info[type].min_index);
		if (err)
			goto out;
	}

	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
		pool->key_offset = rxe_type_info[type].key_offset;
		pool->key_size = rxe_type_info[type].key_size;
	}

	pool->state = rxe_pool_valid;

out:
	return err;
}
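
/* Dropping the last pool reference releases only the index table; the
 * struct rxe_pool itself belongs to the caller and is not freed here.
 */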
static void rxe_pool_release(struct kref *kref)
{
	struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);

	pool->state = rxe_pool_invalid;
	kfree(pool->table);
}

static void rxe_pool_put(struct rxe_pool *pool)
{
	kref_put(&pool->ref_cnt, rxe_pool_release);
}
int rxe_pool_cleanup(struct rxe_pool *pool)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	pool->state = rxe_pool_invalid;
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool_name(pool));
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	rxe_pool_put(pool);

	return 0;
}
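
/* Pick a free index, searching from the previous allocation point
 * (pool->last) and wrapping to the start of the bitmap if necessary.
 * Called with pool_lock held; a free bit is expected to exist because
 * rxe_alloc() never lets num_elem exceed max_elem and
 * rxe_pool_init_index() checked that the index range covers max_elem.
 */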
static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->max_index - pool->min_index + 1;

	index = find_next_zero_bit(pool->table, range, pool->last);
	if (index >= range)
		index = find_first_zero_bit(pool->table, range);

	WARN_ON_ONCE(index >= range);
	set_bit(index, pool->table);
	pool->last = index;
	return index + pool->min_index;
}
static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");
			goto out;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}
static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     (u8 *)new + pool->key_offset, pool->key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");
			goto out;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}
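
/* The add/drop helpers below operate on individual objects and take
 * pool_lock themselves, so the caller must not already hold it.  For keyed
 * pools the key bytes are copied into the object before it is linked into
 * the rb-tree; for indexed pools an index is allocated from the bitmap and
 * the object is linked into the rb-tree under that index.
 */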
void rxe_add_key(void *arg, void *key)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
	insert_key(pool, elem);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_drop_key(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	rb_erase(&elem->node, &pool->tree);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
void rxe_add_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	elem->index = alloc_index(pool);
	insert_index(pool, elem);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_drop_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	clear_bit(elem->index - pool->min_index, pool->table);
	rb_erase(&elem->node, &pool->tree);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
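
/* Allocate one zeroed object from the pool.  A reference is taken on the
 * pool and on the owning rxe device, and the object starts with a kref of
 * one; all of these are dropped again by rxe_elem_release() when the last
 * object reference goes away.  Illustrative caller sketch (rxe->pd_pool
 * and the rxe_drop_ref() wrapper live elsewhere in the driver and are
 * shown here as an assumption):
 *
 *	struct rxe_pd *pd = rxe_alloc(&rxe->pd_pool);
 *
 *	if (!pd)
 *		return -ENOMEM;
 *	... use pd ...
 *	rxe_drop_ref(pd);	last put ends up in rxe_elem_release()
 */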
void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_pool_entry *elem;
	unsigned long flags;

	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));

	spin_lock_irqsave(&pool->pool_lock, flags);
	if (pool->state != rxe_pool_valid) {
		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return NULL;
	}
	kref_get(&pool->ref_cnt);
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	kref_get(&pool->rxe->ref_cnt);

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_put_pool;

	elem = kmem_cache_zalloc(pool_cache(pool),
				 (pool->flags & RXE_POOL_ATOMIC) ?
				 GFP_ATOMIC : GFP_KERNEL);
	if (!elem)
		goto out_put_pool;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return elem;

out_put_pool:
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	rxe_pool_put(pool);
	return NULL;
}
void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;

	if (pool->cleanup)
		pool->cleanup(elem);

	kmem_cache_free(pool_cache(pool), elem);
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	rxe_pool_put(pool);
}
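
/* Lookup helpers: walk the rb-tree under pool_lock and, on a hit, take a
 * reference on the object before returning it.  The caller is responsible
 * for dropping that reference, i.e. a kref_put() on ref_cnt that resolves
 * to rxe_elem_release() (typically via the driver's rxe_drop_ref() wrapper,
 * mentioned here as an assumption).
 */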
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != rxe_pool_valid)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	spin_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? elem : NULL;
}
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	int cmp;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != rxe_pool_valid)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     key, pool->key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	spin_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? elem : NULL;
}