/* drivers/infiniband/sw/rxe/rxe_pool.c */

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"

/* info about object pools
 * note that mr and mw share a single index space
 * so that one can map an lkey to the correct type of object
 */
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "rxe-uc",
		.size		= sizeof(struct rxe_ucontext),
	},
	[RXE_TYPE_PD] = {
		.name		= "rxe-pd",
		.size		= sizeof(struct rxe_pd),
	},
	[RXE_TYPE_AH] = {
		.name		= "rxe-ah",
		.size		= sizeof(struct rxe_ah),
		.flags		= RXE_POOL_ATOMIC,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "rxe-srq",
		.size		= sizeof(struct rxe_srq),
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
	},
	[RXE_TYPE_QP] = {
		.name		= "rxe-qp",
		.size		= sizeof(struct rxe_qp),
		.cleanup	= rxe_qp_cleanup,
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
	},
	[RXE_TYPE_CQ] = {
		.name		= "rxe-cq",
		.size		= sizeof(struct rxe_cq),
		.cleanup	= rxe_cq_cleanup,
	},
	[RXE_TYPE_MR] = {
		.name		= "rxe-mr",
		.size		= sizeof(struct rxe_mem),
		.cleanup	= rxe_mem_cleanup,
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.min_index	= RXE_MIN_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name		= "rxe-mw",
		.size		= sizeof(struct rxe_mem),
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MW_INDEX,
		.min_index	= RXE_MIN_MW_INDEX,
	},
	[RXE_TYPE_MC_GRP] = {
		.name		= "rxe-mc_grp",
		.size		= sizeof(struct rxe_mc_grp),
		.cleanup	= rxe_mc_cleanup,
		.flags		= RXE_POOL_KEY,
		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
		.key_size	= sizeof(union ib_gid),
	},
	[RXE_TYPE_MC_ELEM] = {
		.name		= "rxe-mc_elem",
		.size		= sizeof(struct rxe_mc_elem),
		.flags		= RXE_POOL_ATOMIC,
	},
};

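/* Pool behavior is selected by the flags above: RXE_POOL_INDEX pools hand
 * out numeric indices from a bitmap and keep elements in an rb-tree for
 * lookup by index, RXE_POOL_KEY pools keep elements in an rb-tree ordered
 * by a memcmp() of the key at key_offset, and RXE_POOL_ATOMIC pools may
 * be allocated from atomic context and therefore use GFP_ATOMIC in
 * rxe_alloc().
 */
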
static inline char *pool_name(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].name;
}

static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].cache;
}

static inline enum rxe_elem_type rxe_type(void *arg)
{
	struct rxe_pool_entry *elem = arg;

	return elem->pool->type;
}

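/* Create one slab cache per object type at module load; on failure the
 * caches created so far are destroyed before the error is returned.
 */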
int rxe_cache_init(void)
{
	int err;
	int i;
	size_t size;
	struct rxe_type_info *type;

	for (i = 0; i < RXE_NUM_TYPES; i++) {
		type = &rxe_type_info[i];
		size = ALIGN(type->size, RXE_POOL_ALIGN);
		type->cache = kmem_cache_create(type->name, size,
						RXE_POOL_ALIGN,
						RXE_POOL_CACHE_FLAGS, NULL);
		if (!type->cache) {
			pr_err("Unable to init kmem cache for %s\n",
			       type->name);
			err = -ENOMEM;
			goto err1;
		}
	}

	return 0;

err1:
	while (--i >= 0) {
		/* step back through the caches actually created; the
		 * original loop destroyed the failed (NULL) entry i times
		 * and leaked the rest
		 */
		type = &rxe_type_info[i];
		kmem_cache_destroy(type->cache);
		type->cache = NULL;
	}

	return err;
}

void rxe_cache_exit(void)
{
	int i;
	struct rxe_type_info *type;

	for (i = 0; i < RXE_NUM_TYPES; i++) {
		type = &rxe_type_info[i];
		kmem_cache_destroy(type->cache);
		type->cache = NULL;
	}
}

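/* Size the index bitmap to the [min, max] range for this type; bit n set
 * means index min_index + n is in use.
 */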
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;
	size_t size;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->max_index = max;
	pool->min_index = min;

	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->table = kmalloc(size, GFP_KERNEL);
	if (!pool->table) {
		pr_warn("no memory for bit table\n");
		err = -ENOMEM;
		goto out;
	}

	pool->table_size = size;
	bitmap_zero(pool->table, max - min + 1);

out:
	return err;
}

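/* Initialize a per-device pool for one object type: copy the static type
 * description into the pool, then set up the optional index bitmap and
 * key parameters according to the type's flags.
 */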
int rxe_pool_init(
	struct rxe_dev		*rxe,
	struct rxe_pool		*pool,
	enum rxe_elem_type	type,
	unsigned int		max_elem)
{
	int			err = 0;
	size_t			size = rxe_type_info[type].size;

	memset(pool, 0, sizeof(*pool));

	pool->rxe		= rxe;
	pool->type		= type;
	pool->max_elem		= max_elem;
	pool->elem_size		= ALIGN(size, RXE_POOL_ALIGN);
	pool->flags		= rxe_type_info[type].flags;
	pool->tree		= RB_ROOT;
	pool->cleanup		= rxe_type_info[type].cleanup;

	atomic_set(&pool->num_elem, 0);

	kref_init(&pool->ref_cnt);

	spin_lock_init(&pool->pool_lock);

	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
		err = rxe_pool_init_index(pool,
					  rxe_type_info[type].max_index,
					  rxe_type_info[type].min_index);
		if (err)
			goto out;
	}

	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
		pool->key_offset = rxe_type_info[type].key_offset;
		pool->key_size = rxe_type_info[type].key_size;
	}

	pool->state = rxe_pool_valid;

out:
	return err;
}

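/* The pool itself is reference counted: rxe_pool_cleanup() drops the
 * initial reference taken in rxe_pool_init(), and the index table is
 * freed only when the last element holding a pool reference goes away.
 */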
static void rxe_pool_release(struct kref *kref)
{
	struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);

	pool->state = rxe_pool_invalid;
	kfree(pool->table);
}

static void rxe_pool_put(struct rxe_pool *pool)
{
	kref_put(&pool->ref_cnt, rxe_pool_release);
}

int rxe_pool_cleanup(struct rxe_pool *pool)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	pool->state = rxe_pool_invalid;
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool_name(pool));
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	rxe_pool_put(pool);

	return 0;
}

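/* Find a free index with a next-fit search starting from the most
 * recently allocated index, wrapping back to the start of the bitmap if
 * necessary. Must be called with pool->pool_lock held.
 */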
static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->max_index - pool->min_index + 1;

	index = find_next_zero_bit(pool->table, range, pool->last);
	if (index >= range)
		index = find_first_zero_bit(pool->table, range);

	set_bit(index, pool->table);
	pool->last = index;
	return index + pool->min_index;
}

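/* Insert an element into the pool's rb-tree, ordered by index. A
 * duplicate index is a caller bug; it is logged and the insert is
 * dropped. Must be called with pool->pool_lock held.
 */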
static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");
			goto out;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}

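/* Same as insert_index() but the tree is ordered by a memcmp() of the
 * key bytes stored at pool->key_offset inside each element.
 */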
static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     (u8 *)new + pool->key_offset, pool->key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");
			goto out;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}

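/* rxe_add_key()/rxe_drop_key() and rxe_add_index()/rxe_drop_index() are
 * the public entry points; they take pool->pool_lock themselves, so an
 * element becomes visible to (or hidden from) lookups atomically.
 */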
void rxe_add_key(void *arg, void *key)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
	insert_key(pool, elem);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_drop_key(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	rb_erase(&elem->node, &pool->tree);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_add_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	elem->index = alloc_index(pool);
	insert_index(pool, elem);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_drop_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	clear_bit(elem->index - pool->min_index, pool->table);
	rb_erase(&elem->node, &pool->tree);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}

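/* Allocate one element from the pool's slab cache. Takes a reference on
 * the pool and on the owning rxe device; both are held until the
 * element's own kref drops to zero in rxe_elem_release().
 */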
void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_pool_entry *elem;
	unsigned long flags;

	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));

	spin_lock_irqsave(&pool->pool_lock, flags);
	if (pool->state != rxe_pool_valid) {
		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return NULL;
	}
	kref_get(&pool->ref_cnt);
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	kref_get(&pool->rxe->ref_cnt);

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_put;

	elem = kmem_cache_zalloc(pool_cache(pool),
				 (pool->flags & RXE_POOL_ATOMIC) ?
				 GFP_ATOMIC : GFP_KERNEL);
	/* the allocation can fail; unwind the count and references */
	if (!elem)
		goto out_put;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return elem;

out_put:
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	rxe_pool_put(pool);
	return NULL;
}

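/* kref release callback for pool elements: run the type-specific cleanup
 * hook, return the memory to the slab cache, and drop the pool and
 * device references taken in rxe_alloc().
 */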
void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;

	if (pool->cleanup)
		pool->cleanup(elem);

	kmem_cache_free(pool_cache(pool), elem);
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	rxe_pool_put(pool);
}

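/* Look up an element by index or by key. On success the element is
 * returned with its kref elevated; the caller must drop that reference
 * (kref_put() with rxe_elem_release()) when done with the object.
 */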
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != rxe_pool_valid)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	spin_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? (void *)elem : NULL;
}

void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	int cmp;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != rxe_pool_valid)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     key, pool->key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	spin_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? (void *)elem : NULL;
}

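/* Typical call flow, sketched for illustration only. Field and constant
 * names such as rxe->qp_pool, qp->pelem, and RXE_MAX_QP follow the style
 * of the rest of the rxe driver but are assumptions here, not defined in
 * this file. Passing the object pointer to rxe_add_index() etc. relies
 * on the pool entry being the first member of the object:
 *
 *	err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP, RXE_MAX_QP);
 *
 *	qp = rxe_alloc(&rxe->qp_pool);	// qp starts with one reference
 *	rxe_add_index(qp);		// assign an index, visible to lookups
 *
 *	qp = rxe_pool_get_index(&rxe->qp_pool, qpn);	// +1 reference
 *	...
 *	kref_put(&qp->pelem.ref_cnt, rxe_elem_release);	// drop lookup ref
 *
 *	rxe_drop_index(qp);		// hide from lookups, free the index
 *	kref_put(&qp->pelem.ref_cnt, rxe_elem_release);	// last ref frees qp
 */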