/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"

/* info about object pools
 * note that mr and mw share a single index space
 * so that one can map an lkey to the correct type of object
 */
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "rxe-uc",
		.size		= sizeof(struct rxe_ucontext),
	},
	[RXE_TYPE_PD] = {
		.name		= "rxe-pd",
		.size		= sizeof(struct rxe_pd),
	},
	[RXE_TYPE_AH] = {
		.name		= "rxe-ah",
		.size		= sizeof(struct rxe_ah),
		.flags		= RXE_POOL_ATOMIC,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "rxe-srq",
		.size		= sizeof(struct rxe_srq),
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
	},
	[RXE_TYPE_QP] = {
		.name		= "rxe-qp",
		.size		= sizeof(struct rxe_qp),
		.cleanup	= rxe_qp_cleanup,
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
	},
	[RXE_TYPE_CQ] = {
		.name		= "rxe-cq",
		.size		= sizeof(struct rxe_cq),
		.cleanup	= rxe_cq_cleanup,
	},
	[RXE_TYPE_MR] = {
		.name		= "rxe-mr",
		.size		= sizeof(struct rxe_mem),
		.cleanup	= rxe_mem_cleanup,
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.min_index	= RXE_MIN_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name		= "rxe-mw",
		.size		= sizeof(struct rxe_mem),
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MW_INDEX,
		.min_index	= RXE_MIN_MW_INDEX,
	},
	[RXE_TYPE_MC_GRP] = {
		.name		= "rxe-mc_grp",
		.size		= sizeof(struct rxe_mc_grp),
		.cleanup	= rxe_mc_cleanup,
		.flags		= RXE_POOL_KEY,
		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
		.key_size	= sizeof(union ib_gid),
	},
	[RXE_TYPE_MC_ELEM] = {
		.name		= "rxe-mc_elem",
		.size		= sizeof(struct rxe_mc_elem),
		.flags		= RXE_POOL_ATOMIC,
	},
};

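/*
 * Illustrative sketch (not part of the driver): the expectation is that the
 * device init path sets up one pool per object type and verbs object
 * creation then draws elements from it.  The rxe->qp_pool and max_qp names
 * below are assumptions used only for illustration.
 *
 *	struct rxe_qp *qp;
 *
 *	rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP, max_qp);
 *	qp = rxe_alloc(&rxe->qp_pool);
 *	if (qp)
 *		rxe_add_index(qp);
 *	...
 *	rxe_pool_cleanup(&rxe->qp_pool);
 */
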
static inline const char *pool_name(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].name;
}

static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].cache;
}

/* destroy the kmem caches for the first cnt object types */
static void rxe_cache_clean(size_t cnt)
{
	int i;
	struct rxe_type_info *type;

	for (i = 0; i < cnt; i++) {
		type = &rxe_type_info[i];
		kmem_cache_destroy(type->cache);
		type->cache = NULL;
	}
}

int rxe_cache_init(void)
{
	int err;
	int i;
	size_t size;
	struct rxe_type_info *type;

	for (i = 0; i < RXE_NUM_TYPES; i++) {
		type = &rxe_type_info[i];
		size = ALIGN(type->size, RXE_POOL_ALIGN);
		type->cache = kmem_cache_create(type->name, size,
				RXE_POOL_ALIGN,
				RXE_POOL_CACHE_FLAGS, NULL);
		if (!type->cache) {
			pr_err("Unable to init kmem cache for %s\n",
			       type->name);
			err = -ENOMEM;
			goto err1;
		}
	}

	return 0;

err1:
	rxe_cache_clean(i);

	return err;
}

void rxe_cache_exit(void)
{
	rxe_cache_clean(RXE_NUM_TYPES);
}

static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;
	size_t size;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->max_index = max;
	pool->min_index = min;

	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->table = kmalloc(size, GFP_KERNEL);
	if (!pool->table) {
		err = -ENOMEM;
		goto out;
	}

	pool->table_size = size;
	bitmap_zero(pool->table, max - min + 1);

out:
	return err;
}

int rxe_pool_init(
	struct rxe_dev		*rxe,
	struct rxe_pool		*pool,
	enum rxe_elem_type	type,
	unsigned int		max_elem)
{
	int			err = 0;
	size_t			size = rxe_type_info[type].size;

	memset(pool, 0, sizeof(*pool));

	pool->rxe		= rxe;
	pool->type		= type;
	pool->max_elem		= max_elem;
	pool->elem_size		= ALIGN(size, RXE_POOL_ALIGN);
	pool->flags		= rxe_type_info[type].flags;
	pool->tree		= RB_ROOT;
	pool->cleanup		= rxe_type_info[type].cleanup;

	atomic_set(&pool->num_elem, 0);

	kref_init(&pool->ref_cnt);

	spin_lock_init(&pool->pool_lock);

	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
		err = rxe_pool_init_index(pool,
					  rxe_type_info[type].max_index,
					  rxe_type_info[type].min_index);
		if (err)
			goto out;
	}

	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
		pool->key_offset = rxe_type_info[type].key_offset;
		pool->key_size = rxe_type_info[type].key_size;
	}

	pool->state = rxe_pool_valid;

out:
	return err;
}

static void rxe_pool_release(struct kref *kref)
{
	struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);

	pool->state = rxe_pool_invalid;
	kfree(pool->table);
}

static void rxe_pool_put(struct rxe_pool *pool)
{
	kref_put(&pool->ref_cnt, rxe_pool_release);
}

int rxe_pool_cleanup(struct rxe_pool *pool)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	pool->state = rxe_pool_invalid;
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool_name(pool));
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	rxe_pool_put(pool);

	return 0;
}

static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->max_index - pool->min_index + 1;

	/* search from the last allocated index, wrapping to the start */
	index = find_next_zero_bit(pool->table, range, pool->last);
	if (index >= range)
		index = find_first_zero_bit(pool->table, range);

	WARN_ON_ONCE(index >= range);
	set_bit(index, pool->table);
	pool->last = index;
	return index + pool->min_index;
}

static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");
			goto out;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}

static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     (u8 *)new + pool->key_offset, pool->key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");
			goto out;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}

void rxe_add_key(void *arg, void *key)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
	insert_key(pool, elem);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_drop_key(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	rb_erase(&elem->node, &pool->tree);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_add_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	elem->index = alloc_index(pool);
	insert_index(pool, elem);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_drop_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	clear_bit(elem->index - pool->min_index, pool->table);
	rb_erase(&elem->node, &pool->tree);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}

void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_pool_entry *elem;
	unsigned long flags;

	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));

	spin_lock_irqsave(&pool->pool_lock, flags);
	if (pool->state != rxe_pool_valid) {
		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return NULL;
	}
	kref_get(&pool->ref_cnt);
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	kref_get(&pool->rxe->ref_cnt);

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_put_pool;

	elem = kmem_cache_zalloc(pool_cache(pool),
				 (pool->flags & RXE_POOL_ATOMIC) ?
				 GFP_ATOMIC : GFP_KERNEL);
	if (!elem)
		goto out_put_pool;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return elem;

out_put_pool:
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	rxe_pool_put(pool);
	return NULL;
}

void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;

	if (pool->cleanup)
		pool->cleanup(elem);

	kmem_cache_free(pool_cache(pool), elem);
	atomic_dec(&pool->num_elem);
	rxe_dev_put(pool->rxe);
	rxe_pool_put(pool);
}

void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != rxe_pool_valid)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	spin_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? elem : NULL;
}

void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	int cmp;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != rxe_pool_valid)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     key, pool->key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	spin_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? elem : NULL;
}
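
/*
 * Illustrative sketch (not part of the driver): a successful lookup takes a
 * reference on the returned element, which the caller is expected to drop
 * again; rxe_drop_ref() is assumed here to be the driver's wrapper around
 * kref_put(..., rxe_elem_release).
 *
 *	struct rxe_qp *qp = rxe_pool_get_index(&rxe->qp_pool, qpn);
 *
 *	if (qp) {
 *		... use qp ...
 *		rxe_drop_ref(qp);
 *	}
 */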