/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <rdma/ib_fmr_pool.h>

#include "core_priv.h"
#define PFX "fmr_pool: "
enum {
        IB_FMR_MAX_REMAPS = 32,

        IB_FMR_HASH_BITS  = 8,
        IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
        IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};
/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < pool->max_remaps) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_pool_map_phys() occurs before the FMR is remapped.  In
 * this case we just increment the ref_count and remove the FMR from
 * free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */
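
/*
 * For orientation, a condensed sketch of the lifecycle described
 * above (a summary of this file's rules, not additional API
 * documentation):
 *
 *	free_list  --ib_fmr_pool_map_phys()-->  in use (ref_count > 0)
 *	in use     --ib_fmr_pool_unmap()----->  free_list
 *	             (if remap_count < max_remaps)
 *	in use     --ib_fmr_pool_unmap()----->  dirty_list
 *	             (if remap_count has reached max_remaps)
 *	dirty_list --cleanup thread unmaps--->  free_list
 *	             (remap_count reset to 0)
 */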
struct ib_fmr_pool {
        spinlock_t                pool_lock;

        int                       pool_size;
        int                       max_pages;
        int                       max_remaps;
        int                       dirty_watermark;
        int                       dirty_len;
        struct list_head          free_list;
        struct list_head          dirty_list;
        struct hlist_head        *cache_bucket;

        void                     (*flush_function)(struct ib_fmr_pool *pool,
                                                   void               *arg);
        void                     *flush_arg;

        struct task_struct       *thread;

        atomic_t                  req_ser;
        atomic_t                  flush_ser;

        wait_queue_head_t         force_wait;
};
static inline u32 ib_fmr_hash(u64 first_page)
{
        return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
                (IB_FMR_HASH_SIZE - 1);
}
/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
                                                      u64 *page_list,
                                                      int  page_list_len,
                                                      u64  io_virtual_address)
{
        struct hlist_head *bucket;
        struct ib_pool_fmr *fmr;
        struct hlist_node *pos;

        if (!pool->cache_bucket)
                return NULL;

        bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

        hlist_for_each_entry(fmr, pos, bucket, cache_node)
                if (io_virtual_address == fmr->io_virtual_address &&
                    page_list_len      == fmr->page_list_len      &&
                    !memcmp(page_list, fmr->page_list,
                            page_list_len * sizeof *page_list))
                        return fmr;

        return NULL;
}
static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
        int ret;
        struct ib_pool_fmr *fmr;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);

        spin_lock_irq(&pool->pool_lock);

        list_for_each_entry(fmr, &pool->dirty_list, list) {
                hlist_del_init(&fmr->cache_node);
                fmr->remap_count = 0;
                list_add_tail(&fmr->fmr->list, &fmr_list);

                if (fmr->ref_count != 0) {
                        printk(KERN_WARNING PFX "Unmapping FMR %p with ref count %d\n",
                               fmr, fmr->ref_count);
                }
        }

        list_splice_init(&pool->dirty_list, &unmap_list);
        pool->dirty_len = 0;

        spin_unlock_irq(&pool->pool_lock);

        if (list_empty(&unmap_list)) {
                return;
        }

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                printk(KERN_WARNING PFX "ib_unmap_fmr returned %d\n", ret);

        spin_lock_irq(&pool->pool_lock);
        list_splice(&unmap_list, &pool->free_list);
        spin_unlock_irq(&pool->pool_lock);
}
static int ib_fmr_cleanup_thread(void *pool_ptr)
{
        struct ib_fmr_pool *pool = pool_ptr;

        do {
                if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
                        ib_fmr_batch_release(pool);

                        atomic_inc(&pool->flush_ser);
                        wake_up_interruptible(&pool->force_wait);

                        if (pool->flush_function)
                                pool->flush_function(pool, pool->flush_arg);
                }

                set_current_state(TASK_INTERRUPTIBLE);
                if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
                    !kthread_should_stop())
                        schedule();
                __set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());

        return 0;
}
/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd: Protection domain for FMRs
 * @params: FMR pool parameters
 *
 * Create a pool of FMRs.  Return value is pointer to new pool or
 * error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
                                       struct ib_fmr_pool_param *params)
{
        struct ib_device   *device;
        struct ib_fmr_pool *pool;
        struct ib_device_attr *attr;
        int i;
        int ret;
        int max_remaps;

        if (!params)
                return ERR_PTR(-EINVAL);

        device = pd->device;
        if (!device->alloc_fmr    || !device->dealloc_fmr  ||
            !device->map_phys_fmr || !device->unmap_fmr) {
                printk(KERN_INFO PFX "Device %s does not support FMRs\n",
                       device->name);
                return ERR_PTR(-ENOSYS);
        }

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr) {
                printk(KERN_WARNING PFX "couldn't allocate device attr struct\n");
                return ERR_PTR(-ENOMEM);
        }

        ret = ib_query_device(device, attr);
        if (ret) {
                printk(KERN_WARNING PFX "couldn't query device: %d\n", ret);
                kfree(attr);
                return ERR_PTR(ret);
        }

        if (!attr->max_map_per_fmr)
                max_remaps = IB_FMR_MAX_REMAPS;
        else
                max_remaps = attr->max_map_per_fmr;

        kfree(attr);

        pool = kmalloc(sizeof *pool, GFP_KERNEL);
        if (!pool) {
                printk(KERN_WARNING PFX "couldn't allocate pool struct\n");
                return ERR_PTR(-ENOMEM);
        }

        pool->cache_bucket   = NULL;

        pool->flush_function = params->flush_function;
        pool->flush_arg      = params->flush_arg;

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->dirty_list);

        if (params->cache) {
                pool->cache_bucket =
                        kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
                                GFP_KERNEL);
                if (!pool->cache_bucket) {
                        printk(KERN_WARNING PFX "Failed to allocate cache in pool\n");
                        ret = -ENOMEM;
                        goto out_free_pool;
                }

                for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
                        INIT_HLIST_HEAD(pool->cache_bucket + i);
        }

        pool->pool_size       = 0;
        pool->max_pages       = params->max_pages_per_fmr;
        pool->max_remaps      = max_remaps;
        pool->dirty_watermark = params->dirty_watermark;
        pool->dirty_len       = 0;
        spin_lock_init(&pool->pool_lock);
        atomic_set(&pool->req_ser,   0);
        atomic_set(&pool->flush_ser, 0);
        init_waitqueue_head(&pool->force_wait);

        pool->thread = kthread_run(ib_fmr_cleanup_thread,
                                   pool,
                                   "ib_fmr(%s)",
                                   device->name);
        if (IS_ERR(pool->thread)) {
                printk(KERN_WARNING PFX "couldn't start cleanup thread\n");
                ret = PTR_ERR(pool->thread);
                goto out_free_pool;
        }

        {
                struct ib_pool_fmr *fmr;
                struct ib_fmr_attr fmr_attr = {
                        .max_pages  = params->max_pages_per_fmr,
                        .max_maps   = pool->max_remaps,
                        .page_shift = params->page_shift
                };
                int bytes_per_fmr = sizeof *fmr;

                if (pool->cache_bucket)
                        bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64);

                for (i = 0; i < params->pool_size; ++i) {
                        fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
                        if (!fmr) {
                                printk(KERN_WARNING PFX "failed to allocate fmr "
                                       "struct for FMR %d\n", i);
                                goto out_fail;
                        }

                        fmr->pool        = pool;
                        fmr->remap_count = 0;
                        fmr->ref_count   = 0;
                        INIT_HLIST_NODE(&fmr->cache_node);

                        fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
                        if (IS_ERR(fmr->fmr)) {
                                printk(KERN_WARNING PFX "fmr_create failed "
                                       "for FMR %d\n", i);
                                kfree(fmr);
                                goto out_fail;
                        }

                        list_add_tail(&fmr->list, &pool->free_list);
                        ++pool->pool_size;
                }
        }

        return pool;

 out_free_pool:
        kfree(pool->cache_bucket);
        kfree(pool);

        return ERR_PTR(ret);

 out_fail:
        ib_destroy_fmr_pool(pool);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);
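
/*
 * Example: creating and destroying a pool (an illustrative sketch;
 * "my_pd", "my_flush_cb" and all parameter values below are
 * hypothetical, not part of this file):
 *
 *	static void my_flush_cb(struct ib_fmr_pool *pool, void *arg)
 *	{
 *		// invoked from the cleanup thread after a batch unmap
 *	}
 *
 *	struct ib_fmr_pool_param params = {
 *		.max_pages_per_fmr = 64,
 *		.page_shift        = PAGE_SHIFT,
 *		.access            = IB_ACCESS_LOCAL_WRITE |
 *				     IB_ACCESS_REMOTE_WRITE,
 *		.pool_size         = 1024,
 *		.dirty_watermark   = 32,
 *		.flush_function    = my_flush_cb,
 *		.cache             = 1
 *	};
 *	struct ib_fmr_pool *pool = ib_create_fmr_pool(my_pd, &params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	...
 *	ib_destroy_fmr_pool(pool);
 */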
/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool: FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
        struct ib_pool_fmr *fmr;
        struct ib_pool_fmr *tmp;
        LIST_HEAD(fmr_list);
        int i;

        kthread_stop(pool->thread);
        ib_fmr_batch_release(pool);

        i = 0;
        list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
                if (fmr->remap_count) {
                        INIT_LIST_HEAD(&fmr_list);
                        list_add_tail(&fmr->fmr->list, &fmr_list);
                        ib_unmap_fmr(&fmr_list);
                }
                ib_dealloc_fmr(fmr->fmr);
                list_del(&fmr->list);
                kfree(fmr);
                ++i;
        }

        if (i < pool->pool_size)
                printk(KERN_WARNING PFX "pool still has %d regions registered\n",
                       pool->pool_size - i);

        kfree(pool->cache_bucket);
        kfree(pool);
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);
/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool: FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
        int serial;
        struct ib_pool_fmr *fmr, *next;

        /*
         * The free_list holds FMRs that may have been used
         * but have not been remapped enough times to be dirty.
         * Put them on the dirty list now so that the cleanup
         * thread will reap them too.
         */
        spin_lock_irq(&pool->pool_lock);
        list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
                if (fmr->remap_count > 0)
                        list_move(&fmr->list, &pool->dirty_list);
        }
        spin_unlock_irq(&pool->pool_lock);

        serial = atomic_inc_return(&pool->req_ser);
        wake_up_process(pool->thread);

        if (wait_event_interruptible(pool->force_wait,
                                     atomic_read(&pool->flush_ser) - serial >= 0))
                return -EINTR;

        return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);
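
/*
 * Example: forcing a flush (illustrative; a caller might do this
 * before tearing down a connection whose memory must no longer be
 * remotely accessible):
 *
 *	int ret = ib_flush_fmr_pool(pool);
 *	if (ret)
 *		return ret;	// -EINTR: interrupted by a signal
 */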
/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool_handle: FMR pool to allocate FMR from
 * @page_list: List of pages to map
 * @list_len: Number of pages in @page_list
 * @io_virtual_address: I/O virtual address for new FMR
 *
 * Map an FMR from an FMR pool.
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                                         u64                *page_list,
                                         int                 list_len,
                                         u64                 io_virtual_address)
{
        struct ib_fmr_pool *pool = pool_handle;
        struct ib_pool_fmr *fmr;
        unsigned long       flags;
        int                 result;

        if (list_len < 1 || list_len > pool->max_pages)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&pool->pool_lock, flags);
        fmr = ib_fmr_cache_lookup(pool,
                                  page_list,
                                  list_len,
                                  io_virtual_address);
        if (fmr) {
                /* found in cache */
                ++fmr->ref_count;
                if (fmr->ref_count == 1) {
                        list_del(&fmr->list);
                }

                spin_unlock_irqrestore(&pool->pool_lock, flags);

                return fmr;
        }

        if (list_empty(&pool->free_list)) {
                spin_unlock_irqrestore(&pool->pool_lock, flags);
                return ERR_PTR(-EAGAIN);
        }

        fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
        list_del(&fmr->list);
        hlist_del_init(&fmr->cache_node);
        spin_unlock_irqrestore(&pool->pool_lock, flags);

        result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
                                 io_virtual_address);

        if (result) {
                spin_lock_irqsave(&pool->pool_lock, flags);
                list_add(&fmr->list, &pool->free_list);
                spin_unlock_irqrestore(&pool->pool_lock, flags);

                printk(KERN_WARNING PFX "fmr_map returns %d\n", result);

                return ERR_PTR(result);
        }

        ++fmr->remap_count;
        fmr->ref_count = 1;

        if (pool->cache_bucket) {
                fmr->io_virtual_address = io_virtual_address;
                fmr->page_list_len      = list_len;
                memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

                spin_lock_irqsave(&pool->pool_lock, flags);
                hlist_add_head(&fmr->cache_node,
                               pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
                spin_unlock_irqrestore(&pool->pool_lock, flags);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);
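
/*
 * Example: mapping a page list through the pool (illustrative; the
 * "pages" array, "npages" and "iova" are hypothetical):
 *
 *	struct ib_pool_fmr *fmr;
 *
 *	fmr = ib_fmr_pool_map_phys(pool, pages, npages, iova);
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);	// e.g. -EAGAIN if free_list is empty
 *
 *	// use fmr->fmr->lkey / fmr->fmr->rkey in work requests
 */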
/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr: FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
        struct ib_fmr_pool *pool;
        unsigned long flags;

        pool = fmr->pool;

        spin_lock_irqsave(&pool->pool_lock, flags);

        --fmr->ref_count;
        if (!fmr->ref_count) {
                if (fmr->remap_count < pool->max_remaps) {
                        list_add_tail(&fmr->list, &pool->free_list);
                } else {
                        list_add_tail(&fmr->list, &pool->dirty_list);
                        if (++pool->dirty_len >= pool->dirty_watermark) {
                                atomic_inc(&pool->req_ser);
                                wake_up_process(pool->thread);
                        }
                }
        }

        if (fmr->ref_count < 0)
                printk(KERN_WARNING PFX "FMR %p has ref count %d < 0\n",
                       fmr, fmr->ref_count);

        spin_unlock_irqrestore(&pool->pool_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);
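
/*
 * Example: releasing a mapping (illustrative; pairs with the mapping
 * example above):
 *
 *	ib_fmr_pool_unmap(fmr);
 *
 * Note that the underlying mapping may stay valid until the FMR is
 * reused or the pool is flushed, so callers must not rely on unmap
 * alone to revoke remote access.
 */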