/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: fmr_pool.c 2730 2005-06-28 16:43:03Z sean.hefty $
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <rdma/ib_fmr_pool.h>

#include "core_priv.h"

enum {
        IB_FMR_MAX_REMAPS = 32,

        IB_FMR_HASH_BITS  = 8,
        IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
        IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};

/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < pool->max_remaps) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_register_physical() occurs before the FMR is remapped.  In
 * this case we just increment the ref_count and remove the FMR from
 * free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */

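/*
 * In condensed form, the release transition described above (drop a
 * reference; if it was the last one, file the FMR on free_list or
 * dirty_list depending on remap_count) looks like the following.  This
 * is only an illustrative summary of ib_fmr_pool_unmap() further down
 * in this file; pool_lock must be held.
 *
 *      --fmr->ref_count;
 *      if (!fmr->ref_count) {
 *              if (fmr->remap_count < pool->max_remaps)
 *                      list_add_tail(&fmr->list, &pool->free_list);
 *              else
 *                      list_add_tail(&fmr->list, &pool->dirty_list);
 *      }
 */
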
struct ib_fmr_pool {
        spinlock_t                pool_lock;

        int                       pool_size;
        int                       max_pages;
        int                       max_remaps;
        int                       dirty_watermark;
        int                       dirty_len;
        struct list_head          free_list;
        struct list_head          dirty_list;
        struct hlist_head        *cache_bucket;

        void                     (*flush_function)(struct ib_fmr_pool *pool,
                                                   void               *arg);
        void                     *flush_arg;

        struct task_struct       *thread;

        atomic_t                  req_ser;
        atomic_t                  flush_ser;

        wait_queue_head_t         force_wait;
};

static inline u32 ib_fmr_hash(u64 first_page)
{
        return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
                (IB_FMR_HASH_SIZE - 1);
}

/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
                                                      u64 *page_list,
                                                      int  page_list_len,
                                                      u64  io_virtual_address)
{
        struct hlist_head  *bucket;
        struct ib_pool_fmr *fmr;
        struct hlist_node  *pos;

        if (!pool->cache_bucket)
                return NULL;

        bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

        hlist_for_each_entry(fmr, pos, bucket, cache_node)
                if (io_virtual_address == fmr->io_virtual_address &&
                    page_list_len      == fmr->page_list_len      &&
                    !memcmp(page_list, fmr->page_list,
                            page_list_len * sizeof *page_list))
                        return fmr;

        return NULL;
}

static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
        int                 ret;
        struct ib_pool_fmr *fmr;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);

        spin_lock_irq(&pool->pool_lock);

        list_for_each_entry(fmr, &pool->dirty_list, list) {
                hlist_del_init(&fmr->cache_node);
                fmr->remap_count = 0;
                list_add_tail(&fmr->fmr->list, &fmr_list);

                if (fmr->ref_count != 0) {
                        printk(KERN_WARNING "Unmapping FMR %p with ref count %d",
                               fmr, fmr->ref_count);
                }
        }

        list_splice(&pool->dirty_list, &unmap_list);
        INIT_LIST_HEAD(&pool->dirty_list);
        pool->dirty_len = 0;

        spin_unlock_irq(&pool->pool_lock);

        if (list_empty(&unmap_list)) {
                return;
        }

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                printk(KERN_WARNING "ib_unmap_fmr returned %d", ret);

        spin_lock_irq(&pool->pool_lock);
        list_splice(&unmap_list, &pool->free_list);
        spin_unlock_irq(&pool->pool_lock);
}

static int ib_fmr_cleanup_thread(void *pool_ptr)
{
        struct ib_fmr_pool *pool = pool_ptr;

        do {
                if (pool->dirty_len >= pool->dirty_watermark ||
                    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
                        ib_fmr_batch_release(pool);

                        atomic_inc(&pool->flush_ser);
                        wake_up_interruptible(&pool->force_wait);

                        if (pool->flush_function)
                                pool->flush_function(pool, pool->flush_arg);
                }

                set_current_state(TASK_INTERRUPTIBLE);
                if (pool->dirty_len < pool->dirty_watermark &&
                    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
                    !kthread_should_stop())
                        schedule();
                __set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());

        return 0;
}

/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd:Protection domain for FMRs
 * @params:FMR pool parameters
 *
 * Create a pool of FMRs.  Return value is pointer to new pool or
 * error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
                                       struct ib_fmr_pool_param *params)
{
        struct ib_device      *device;
        struct ib_fmr_pool    *pool;
        struct ib_device_attr *attr;
        int i;
        int ret;
        int max_remaps;

        if (!params)
                return ERR_PTR(-EINVAL);

        device = pd->device;
        if (!device->alloc_fmr    || !device->dealloc_fmr  ||
            !device->map_phys_fmr || !device->unmap_fmr) {
                printk(KERN_WARNING "Device %s does not support fast memory regions",
                       device->name);
                return ERR_PTR(-ENOSYS);
        }

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr) {
                printk(KERN_WARNING "couldn't allocate device attr struct");
                return ERR_PTR(-ENOMEM);
        }

        ret = ib_query_device(device, attr);
        if (ret) {
                printk(KERN_WARNING "couldn't query device");
                kfree(attr);
                return ERR_PTR(ret);
        }

        if (!attr->max_map_per_fmr)
                max_remaps = IB_FMR_MAX_REMAPS;
        else
                max_remaps = attr->max_map_per_fmr;

        kfree(attr);

        pool = kmalloc(sizeof *pool, GFP_KERNEL);
        if (!pool) {
                printk(KERN_WARNING "couldn't allocate pool struct");
                return ERR_PTR(-ENOMEM);
        }

        pool->cache_bucket   = NULL;

        pool->flush_function = params->flush_function;
        pool->flush_arg      = params->flush_arg;

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->dirty_list);

        if (params->cache) {
                pool->cache_bucket =
                        kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
                                GFP_KERNEL);
                if (!pool->cache_bucket) {
                        printk(KERN_WARNING "Failed to allocate cache in pool");
                        ret = -ENOMEM;
                        goto out_free_pool;
                }

                for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
                        INIT_HLIST_HEAD(pool->cache_bucket + i);
        }

        pool->pool_size       = 0;
        pool->max_pages       = params->max_pages_per_fmr;
        pool->max_remaps      = max_remaps;
        pool->dirty_watermark = params->dirty_watermark;
        pool->dirty_len       = 0;
        spin_lock_init(&pool->pool_lock);
        atomic_set(&pool->req_ser,   0);
        atomic_set(&pool->flush_ser, 0);
        init_waitqueue_head(&pool->force_wait);

        pool->thread = kthread_create(ib_fmr_cleanup_thread,
                                      pool,
                                      "ib_fmr(%s)",
                                      device->name);
        if (IS_ERR(pool->thread)) {
                printk(KERN_WARNING "couldn't start cleanup thread");
                ret = PTR_ERR(pool->thread);
                goto out_free_pool;
        }

        {
                struct ib_pool_fmr *fmr;
                struct ib_fmr_attr fmr_attr = {
                        .max_pages  = params->max_pages_per_fmr,
                        .max_maps   = pool->max_remaps,
                        .page_shift = params->page_shift
                };

                for (i = 0; i < params->pool_size; ++i) {
                        fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64),
                                      GFP_KERNEL);
                        if (!fmr) {
                                printk(KERN_WARNING "failed to allocate fmr struct "
                                       "for FMR %d", i);
                                goto out_fail;
                        }

                        fmr->pool        = pool;
                        fmr->remap_count = 0;
                        fmr->ref_count   = 0;
                        INIT_HLIST_NODE(&fmr->cache_node);

                        fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
                        if (IS_ERR(fmr->fmr)) {
                                printk(KERN_WARNING "fmr_create failed for FMR %d", i);
                                kfree(fmr);
                                goto out_fail;
                        }

                        list_add_tail(&fmr->list, &pool->free_list);
                        ++pool->pool_size;
                }
        }

        return pool;

 out_free_pool:
        kfree(pool->cache_bucket);
        kfree(pool);

        return ERR_PTR(ret);

 out_fail:
        ib_destroy_fmr_pool(pool);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);

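/*
 * Illustrative usage sketch (not part of this file): how a consumer that
 * already has a protection domain might create and later destroy a pool.
 * The pd variable and all parameter values below are hypothetical
 * placeholders, not values required by this API.
 *
 *      struct ib_fmr_pool_param fmr_param = {
 *              .max_pages_per_fmr = 64,
 *              .page_shift        = PAGE_SHIFT,
 *              .access            = IB_ACCESS_LOCAL_WRITE  |
 *                                   IB_ACCESS_REMOTE_WRITE |
 *                                   IB_ACCESS_REMOTE_READ,
 *              .pool_size         = 1024,
 *              .dirty_watermark   = 32,
 *              .cache             = 1
 *      };
 *      struct ib_fmr_pool *fmr_pool;
 *
 *      fmr_pool = ib_create_fmr_pool(pd, &fmr_param);
 *      if (IS_ERR(fmr_pool))
 *              return PTR_ERR(fmr_pool);
 *
 *      (map and unmap FMRs from the pool here)
 *
 *      ib_destroy_fmr_pool(fmr_pool);
 */
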
/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool:FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
        struct ib_pool_fmr *fmr;
        struct ib_pool_fmr *tmp;
        LIST_HEAD(fmr_list);
        int                 i;

        kthread_stop(pool->thread);
        ib_fmr_batch_release(pool);

        i = 0;
        list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
                if (fmr->remap_count) {
                        INIT_LIST_HEAD(&fmr_list);
                        list_add_tail(&fmr->fmr->list, &fmr_list);
                        ib_unmap_fmr(&fmr_list);
                }
                ib_dealloc_fmr(fmr->fmr);
                list_del(&fmr->list);
                kfree(fmr);
                ++i;
        }

        if (i < pool->pool_size)
                printk(KERN_WARNING "pool still has %d regions registered",
                       pool->pool_size - i);

        kfree(pool->cache_bucket);
        kfree(pool);
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool:FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
        int serial = atomic_inc_return(&pool->req_ser);

        wake_up_process(pool->thread);

        if (wait_event_interruptible(pool->force_wait,
                                     atomic_read(&pool->flush_ser) - serial >= 0))
                return -EINTR;

        return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);

/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool:FMR pool to allocate FMR from
 * @page_list:List of pages to map
 * @list_len:Number of pages in @page_list
 * @io_virtual_address:I/O virtual address for new FMR
 *
 * Map an FMR from an FMR pool.
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                                         u64                *page_list,
                                         int                 list_len,
                                         u64                 io_virtual_address)
{
        struct ib_fmr_pool *pool = pool_handle;
        struct ib_pool_fmr *fmr;
        unsigned long       flags;
        int                 result;

        if (list_len < 1 || list_len > pool->max_pages)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&pool->pool_lock, flags);
        fmr = ib_fmr_cache_lookup(pool,
                                  page_list,
                                  list_len,
                                  io_virtual_address);
        if (fmr) {
                /* found in cache */
                ++fmr->ref_count;
                if (fmr->ref_count == 1) {
                        list_del(&fmr->list);
                }

                spin_unlock_irqrestore(&pool->pool_lock, flags);

                return fmr;
        }

        if (list_empty(&pool->free_list)) {
                spin_unlock_irqrestore(&pool->pool_lock, flags);
                return ERR_PTR(-EAGAIN);
        }

        fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
        list_del(&fmr->list);
        hlist_del_init(&fmr->cache_node);
        spin_unlock_irqrestore(&pool->pool_lock, flags);

        result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
                                 io_virtual_address);

        if (result) {
                spin_lock_irqsave(&pool->pool_lock, flags);
                list_add(&fmr->list, &pool->free_list);
                spin_unlock_irqrestore(&pool->pool_lock, flags);

                printk(KERN_WARNING "fmr_map returns %d\n",
                       result);

                return ERR_PTR(result);
        }

        ++fmr->remap_count;
        fmr->ref_count = 1;

        if (pool->cache_bucket) {
                fmr->io_virtual_address = io_virtual_address;
                fmr->page_list_len      = list_len;
                memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

                spin_lock_irqsave(&pool->pool_lock, flags);
                hlist_add_head(&fmr->cache_node,
                               pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
                spin_unlock_irqrestore(&pool->pool_lock, flags);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);

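/*
 * Illustrative mapping sketch (not part of this file): a caller that has
 * built a DMA page list can obtain a pooled FMR as below.  fmr_pool,
 * dma_pages, npages and io_addr are hypothetical placeholders; -EAGAIN
 * means no free FMR was available.
 *
 *      struct ib_pool_fmr *pool_fmr;
 *
 *      pool_fmr = ib_fmr_pool_map_phys(fmr_pool, dma_pages, npages, io_addr);
 *      if (IS_ERR(pool_fmr))
 *              return PTR_ERR(pool_fmr);
 *
 *      (post RDMA work requests using pool_fmr->fmr->lkey / rkey)
 */
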
/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr:FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
        struct ib_fmr_pool *pool;
        unsigned long       flags;

        pool = fmr->pool;

        spin_lock_irqsave(&pool->pool_lock, flags);

        --fmr->ref_count;
        if (!fmr->ref_count) {
                if (fmr->remap_count < pool->max_remaps) {
                        list_add_tail(&fmr->list, &pool->free_list);
                } else {
                        list_add_tail(&fmr->list, &pool->dirty_list);
                        ++pool->dirty_len;
                        wake_up_process(pool->thread);
                }
        }

        if (fmr->ref_count < 0)
                printk(KERN_WARNING "FMR %p has ref count %d < 0",
                       fmr, fmr->ref_count);

        spin_unlock_irqrestore(&pool->pool_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);

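/*
 * Illustrative release sketch (not part of this file): once the I/O that
 * used the mapping has completed, the FMR is returned to the pool; a
 * caller that must guarantee the old mapping is invalidated before the
 * I/O virtual address is reused can force a flush.  pool_fmr and fmr_pool
 * are the hypothetical placeholders from the sketches above.
 *
 *      ib_fmr_pool_unmap(pool_fmr);
 *
 *      if (ib_flush_fmr_pool(fmr_pool))
 *              return -EINTR;  (interrupted while waiting for the flush)
 */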