/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: fmr_pool.c 2730 2005-06-28 16:43:03Z sean.hefty $
 */
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <rdma/ib_fmr_pool.h>

#include "core_priv.h"

#define PFX "fmr_pool: "
enum {
        IB_FMR_MAX_REMAPS = 32,

        IB_FMR_HASH_BITS  = 8,
        IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
        IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};
/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < pool->max_remaps) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_pool_map_phys() hits in the cache before the FMR is
 * remapped.  In this case we just increment the ref_count and remove
 * the FMR from free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */
struct ib_fmr_pool {
        spinlock_t                pool_lock;

        int                       pool_size;
        int                       max_pages;
        int                       max_remaps;
        int                       dirty_watermark;
        int                       dirty_len;
        struct list_head          free_list;
        struct list_head          dirty_list;
        struct hlist_head        *cache_bucket;

        void                     (*flush_function)(struct ib_fmr_pool *pool,
                                                   void               *arg);
        void                     *flush_arg;

        struct task_struct       *thread;

        atomic_t                  req_ser;
        atomic_t                  flush_ser;

        wait_queue_head_t         force_wait;
};
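
/*
 * Illustrative sketch (not part of the original driver): how a consumer
 * sees the lifecycle described above.  The identifiers pool, pages,
 * npages and iova are hypothetical.
 *
 *      struct ib_pool_fmr *fmr;
 *
 *      fmr = ib_fmr_pool_map_phys(pool, pages, npages, iova);
 *      (ref_count goes 0 -> 1; the FMR leaves free_list and, with
 *       caching enabled, is hashed into a cache bucket)
 *
 *      ... post work requests using fmr->fmr->lkey / fmr->fmr->rkey ...
 *
 *      ib_fmr_pool_unmap(fmr);
 *      (ref_count goes 1 -> 0; the FMR is appended to free_list if
 *       remap_count < max_remaps, otherwise to dirty_list for the
 *       cleanup thread to batch-unmap)
 */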
static inline u32 ib_fmr_hash(u64 first_page)
{
        return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
                IB_FMR_HASH_MASK;
}
/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
                                                      u64 *page_list,
                                                      int  page_list_len,
                                                      u64  io_virtual_address)
{
        struct hlist_head *bucket;
        struct ib_pool_fmr *fmr;
        struct hlist_node *pos;

        if (!pool->cache_bucket)
                return NULL;

        bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

        hlist_for_each_entry(fmr, pos, bucket, cache_node)
                if (io_virtual_address == fmr->io_virtual_address &&
                    page_list_len == fmr->page_list_len &&
                    !memcmp(page_list, fmr->page_list,
                            page_list_len * sizeof *page_list))
                        return fmr;

        return NULL;
}
static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
        int                 ret;
        struct ib_pool_fmr *fmr;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);

        spin_lock_irq(&pool->pool_lock);

        list_for_each_entry(fmr, &pool->dirty_list, list) {
                hlist_del_init(&fmr->cache_node);
                fmr->remap_count = 0;
                list_add_tail(&fmr->fmr->list, &fmr_list);

#ifdef DEBUG
                if (fmr->ref_count != 0) {
                        printk(KERN_WARNING PFX "Unmapping FMR %p with ref count %d\n",
                               fmr, fmr->ref_count);
                }
#endif
        }

        list_splice(&pool->dirty_list, &unmap_list);
        INIT_LIST_HEAD(&pool->dirty_list);
        pool->dirty_len = 0;

        spin_unlock_irq(&pool->pool_lock);

        if (list_empty(&unmap_list))
                return;

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                printk(KERN_WARNING PFX "ib_unmap_fmr returned %d\n", ret);

        spin_lock_irq(&pool->pool_lock);
        list_splice(&unmap_list, &pool->free_list);
        spin_unlock_irq(&pool->pool_lock);
}
/*
 * req_ser counts flush requests and flush_ser counts completed
 * flushes; the thread releases a batch whenever flush_ser lags
 * behind req_ser.
 */
static int ib_fmr_cleanup_thread(void *pool_ptr)
{
        struct ib_fmr_pool *pool = pool_ptr;

        do {
                if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
                        ib_fmr_batch_release(pool);

                        atomic_inc(&pool->flush_ser);
                        wake_up_interruptible(&pool->force_wait);

                        if (pool->flush_function)
                                pool->flush_function(pool, pool->flush_arg);
                }

                set_current_state(TASK_INTERRUPTIBLE);
                if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
                    !kthread_should_stop())
                        schedule();
                __set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());

        return 0;
}
/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd:Protection domain for FMRs
 * @params:FMR pool parameters
 *
 * Create a pool of FMRs.  Return value is pointer to new pool or
 * error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
                                       struct ib_fmr_pool_param *params)
{
        struct ib_device      *device;
        struct ib_fmr_pool    *pool;
        struct ib_device_attr *attr;
        int i;
        int ret;
        int max_remaps;

        if (!params)
                return ERR_PTR(-EINVAL);

        device = pd->device;
        if (!device->alloc_fmr    || !device->dealloc_fmr  ||
            !device->map_phys_fmr || !device->unmap_fmr) {
                printk(KERN_INFO PFX "Device %s does not support FMRs\n",
                       device->name);
                return ERR_PTR(-ENOSYS);
        }

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr) {
                printk(KERN_WARNING PFX "couldn't allocate device attr struct\n");
                return ERR_PTR(-ENOMEM);
        }

        ret = ib_query_device(device, attr);
        if (ret) {
                printk(KERN_WARNING PFX "couldn't query device: %d\n", ret);
                kfree(attr);
                return ERR_PTR(ret);
        }

        if (!attr->max_map_per_fmr)
                max_remaps = IB_FMR_MAX_REMAPS;
        else
                max_remaps = attr->max_map_per_fmr;

        kfree(attr);

        pool = kmalloc(sizeof *pool, GFP_KERNEL);
        if (!pool) {
                printk(KERN_WARNING PFX "couldn't allocate pool struct\n");
                return ERR_PTR(-ENOMEM);
        }

        pool->cache_bucket   = NULL;

        pool->flush_function = params->flush_function;
        pool->flush_arg      = params->flush_arg;

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->dirty_list);

        if (params->cache) {
                pool->cache_bucket =
                        kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
                                GFP_KERNEL);
                if (!pool->cache_bucket) {
                        printk(KERN_WARNING PFX "Failed to allocate cache in pool\n");
                        ret = -ENOMEM;
                        goto out_free_pool;
                }

                for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
                        INIT_HLIST_HEAD(pool->cache_bucket + i);
        }

        pool->pool_size       = 0;
        pool->max_pages       = params->max_pages_per_fmr;
        pool->max_remaps      = max_remaps;
        pool->dirty_watermark = params->dirty_watermark;
        pool->dirty_len       = 0;
        spin_lock_init(&pool->pool_lock);
        atomic_set(&pool->req_ser,   0);
        atomic_set(&pool->flush_ser, 0);
        init_waitqueue_head(&pool->force_wait);

        pool->thread = kthread_run(ib_fmr_cleanup_thread,
                                   pool,
                                   "ib_fmr(%s)",
                                   device->name);
        if (IS_ERR(pool->thread)) {
                printk(KERN_WARNING PFX "couldn't start cleanup thread\n");
                ret = PTR_ERR(pool->thread);
                goto out_free_pool;
        }

        {
                struct ib_pool_fmr *fmr;
                struct ib_fmr_attr fmr_attr = {
                        .max_pages  = params->max_pages_per_fmr,
                        .max_maps   = pool->max_remaps,
                        .page_shift = params->page_shift
                };
                int bytes_per_fmr = sizeof *fmr;

                if (pool->cache_bucket)
                        bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64);

                for (i = 0; i < params->pool_size; ++i) {
                        fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
                        if (!fmr) {
                                printk(KERN_WARNING PFX "failed to allocate fmr "
                                       "struct for FMR %d\n", i);
                                goto out_fail;
                        }

                        fmr->pool        = pool;
                        fmr->remap_count = 0;
                        fmr->ref_count   = 0;
                        INIT_HLIST_NODE(&fmr->cache_node);

                        fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
                        if (IS_ERR(fmr->fmr)) {
                                printk(KERN_WARNING PFX "fmr_create failed "
                                       "for FMR %d\n", i);
                                kfree(fmr);
                                goto out_fail;
                        }

                        list_add_tail(&fmr->list, &pool->free_list);
                        ++pool->pool_size;
                }
        }

        return pool;

 out_free_pool:
        kfree(pool->cache_bucket);
        kfree(pool);

        return ERR_PTR(ret);

 out_fail:
        ib_destroy_fmr_pool(pool);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);
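
/*
 * Usage sketch (illustrative only): creating and destroying a pool.
 * The protection domain "pd" and all parameter values below are
 * hypothetical; real consumers size the pool to their queue depth.
 *
 *      struct ib_fmr_pool_param params = {
 *              .max_pages_per_fmr = 64,
 *              .page_shift        = PAGE_SHIFT,
 *              .access            = IB_ACCESS_LOCAL_WRITE |
 *                                   IB_ACCESS_REMOTE_READ |
 *                                   IB_ACCESS_REMOTE_WRITE,
 *              .pool_size         = 1024,
 *              .dirty_watermark   = 32,
 *              .cache             = 1,
 *      };
 *      struct ib_fmr_pool *pool;
 *
 *      pool = ib_create_fmr_pool(pd, &params);
 *      if (IS_ERR(pool))
 *              return PTR_ERR(pool);
 *      ...
 *      ib_destroy_fmr_pool(pool);
 */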
/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool:FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
        struct ib_pool_fmr *fmr;
        struct ib_pool_fmr *tmp;
        LIST_HEAD(fmr_list);
        int                 i;

        kthread_stop(pool->thread);
        ib_fmr_batch_release(pool);

        i = 0;
        list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
                if (fmr->remap_count) {
                        INIT_LIST_HEAD(&fmr_list);
                        list_add_tail(&fmr->fmr->list, &fmr_list);
                        ib_unmap_fmr(&fmr_list);
                }
                ib_dealloc_fmr(fmr->fmr);
                list_del(&fmr->list);
                kfree(fmr);
                ++i;
        }

        if (i < pool->pool_size)
                printk(KERN_WARNING PFX "pool still has %d regions registered\n",
                       pool->pool_size - i);

        kfree(pool->cache_bucket);
        kfree(pool);
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);
/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool:FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
        int serial;
        struct ib_pool_fmr *fmr, *next;

        /*
         * The free_list holds FMRs that may have been used
         * but have not been remapped enough times to be dirty.
         * Put them on the dirty list now so that the cleanup
         * thread will reap them too.
         */
        spin_lock_irq(&pool->pool_lock);
        list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
                if (fmr->remap_count > 0)
                        list_move(&fmr->list, &pool->dirty_list);
        }
        spin_unlock_irq(&pool->pool_lock);

        serial = atomic_inc_return(&pool->req_ser);
        wake_up_process(pool->thread);

        if (wait_event_interruptible(pool->force_wait,
                                     atomic_read(&pool->flush_ser) - serial >= 0))
                return -EINTR;

        return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);
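
/*
 * Usage sketch (illustrative only; "pool" is hypothetical): a consumer
 * that must guarantee no stale mapping survives -- for example before
 * retrying a failed I/O down another path -- forces a synchronous
 * flush and checks for signal interruption.
 *
 *      if (ib_flush_fmr_pool(pool))
 *              return -EINTR;  (interrupted; flush may not have completed)
 */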
/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool:FMR pool to allocate FMR from
 * @page_list:List of pages to map
 * @list_len:Number of pages in @page_list
 * @io_virtual_address:I/O virtual address for new FMR
 *
 * Map an FMR from an FMR pool.
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                                         u64                *page_list,
                                         int                 list_len,
                                         u64                 io_virtual_address)
{
        struct ib_fmr_pool *pool = pool_handle;
        struct ib_pool_fmr *fmr;
        unsigned long       flags;
        int                 result;

        if (list_len < 1 || list_len > pool->max_pages)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&pool->pool_lock, flags);
        fmr = ib_fmr_cache_lookup(pool,
                                  page_list,
                                  list_len,
                                  io_virtual_address);
        if (fmr) {
                /* found in cache */
                ++fmr->ref_count;
                if (fmr->ref_count == 1)
                        list_del(&fmr->list);

                spin_unlock_irqrestore(&pool->pool_lock, flags);

                return fmr;
        }

        if (list_empty(&pool->free_list)) {
                spin_unlock_irqrestore(&pool->pool_lock, flags);
                return ERR_PTR(-EAGAIN);
        }

        fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
        list_del(&fmr->list);
        hlist_del_init(&fmr->cache_node);
        spin_unlock_irqrestore(&pool->pool_lock, flags);

        result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
                                 io_virtual_address);

        if (result) {
                spin_lock_irqsave(&pool->pool_lock, flags);
                list_add(&fmr->list, &pool->free_list);
                spin_unlock_irqrestore(&pool->pool_lock, flags);

                printk(KERN_WARNING PFX "fmr_map returns %d\n", result);

                return ERR_PTR(result);
        }

        ++fmr->remap_count;
        fmr->ref_count = 1;

        if (pool->cache_bucket) {
                fmr->io_virtual_address = io_virtual_address;
                fmr->page_list_len      = list_len;
                memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

                spin_lock_irqsave(&pool->pool_lock, flags);
                hlist_add_head(&fmr->cache_node,
                               pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
                spin_unlock_irqrestore(&pool->pool_lock, flags);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);
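
/*
 * Usage sketch (illustrative only): mapping a physically discontiguous
 * buffer.  The names dma_pages, nr_pages and iova are hypothetical and
 * would come from DMA-mapping the consumer's scatterlist.
 *
 *      struct ib_pool_fmr *pfmr;
 *
 *      pfmr = ib_fmr_pool_map_phys(pool, dma_pages, nr_pages, iova);
 *      if (IS_ERR(pfmr))
 *              return PTR_ERR(pfmr);  (-EAGAIN means the free list is
 *                                      empty; retry after unmaps complete)
 *
 *      ... post work requests using pfmr->fmr->lkey / pfmr->fmr->rkey ...
 */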
/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr:FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
        struct ib_fmr_pool *pool;
        unsigned long flags;

        pool = fmr->pool;

        spin_lock_irqsave(&pool->pool_lock, flags);

        --fmr->ref_count;
        if (!fmr->ref_count) {
                if (fmr->remap_count < pool->max_remaps) {
                        list_add_tail(&fmr->list, &pool->free_list);
                } else {
                        list_add_tail(&fmr->list, &pool->dirty_list);
                        if (++pool->dirty_len >= pool->dirty_watermark) {
                                atomic_inc(&pool->req_ser);
                                wake_up_process(pool->thread);
                        }
                }
        }

#ifdef DEBUG
        if (fmr->ref_count < 0)
                printk(KERN_WARNING PFX "FMR %p has ref count %d < 0\n",
                       fmr, fmr->ref_count);
#endif

        spin_unlock_irqrestore(&pool->pool_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);
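
/*
 * Usage sketch (illustrative only): every successful
 * ib_fmr_pool_map_phys() is eventually paired with ib_fmr_pool_unmap(),
 * typically from the consumer's completion handler.  The structure
 * example_io is hypothetical.
 *
 *      static void example_io_done(struct example_io *io)
 *      {
 *              ib_fmr_pool_unmap(io->pfmr);
 *              io->pfmr = NULL;
 *      }
 *
 * Since the mapping may stay valid until the FMR is reused, consumers
 * that need the mapping invalidated immediately must follow up with
 * ib_flush_fmr_pool().
 */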