/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "rds.h"
#include "ib.h"

static struct workqueue_struct *rds_ib_fmr_wq;

static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
	struct rds_ib_device	*device;
	struct rds_ib_mr_pool	*pool;
	struct ib_fmr		*fmr;

	struct xlist_head	xlist;

	/* unmap_list is for freeing */
	struct list_head	unmap_list;
	unsigned int		remap_count;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	int			sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
	struct mutex		flush_lock;	/* serialize fmr invalidate */
	struct delayed_work	flush_worker;	/* flush worker */

	atomic_t		item_count;	/* total # of MRs */
	atomic_t		dirty_count;	/* # of dirty MRs */

	struct xlist_head	drop_list;	/* MRs that have reached their max_maps limit */
	struct xlist_head	free_list;	/* unused MRs */
	struct xlist_head	clean_list;	/* global unused & unmapped MRs */
	wait_queue_head_t	flush_wait;

	atomic_t		free_pinned;	/* memory pinned by free MRs */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	struct ib_fmr_attr	fmr_attr;
};
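
/*
 * MR lifecycle, as implemented below: rds_ib_free_mr() parks a dirty MR on
 * free_list (or on drop_list once it has used up fmr_attr.max_maps remaps);
 * rds_ib_flush_mr_pool() unmaps the dirty MRs and either destroys them or
 * moves them to clean_list, from which rds_ib_reuse_fmr() hands them back
 * out to rds_ib_alloc_fmr().
 */
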
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all,
				struct rds_ib_mr **);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				atomic_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free) {
		synchronize_rcu();
		kfree(to_free);
	}
}

int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (rds_ibdev_old) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
		rds_ib_dev_put(rds_ibdev_old);
	}

	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	atomic_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	INIT_XLIST_HEAD(&pool->free_list);
	INIT_XLIST_HEAD(&pool->drop_list);
	INIT_XLIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	pool->fmr_attr.max_pages = fmr_message_size;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

	/* We never allow more than max_items MRs to be allocated.
	 * When we exceed more than max_items_soft, we start freeing
	 * items more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2.
	 */
	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
	pool->max_items = rds_ibdev->max_fmrs;

	return pool;
}
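
/*
 * Sizing example (hypothetical numbers, for illustration only): with
 * max_fmrs = 8192 and fmr_message_size = 256 pages, the pool caps itself at
 * max_items = 8192 MRs, starts flushing aggressively past
 * max_items_soft = 6144 (3/4 of the limit), and lets at most
 * 8192 * 256 / 4 = 524288 pages stay pinned by MRs parked on the
 * free/drop lists.
 */
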
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
			struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl,
			 struct rds_ib_mr **ibmr_ret)
{
	struct xlist_head *ibmr_xl;

	ibmr_xl = xlist_del_head_fast(xl);
	*ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist);
}

static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct xlist_head *ret;
	unsigned long *flag;

	preempt_disable();
	flag = &__get_cpu_var(clean_list_grace);
	set_bit(CLEAN_LIST_BUSY_BIT, flag);
	ret = xlist_del_head(&pool->clean_list);
	if (ret)
		ibmr = list_entry(ret, struct rds_ib_mr, xlist);

	clear_bit(CLEAN_LIST_BUSY_BIT, flag);
	preempt_enable();
	return ibmr;
}

static inline void wait_clean_list_grace(void)
{
	int cpu;
	unsigned long *flag;

	for_each_online_cpu(cpu) {
		flag = &per_cpu(clean_list_grace, cpu);
		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
			cpu_relax();
	}
}
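
/*
 * How the grace period works (see also the comment in rds_ib_flush_mr_pool):
 * rds_ib_reuse_fmr() sets its CPU's CLEAN_LIST_BUSY_BIT around the lockless
 * xlist_del_head() on clean_list, and the flusher spins here until every CPU
 * has cleared its bit before it splices unmapped MRs back onto clean_list.
 */
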
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	struct rds_ib_mr *ibmr = NULL;
	int err = 0, iter = 0;

	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);

	while (1) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the
		 * driver tells us we can't use more than N fmrs, we shouldn't
		 * start arguing with it. */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	memset(ibmr, 0, sizeof(*ibmr));

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
		goto out_no_cigar;
	}

	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
			  struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
				   DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > fmr_message_size)
		return -EINVAL;

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(ibmr->fmr,
			dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	rds_ib_stats_inc(s_ib_rdma_mr_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}
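
/*
 * Why the alignment checks in rds_ib_map_fmr() above matter:
 * ib_map_phys_fmr() takes a flat array of page-aligned addresses, so every
 * intermediate scatterlist entry must start and end on a page boundary;
 * only the first entry may start mid-page and only the last may end
 * mid-page.
 */
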
void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			BUG_ON(irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_device *rds_ibdev = ibmr->device;
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * given an xlist of mrs, put them all into the list_head for more processing
 */
static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct xlist_head splice;
	struct xlist_head *cur;
	struct xlist_head *next;

	splice.next = NULL;
	xlist_splice(xlist, &splice);
	cur = splice.next;
	while (cur) {
		next = cur->next;
		ibmr = list_entry(cur, struct rds_ib_mr, xlist);
		list_add_tail(&ibmr->unmap_list, list);
		cur = next;
	}
}

/*
 * this takes a list head of mrs and turns it into an xlist of clusters.
 * each cluster has an xlist of MR_CLUSTER_SIZE mrs that are ready for
 * reuse.
 */
static void list_append_to_xlist(struct rds_ib_mr_pool *pool,
				 struct list_head *list, struct xlist_head *xlist,
				 struct xlist_head **tail_ret)
{
	struct rds_ib_mr *ibmr;
	struct xlist_head *cur_mr = xlist;
	struct xlist_head *tail_mr = NULL;

	list_for_each_entry(ibmr, list, unmap_list) {
		tail_mr = &ibmr->xlist;
		tail_mr->next = NULL;
		cur_mr->next = tail_mr;
		cur_mr = tail_mr;
	}
	*tail_ret = tail_mr;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
				int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr, *next;
	struct xlist_head clean_xlist;
	struct xlist_head *clean_tail;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, ncleaned = 0, free_goal;
	int ret = 0;

	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);

		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_fmr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (xlist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_fmr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	xlist_append_to_list(&pool->drop_list, &unmap_list);
	xlist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all)
		xlist_append_to_list(&pool->clean_list, &unmap_list);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, &unmap_list, unmap_list)
		list_add(&ibmr->fmr->list, &fmr_list);

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
			rds_ib_stats_inc(s_ib_rdma_mr_free);
			list_del(&ibmr->unmap_list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
		ncleaned++;
	}

	if (!list_empty(&unmap_list)) {
		/* we have to make sure that none of the things we're about
		 * to put on the clean list would race with other cpus trying
		 * to pull items off. The xlist would explode if we managed to
		 * remove something from the clean list and then add it back again
		 * while another CPU was spinning on that same item in xlist_del_head.
		 *
		 * This is pretty unlikely, but just in case wait for an xlist grace period
		 * here before adding anything back into the clean list.
		 */
		wait_clean_list_grace();

		list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail);
		if (ibmr_ret)
			refill_local(pool, &clean_xlist, ibmr_ret);

		/* refill_local may have emptied our list */
		if (!xlist_empty(&clean_xlist))
			xlist_add(clean_xlist.next, clean_tail, &pool->clean_list);
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return ret;
}
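
/*
 * rds_ib_flush_mr_pool() is the common cleanup path: it runs from the
 * delayed flush_worker, from rds_ib_free_mr() when too much memory is
 * pinned, from rds_ib_alloc_fmr() when the pool is exhausted, and from
 * rds_ib_flush_mrs(); rds_ib_destroy_mr_pool() calls it with free_all set.
 */
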
int rds_ib_fmr_init(void)
{
	rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
	if (!rds_ib_fmr_wq)
		return -ENOMEM;
	return 0;
}

/*
 * By the time this is called all the IB devices should have been torn down and
 * had their pools freed. As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_fmr_exit(void)
{
	destroy_workqueue(rds_ib_fmr_wq);
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list);
	else
		xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created an MR marked
			 * as use_once and invalidate at the same time. */
			queue_delayed_work(rds_ib_fmr_wq,
					   &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		if (pool)
			rds_ib_flush_mr_pool(pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}

void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr))
		return ibmr;

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;
	else
		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

	ibmr->device = rds_ibdev;
	rds_ibdev = NULL;

out:
	if (ret) {
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);
	return ibmr;
}