/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
/*
 * This is stored as mr->r_trans_private.
 */
struct rds_iw_mr {
	struct rds_iw_device	*device;
	struct rds_iw_mr_pool	*pool;
	struct rdma_cm_id	*cm_id;

	struct ib_mr		*mr;

	struct rds_iw_mapping	mapping;
	unsigned char		remap_count;
};
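
/* Note on the fields above: "mapping" carries the scatterlist and rkey
 * state currently bound to the MR, and remap_count feeds
 * ib_update_fast_reg_key() in rds_iw_rdma_reg_mr() below, so every
 * rebind of the MR hands out a different rkey.
 */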
/*
 * Our own little MR pool
 */
struct rds_iw_mr_pool {
	struct rds_iw_device	*device;		/* back ptr to the device that owns us */

	struct mutex		flush_lock;		/* serialize fmr invalidate */
	struct work_struct	flush_worker;		/* flush worker */

	spinlock_t		list_lock;		/* protect variables below */
	atomic_t		item_count;		/* total # of MRs */
	atomic_t		dirty_count;		/* # of dirty MRs */
	struct list_head	dirty_list;		/* dirty mappings */
	struct list_head	clean_list;		/* unused & unmapped MRs */
	atomic_t		free_pinned;		/* memory pinned by free MRs */
	unsigned long		max_message_size;	/* in pages */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	int			max_pages;
};
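
/* A rough map of the accounting above: item_count counts every MR
 * created and is capped by max_items; dirty_count and free_pinned grow
 * as freed mappings queue on dirty_list, and rds_iw_free_mr() kicks the
 * flush worker once free_pinned reaches max_free_pinned or a tenth of
 * max_items is dirty.
 */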
static void rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
static void rds_iw_mr_pool_flush_worker(struct work_struct *work);
static int rds_iw_init_reg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static int rds_iw_map_reg(struct rds_iw_mr_pool *pool,
			  struct rds_iw_mr *ibmr,
			  struct scatterlist *sg, unsigned int nents);
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
					      struct list_head *unmap_list,
					      struct list_head *kill_list,
					      unsigned int *unpinned);
static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
			     struct rds_iw_device **rds_iwdev,
			     struct rdma_cm_id **cm_id)
{
	struct rds_iw_device *iwdev;
	struct rds_iw_cm_id *i_cm_id;

	*rds_iwdev = NULL;
	*cm_id = NULL;

	list_for_each_entry(iwdev, &rds_iw_devices, list) {
		spin_lock_irq(&iwdev->spinlock);
		list_for_each_entry(i_cm_id, &iwdev->cm_id_list, list) {
			struct sockaddr_in *src_addr, *dst_addr;

			src_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.src_addr;
			dst_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.dst_addr;

			rdsdebug("local ipaddr = %x port %d, "
				 "remote ipaddr = %x port %d, "
				 "..looking for %x port %d, "
				 "remote ipaddr = %x port %d\n",
				 src_addr->sin_addr.s_addr,
				 src_addr->sin_port,
				 dst_addr->sin_addr.s_addr,
				 dst_addr->sin_port,
				 src->sin_addr.s_addr,
				 src->sin_port,
				 dst->sin_addr.s_addr,
				 dst->sin_port);
#ifdef WORKING_TUPLE_DETECTION
			if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
			    src_addr->sin_port == src->sin_port &&
			    dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
			    dst_addr->sin_port == dst->sin_port) {
#else
			/* FIXME - needs to compare the local and remote
			 * ipaddr/port tuple, but the ipaddr is the only
			 * available information in the rds_sock (as the rest
			 * are zeroed).  It doesn't appear to be properly
			 * populated during connection setup...
			 */
			if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
#endif
				spin_unlock_irq(&iwdev->spinlock);
				*rds_iwdev = iwdev;
				*cm_id = i_cm_id->cm_id;
				return 0;
			}
		}
		spin_unlock_irq(&iwdev->spinlock);
	}

	return 1;
}
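
/* A zero return means both *rds_iwdev and *cm_id were filled in.
 * rds_iw_update_cm_id() below appears to use this only as a probe for
 * an existing (possibly stale) cm_id entry before re-adding the
 * current one.
 */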
static int rds_iw_add_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
	struct rds_iw_cm_id *i_cm_id;

	i_cm_id = kmalloc(sizeof *i_cm_id, GFP_KERNEL);
	if (!i_cm_id)
		return -ENOMEM;

	i_cm_id->cm_id = cm_id;

	spin_lock_irq(&rds_iwdev->spinlock);
	list_add_tail(&i_cm_id->list, &rds_iwdev->cm_id_list);
	spin_unlock_irq(&rds_iwdev->spinlock);

	return 0;
}
static void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev,
				struct rdma_cm_id *cm_id)
{
	struct rds_iw_cm_id *i_cm_id;

	spin_lock_irq(&rds_iwdev->spinlock);
	list_for_each_entry(i_cm_id, &rds_iwdev->cm_id_list, list) {
		if (i_cm_id->cm_id == cm_id) {
			list_del(&i_cm_id->list);
			kfree(i_cm_id);
			break;
		}
	}
	spin_unlock_irq(&rds_iwdev->spinlock);
}
int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
	struct sockaddr_in *src_addr, *dst_addr;
	struct rds_iw_device *rds_iwdev_old;
	struct rdma_cm_id *pcm_id;
	int rc;

	src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
	dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;

	rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
	if (rc)
		rds_iw_remove_cm_id(rds_iwdev, cm_id);

	return rds_iw_add_cm_id(rds_iwdev, cm_id);
}
void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&iw_nodev_conns_lock);
	BUG_ON(list_empty(&iw_nodev_conns));
	BUG_ON(list_empty(&ic->iw_node));
	list_del(&ic->iw_node);

	spin_lock(&rds_iwdev->spinlock);
	list_add_tail(&ic->iw_node, &rds_iwdev->conn_list);
	spin_unlock(&rds_iwdev->spinlock);
	spin_unlock_irq(&iw_nodev_conns_lock);

	ic->rds_iwdev = rds_iwdev;
}
void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&iw_nodev_conns_lock);

	spin_lock_irq(&rds_iwdev->spinlock);
	BUG_ON(list_empty(&ic->iw_node));
	list_del(&ic->iw_node);
	spin_unlock_irq(&rds_iwdev->spinlock);

	list_add_tail(&ic->iw_node, &iw_nodev_conns);

	spin_unlock(&iw_nodev_conns_lock);

	rds_iw_remove_cm_id(ic->rds_iwdev, ic->i_cm_id);
	ic->rds_iwdev = NULL;
}
void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
	struct rds_iw_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(list_lock);
	list_splice(list, &tmp_list);
	INIT_LIST_HEAD(list);
	spin_unlock_irq(list_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node)
		rds_conn_destroy(ic->conn);
}
static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
				   struct scatterlist *list, unsigned int sg_len)
{
	sg->list = list;
	sg->len = sg_len;
	sg->dma_len = 0;
	sg->dma_npages = 0;
	sg->bytes = 0;
}
static int rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
				  struct rds_iw_scatterlist *sg)
{
	struct ib_device *dev = rds_iwdev->dev;
	int i, ret;

	WARN_ON(sg->dma_len);

	sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
	if (unlikely(!sg->dma_len)) {
		printk(KERN_WARNING "RDS/IW: dma_map_sg failed!\n");
		return -EBUSY;
	}

	sg->bytes = 0;
	sg->dma_npages = 0;

	ret = -EINVAL;
	for (i = 0; i < sg->dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
		u64 end_addr;

		sg->bytes += dma_len;

		end_addr = dma_addr + dma_len;
		if (dma_addr & PAGE_MASK) {
			if (i > 0)
				goto out_unmap;
			dma_addr &= ~PAGE_MASK;
		}
		if (end_addr & PAGE_MASK) {
			if (i < sg->dma_len - 1)
				goto out_unmap;
			end_addr = (end_addr + PAGE_MASK) & ~PAGE_MASK;
		}

		sg->dma_npages += (end_addr - dma_addr) >> PAGE_SHIFT;
	}

	/* Now gather the dma addrs into one list */
	if (sg->dma_npages > fastreg_message_size)
		goto out_unmap;

	return 0;

out_unmap:
	ib_dma_unmap_sg(rds_iwdev->dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
	sg->dma_len = 0;
	return ret;
}
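
/* The loop above encodes the shape fastreg can describe: only the
 * first entry may start mid-page and only the last may end mid-page,
 * so the whole mapping collapses to one contiguous run of pages; any
 * other layout, or more than fastreg_message_size pages, unwinds
 * through out_unmap.
 */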
struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *rds_iwdev)
{
	struct rds_iw_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		printk(KERN_WARNING "RDS/IW: rds_iw_create_mr_pool alloc error\n");
		return ERR_PTR(-ENOMEM);
	}

	pool->device = rds_iwdev;
	INIT_LIST_HEAD(&pool->dirty_list);
	INIT_LIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	spin_lock_init(&pool->list_lock);
	INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker);

	pool->max_message_size = fastreg_message_size;
	pool->max_items = fastreg_pool_size;
	pool->max_free_pinned = pool->max_items * pool->max_message_size / 4;
	pool->max_pages = fastreg_message_size;

	/* We never allow more than max_items MRs to be allocated.
	 * When we exceed more than max_items_soft, we start freeing
	 * items more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2.
	 */
	pool->max_items_soft = pool->max_items * 3 / 4;

	return pool;
}
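
/* A quick check of the sizing rule: with the 3/4 factor, e.g.
 * max_items = 100 gives max_items_soft = 75, which satisfies
 * max_items > max_items_soft > max_items / 2 as the comment requires.
 */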
void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->max_pages;
}
void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *pool)
{
	flush_workqueue(rds_wq);
	rds_iw_flush_mr_pool(pool, 1);
	BUG_ON(atomic_read(&pool->item_count));
	BUG_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}
static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
{
	struct rds_iw_mr *ibmr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (!list_empty(&pool->clean_list)) {
		ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list);
		list_del_init(&ibmr->mapping.m_list);
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);

	return ibmr;
}
static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
{
	struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
	struct rds_iw_mr *ibmr = NULL;
	int err = 0, iter = 0;

	while (1) {
		ibmr = rds_iw_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the
		 * driver tells us we can't use more than N fmrs, we
		 * shouldn't start arguing with it. */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_iw_stats_inc(s_iw_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_iw_stats_inc(s_iw_rdma_mr_pool_wait);
		rds_iw_flush_mr_pool(pool, 0);
	}

	ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	spin_lock_init(&ibmr->mapping.m_lock);
	INIT_LIST_HEAD(&ibmr->mapping.m_list);
	ibmr->mapping.m_mr = ibmr;

	err = rds_iw_init_reg(pool, ibmr);
	if (err)
		goto out_no_cigar;

	rds_iw_stats_inc(s_iw_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		rds_iw_destroy_fastreg(pool, ibmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}
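
/* The allocation order above is: reuse a clean MR, else allocate a
 * fresh one while under max_items, else synchronously flush the pool
 * to launder dirty MRs; only after the retry loop gives up does the
 * caller see -EAGAIN.
 */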
void rds_iw_sync_mr(void *trans_private, int direction)
{
	struct rds_iw_mr *ibmr = trans_private;
	struct rds_iw_device *rds_iwdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list,
				       ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list,
					  ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}
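
/* Both cases sync with DMA_BIDIRECTIONAL, matching how the scatterlist
 * was mapped in rds_iw_map_scatterlist(); the direction argument only
 * chooses between CPU and device ownership of the buffers.
 */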
/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static void rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
{
	struct rds_iw_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(kill_list);
	unsigned long flags;
	unsigned int nfreed = 0, ncleaned = 0, unpinned = 0;

	rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);

	mutex_lock(&pool->flush_lock);

	spin_lock_irqsave(&pool->list_lock, flags);
	/* Get the list of all mappings to be destroyed */
	list_splice_init(&pool->dirty_list, &unmap_list);
	if (free_all)
		list_splice_init(&pool->clean_list, &kill_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	/* Batched invalidate of dirty MRs.
	 * For FMR based MRs, the mappings on the unmap list are
	 * actually members of an ibmr (ibmr->mapping). They either
	 * migrate to the kill_list, or have been cleaned and should be
	 * moved to the clean_list.
	 * For fastregs, they will be dynamically allocated, and
	 * will be destroyed by the unmap function.
	 */
	if (!list_empty(&unmap_list)) {
		ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
						     &kill_list, &unpinned);
		/* If we've been asked to destroy all MRs, move those
		 * that were simply cleaned to the kill list */
		if (free_all)
			list_splice_init(&unmap_list, &kill_list);
	}

	/* Destroy any MRs that are past their best before date */
	list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
		rds_iw_stats_inc(s_iw_rdma_mr_free);
		list_del(&ibmr->mapping.m_list);
		rds_iw_destroy_fastreg(pool, ibmr);
		kfree(ibmr);
		nfreed++;
	}

	/* Anything that remains are laundered ibmrs, which we can add
	 * back to the clean list. */
	if (!list_empty(&unmap_list)) {
		spin_lock_irqsave(&pool->list_lock, flags);
		list_splice(&unmap_list, &pool->clean_list);
		spin_unlock_irqrestore(&pool->list_lock, flags);
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

	mutex_unlock(&pool->flush_lock);
}
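
/* In short: dirty mappings are batch-invalidated once per flush,
 * survivors return to clean_list, and only a free_all flush (pool
 * teardown) moves clean MRs onto the kill list for deregistration.
 */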
static void rds_iw_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_iw_mr_pool *pool = container_of(work, struct rds_iw_mr_pool, flush_worker);

	rds_iw_flush_mr_pool(pool, 0);
}
void rds_iw_free_mr(void *trans_private, int invalidate)
{
	struct rds_iw_mr *ibmr = trans_private;
	struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;

	rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len);
	if (!pool)
		return;

	/* Return it to the pool's free list */
	rds_iw_free_fastreg(pool, ibmr);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_work(rds_wq, &pool->flush_worker);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_iw_flush_mr_pool(pool, 0);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time. */
			queue_work(rds_wq, &pool->flush_worker);
		}
	}
}
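
/* The in_interrupt() test above matters because rds_iw_flush_mr_pool()
 * takes a mutex and may sleep; from atomic context the flush has to be
 * deferred to the workqueue instead.
 */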
void rds_iw_flush_mrs(void)
{
	struct rds_iw_device *rds_iwdev;

	list_for_each_entry(rds_iwdev, &rds_iw_devices, list) {
		struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

		if (pool)
			rds_iw_flush_mr_pool(pool, 0);
	}
}
void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_iw_device *rds_iwdev;
	struct rds_iw_mr *ibmr = NULL;
	struct rdma_cm_id *cm_id;
	struct sockaddr_in src = {
		.sin_addr.s_addr = rs->rs_bound_addr,
		.sin_port = rs->rs_bound_port,
	};
	struct sockaddr_in dst = {
		.sin_addr.s_addr = rs->rs_conn_addr,
		.sin_port = rs->rs_conn_port,
	};
	int ret;

	ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
	if (ret || !cm_id) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_iwdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_iw_alloc_mr(rds_iwdev);
	if (IS_ERR(ibmr))
		return ibmr;

	ibmr->cm_id = cm_id;
	ibmr->device = rds_iwdev;

	ret = rds_iw_map_reg(rds_iwdev->mr_pool, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->mr->rkey;
	else
		printk(KERN_WARNING "RDS/IW: failed to map mr (errno=%d)\n", ret);

out:
	if (ret) {
		if (ibmr)
			rds_iw_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	return ibmr;
}
/*
 * The life cycle of a fastreg registration is a bit different from
 * that of FMRs.
 * The idea behind fastreg is to have one MR, to which we bind different
 * mappings over time. To avoid stalling on the expensive map and invalidate
 * operations, these operations are pipelined on the same send queue on
 * which we want to send the message containing the r_key.
 *
 * This creates a bit of a problem for us, as we do not have the destination
 * IP in GET_MR, so the connection must be set up prior to the GET_MR call
 * for RDMA to be correctly set up.  If a fastreg request is present,
 * rds_iw_xmit will try to queue a LOCAL_INV (if needed) and a REG_MR work
 * request before queuing the SEND. When completions for these arrive, the
 * MR is marked with a bit showing that RDMA can be performed on it.
 *
 * There is another interesting aspect that's related to invalidation.
 * The application can request that a mapping is invalidated in FREE_MR.
 * The expectation there is that this invalidation step includes ALL
 * PREVIOUSLY FREED MRs.
 */
static int rds_iw_init_reg(struct rds_iw_mr_pool *pool,
			   struct rds_iw_mr *ibmr)
{
	struct rds_iw_device *rds_iwdev = pool->device;
	struct ib_mr *mr;
	int err;

	mr = ib_alloc_mr(rds_iwdev->pd, IB_MR_TYPE_MEM_REG,
			 pool->max_message_size);
	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);

		printk(KERN_WARNING "RDS/IW: ib_alloc_mr failed (err=%d)\n", err);
		return err;
	}

	ibmr->mr = mr;
	return 0;
}
static int rds_iw_rdma_reg_mr(struct rds_iw_mapping *mapping)
{
	struct rds_iw_mr *ibmr = mapping->m_mr;
	struct rds_iw_scatterlist *m_sg = &mapping->m_sg;
	struct ib_reg_wr reg_wr;
	struct ib_send_wr *failed_wr;
	int ret, n;

	n = ib_map_mr_sg_zbva(ibmr->mr, m_sg->list, m_sg->len, PAGE_SIZE);
	if (unlikely(n != m_sg->len))
		return n < 0 ? n : -EINVAL;

	reg_wr.wr.next = NULL;
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.wr_id = RDS_IW_REG_WR_ID;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = ibmr->mr;
	reg_wr.key = mapping->m_rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_READ |
			IB_ACCESS_REMOTE_WRITE;

	/*
	 * Perform a WR for the reg_mr. Each individual page
	 * in the sg list is added to the fast reg page list and placed
	 * inside the reg_mr WR. The key used is a rolling 8-bit
	 * counter, which should guarantee uniqueness.
	 */
	ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
	mapping->m_rkey = ibmr->mr->rkey;

	failed_wr = &reg_wr.wr;
	ret = ib_post_send(ibmr->cm_id->qp, &reg_wr.wr, &failed_wr);
	BUG_ON(failed_wr != &reg_wr.wr);
	if (ret)
		printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
				   __func__, __LINE__, ret);
	return ret;
}
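
/* ib_update_fast_reg_key() replaces only the low byte of the key
 * (roughly new_rkey = (rkey & ~0xff) | remap_count), so a peer still
 * holding an rkey from a previous mapping of this MR should fault
 * instead of silently hitting the new registration.
 */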
static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)
{
	struct ib_send_wr s_wr, *failed_wr;
	int ret = 0;

	if (!ibmr->cm_id->qp || !ibmr->mr)
		goto out;

	memset(&s_wr, 0, sizeof(s_wr));
	s_wr.wr_id = RDS_IW_LOCAL_INV_WR_ID;
	s_wr.opcode = IB_WR_LOCAL_INV;
	s_wr.ex.invalidate_rkey = ibmr->mr->rkey;
	s_wr.send_flags = IB_SEND_SIGNALED;

	failed_wr = &s_wr;
	ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
	if (ret) {
		printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
				   __func__, __LINE__, ret);
		goto out;
	}
out:
	return ret;
}
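
/* The LOCAL_INV is posted IB_SEND_SIGNALED so its completion is
 * observable; rds_iw_free_fastreg() only queues the mapping as dirty
 * when this post succeeds.
 */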
static int rds_iw_map_reg(struct rds_iw_mr_pool *pool,
			  struct rds_iw_mr *ibmr,
			  struct scatterlist *sg,
			  unsigned int sg_len)
{
	struct rds_iw_device *rds_iwdev = pool->device;
	struct rds_iw_mapping *mapping = &ibmr->mapping;
	int ret = 0;

	rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);

	ret = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
	if (ret)
		goto out;

	if (mapping->m_sg.dma_len > pool->max_message_size) {
		ret = -EMSGSIZE;
		goto out;
	}

	ret = rds_iw_rdma_reg_mr(mapping);
	if (ret)
		goto out;

	rds_iw_stats_inc(s_iw_rdma_mr_used);

out:
	return ret;
}
/*
 * "Free" a fastreg MR.
 */
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
				struct rds_iw_mr *ibmr)
{
	unsigned long flags;
	int ret;

	if (!ibmr->mapping.m_sg.dma_len)
		return;

	ret = rds_iw_rdma_fastreg_inv(ibmr);
	if (ret)
		return;

	/* Try to post the LOCAL_INV WR to the queue. */
	spin_lock_irqsave(&pool->list_lock, flags);

	list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
	atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	spin_unlock_irqrestore(&pool->list_lock, flags);
}
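
/* "Free" here really means "queue for laundering": the mapping sits on
 * dirty_list, still counted in free_pinned, until rds_iw_flush_mr_pool()
 * either returns it to clean_list or destroys it.
 */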
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
					      struct list_head *unmap_list,
					      struct list_head *kill_list,
					      unsigned int *unpinned)
{
	struct rds_iw_mapping *mapping, *next;
	unsigned int ncleaned = 0;
	LIST_HEAD(laundered);

	/* Batched invalidation of fastreg MRs.
	 * Why do we do it this way, even though we could pipeline unmap
	 * and remap? The reason is the application semantics - when the
	 * application requests an invalidation of MRs, it expects all
	 * previously released R_Keys to become invalid.
	 *
	 * If we implement MR reuse naively, we risk memory corruption
	 * (this has actually been observed). So the default behavior
	 * requires that a MR goes through an explicit unmap operation before
	 * we can reuse it again.
	 *
	 * We could probably improve on this a little, by allowing immediate
	 * reuse of a MR on the same socket (e.g. you could add a small
	 * cache of unused MRs to struct rds_socket - GET_MR could grab one
	 * of these without requiring an explicit invalidate).
	 */
	while (!list_empty(unmap_list)) {
		unsigned long flags;

		spin_lock_irqsave(&pool->list_lock, flags);
		list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
			*unpinned += mapping->m_sg.len;
			list_move(&mapping->m_list, &laundered);
			ncleaned++;
		}
		spin_unlock_irqrestore(&pool->list_lock, flags);
	}

	/* Move all laundered mappings back to the unmap list.
	 * We do not kill any WRs right now - it doesn't seem the
	 * fastreg API has a max_remap limit. */
	list_splice_init(&laundered, unmap_list);

	return ncleaned;
}
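
/* Note that nothing is moved to kill_list here: with no max_remap
 * limit to enforce, every laundered mapping is handed back for reuse,
 * and destruction is left to the free_all path in
 * rds_iw_flush_mr_pool().
 */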
static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,
				   struct rds_iw_mr *ibmr)
{
	if (ibmr->mr)
		ib_dereg_mr(ibmr->mr);
}