net/rds/iw_rdma.c
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>

#include "rds.h"
#include "rdma.h"
#include "iw.h"

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_iw_mr {
        struct rds_iw_device    *device;
        struct rds_iw_mr_pool   *pool;
        struct rdma_cm_id       *cm_id;

        struct ib_mr            *mr;
        struct ib_fast_reg_page_list *page_list;

        struct rds_iw_mapping   mapping;
        unsigned char           remap_count;
};

/*
 * Our own little MR pool
 */
struct rds_iw_mr_pool {
        struct rds_iw_device    *device;                /* back ptr to the device that owns us */

        struct mutex            flush_lock;             /* serialize fmr invalidate */
        struct work_struct      flush_worker;           /* flush worker */

        spinlock_t              list_lock;              /* protect variables below */
        atomic_t                item_count;             /* total # of MRs */
        atomic_t                dirty_count;            /* # of dirty MRs */
        struct list_head        dirty_list;             /* dirty mappings */
        struct list_head        clean_list;             /* unused & unmapped MRs */
        atomic_t                free_pinned;            /* memory pinned by free MRs */
        unsigned long           max_message_size;       /* in pages */
        unsigned long           max_items;
        unsigned long           max_items_soft;
        unsigned long           max_free_pinned;
        int                     max_pages;
};

static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
static void rds_iw_mr_pool_flush_worker(struct work_struct *work);
static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
                              struct rds_iw_mr *ibmr,
                              struct scatterlist *sg, unsigned int nents);
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
                                              struct list_head *unmap_list,
                                              struct list_head *kill_list);
static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
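
/*
 * Walk the global rds_iw_devices list and find the cm_id whose local address
 * matches the socket's bound address.  Returns 0 and fills in *rds_iwdev and
 * *cm_id on success, 1 if no match is found.
 */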
static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
{
        struct rds_iw_device *iwdev;
        struct rds_iw_cm_id *i_cm_id;

        *rds_iwdev = NULL;
        *cm_id = NULL;

        list_for_each_entry(iwdev, &rds_iw_devices, list) {
                spin_lock_irq(&iwdev->spinlock);
                list_for_each_entry(i_cm_id, &iwdev->cm_id_list, list) {
                        struct sockaddr_in *src_addr, *dst_addr;

                        src_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.src_addr;
                        dst_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.dst_addr;

                        rdsdebug("local ipaddr = %x port %d, "
                                 "remote ipaddr = %x port %d"
                                 "..looking for %x port %d, "
                                 "remote ipaddr = %x port %d\n",
                                 src_addr->sin_addr.s_addr,
                                 src_addr->sin_port,
                                 dst_addr->sin_addr.s_addr,
                                 dst_addr->sin_port,
                                 rs->rs_bound_addr,
                                 rs->rs_bound_port,
                                 rs->rs_conn_addr,
                                 rs->rs_conn_port);
#ifdef WORKING_TUPLE_DETECTION
                        if (src_addr->sin_addr.s_addr == rs->rs_bound_addr &&
                            src_addr->sin_port == rs->rs_bound_port &&
                            dst_addr->sin_addr.s_addr == rs->rs_conn_addr &&
                            dst_addr->sin_port == rs->rs_conn_port) {
#else
                        /* FIXME - needs to compare the local and remote
                         * ipaddr/port tuple, but the ipaddr is the only
                         * available information in the rds_sock (the rest are
                         * zeroed).  It doesn't appear to be properly populated
                         * during connection setup...
                         */
                        if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) {
#endif
                                spin_unlock_irq(&iwdev->spinlock);
                                *rds_iwdev = iwdev;
                                *cm_id = i_cm_id->cm_id;
                                return 0;
                        }
                }
                spin_unlock_irq(&iwdev->spinlock);
        }

        return 1;
}

static int rds_iw_add_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
        struct rds_iw_cm_id *i_cm_id;

        i_cm_id = kmalloc(sizeof *i_cm_id, GFP_KERNEL);
        if (!i_cm_id)
                return -ENOMEM;

        i_cm_id->cm_id = cm_id;

        spin_lock_irq(&rds_iwdev->spinlock);
        list_add_tail(&i_cm_id->list, &rds_iwdev->cm_id_list);
        spin_unlock_irq(&rds_iwdev->spinlock);

        return 0;
}

void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
        struct rds_iw_cm_id *i_cm_id;

        spin_lock_irq(&rds_iwdev->spinlock);
        list_for_each_entry(i_cm_id, &rds_iwdev->cm_id_list, list) {
                if (i_cm_id->cm_id == cm_id) {
                        list_del(&i_cm_id->list);
                        kfree(i_cm_id);
                        break;
                }
        }
        spin_unlock_irq(&rds_iwdev->spinlock);
}
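
/*
 * Update the cm_id bookkeeping once a cm_id's route addresses are known:
 * a scratch rds_sock is built from the cm_id's address pair and looked up,
 * after which the cm_id is (re)added to the device's cm_id_list.
 */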
int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
        struct sockaddr_in *src_addr, *dst_addr;
        struct rds_iw_device *rds_iwdev_old;
        struct rds_sock rs;
        struct rdma_cm_id *pcm_id;
        int rc;

        src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
        dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;

        rs.rs_bound_addr = src_addr->sin_addr.s_addr;
        rs.rs_bound_port = src_addr->sin_port;
        rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
        rs.rs_conn_port = dst_addr->sin_port;

        rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
        if (rc)
                rds_iw_remove_cm_id(rds_iwdev, cm_id);

        return rds_iw_add_cm_id(rds_iwdev, cm_id);
}

void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
        struct rds_iw_connection *ic = conn->c_transport_data;

        /* conn was previously on the nodev_conns_list */
        spin_lock_irq(&iw_nodev_conns_lock);
        BUG_ON(list_empty(&iw_nodev_conns));
        BUG_ON(list_empty(&ic->iw_node));
        list_del(&ic->iw_node);

        spin_lock_irq(&rds_iwdev->spinlock);
        list_add_tail(&ic->iw_node, &rds_iwdev->conn_list);
        spin_unlock_irq(&rds_iwdev->spinlock);
        spin_unlock_irq(&iw_nodev_conns_lock);

        ic->rds_iwdev = rds_iwdev;
}

void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
        struct rds_iw_connection *ic = conn->c_transport_data;

        /* place conn on nodev_conns_list */
        spin_lock(&iw_nodev_conns_lock);

        spin_lock_irq(&rds_iwdev->spinlock);
        BUG_ON(list_empty(&ic->iw_node));
        list_del(&ic->iw_node);
        spin_unlock_irq(&rds_iwdev->spinlock);

        list_add_tail(&ic->iw_node, &iw_nodev_conns);

        spin_unlock(&iw_nodev_conns_lock);

        rds_iw_remove_cm_id(ic->rds_iwdev, ic->i_cm_id);
        ic->rds_iwdev = NULL;
}

void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
        struct rds_iw_connection *ic, *_ic;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(list_lock);
        list_splice(list, &tmp_list);
        INIT_LIST_HEAD(list);
        spin_unlock_irq(list_lock);

        list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node) {
                if (ic->conn->c_passive)
                        rds_conn_destroy(ic->conn->c_passive);
                rds_conn_destroy(ic->conn);
        }
}

static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
                                   struct scatterlist *list, unsigned int sg_len)
{
        sg->list = list;
        sg->len = sg_len;
        sg->dma_len = 0;
        sg->dma_npages = 0;
        sg->bytes = 0;
}
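
/*
 * DMA-map the scatterlist and build a flat array of page-aligned DMA
 * addresses for use in a fastreg page list.  Only the first entry may start,
 * and only the last entry may end, off a page boundary; anything else is
 * rejected with -EINVAL.
 */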
static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
                                   struct rds_iw_scatterlist *sg)
{
        struct ib_device *dev = rds_iwdev->dev;
        u64 *dma_pages = NULL;
        int i, j, ret;

        WARN_ON(sg->dma_len);

        sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
        if (unlikely(!sg->dma_len)) {
                printk(KERN_WARNING "RDS/IW: dma_map_sg failed!\n");
                return ERR_PTR(-EBUSY);
        }

        sg->bytes = 0;
        sg->dma_npages = 0;

        ret = -EINVAL;
        for (i = 0; i < sg->dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
                u64 end_addr;

                sg->bytes += dma_len;

                end_addr = dma_addr + dma_len;
                if (dma_addr & ~PAGE_MASK) {
                        if (i > 0)
                                goto out_unmap;
                        dma_addr &= PAGE_MASK;
                }
                if (end_addr & ~PAGE_MASK) {
                        if (i < sg->dma_len - 1)
                                goto out_unmap;
                        end_addr = (end_addr + ~PAGE_MASK) & PAGE_MASK;
                }

                sg->dma_npages += (end_addr - dma_addr) >> PAGE_SHIFT;
        }

        /* Now gather the dma addrs into one list */
        if (sg->dma_npages > fastreg_message_size)
                goto out_unmap;

        dma_pages = kmalloc(sizeof(u64) * sg->dma_npages, GFP_ATOMIC);
        if (!dma_pages) {
                ret = -ENOMEM;
                goto out_unmap;
        }

        for (i = j = 0; i < sg->dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
                u64 end_addr;

                end_addr = dma_addr + dma_len;
                dma_addr &= PAGE_MASK;
                for (; dma_addr < end_addr; dma_addr += PAGE_SIZE)
                        dma_pages[j++] = dma_addr;
                BUG_ON(j > sg->dma_npages);
        }

        return dma_pages;

out_unmap:
        ib_dma_unmap_sg(rds_iwdev->dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
        sg->dma_len = 0;
        kfree(dma_pages);
        return ERR_PTR(ret);
}

struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *rds_iwdev)
{
        struct rds_iw_mr_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool) {
                printk(KERN_WARNING "RDS/IW: rds_iw_create_mr_pool alloc error\n");
                return ERR_PTR(-ENOMEM);
        }

        pool->device = rds_iwdev;
        INIT_LIST_HEAD(&pool->dirty_list);
        INIT_LIST_HEAD(&pool->clean_list);
        mutex_init(&pool->flush_lock);
        spin_lock_init(&pool->list_lock);
        INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker);

        pool->max_message_size = fastreg_message_size;
        pool->max_items = fastreg_pool_size;
        pool->max_free_pinned = pool->max_items * pool->max_message_size / 4;
        pool->max_pages = fastreg_message_size;

        /* We never allow more than max_items MRs to be allocated.
         * When we exceed more than max_items_soft, we start freeing
         * items more aggressively.
         * Make sure that max_items > max_items_soft > max_items / 2
         */
        pool->max_items_soft = pool->max_items * 3 / 4;

        return pool;
}

void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo)
{
        struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

        iinfo->rdma_mr_max = pool->max_items;
        iinfo->rdma_mr_size = pool->max_pages;
}

void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *pool)
{
        flush_workqueue(rds_wq);
        rds_iw_flush_mr_pool(pool, 1);
        BUG_ON(atomic_read(&pool->item_count));
        BUG_ON(atomic_read(&pool->free_pinned));
        kfree(pool);
}

static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
{
        struct rds_iw_mr *ibmr = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->list_lock, flags);
        if (!list_empty(&pool->clean_list)) {
                ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list);
                list_del_init(&ibmr->mapping.m_list);
        }
        spin_unlock_irqrestore(&pool->list_lock, flags);

        return ibmr;
}
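
/*
 * Allocate an MR for a socket: prefer a clean MR from the pool; otherwise
 * allocate a fresh one, up to the driver-imposed max_items limit, flushing
 * dirty MRs when the pool is exhausted.
 */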
static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
{
        struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
        struct rds_iw_mr *ibmr = NULL;
        int err = 0, iter = 0;

        while (1) {
                ibmr = rds_iw_reuse_fmr(pool);
                if (ibmr)
                        return ibmr;

                /* No clean MRs - now we have the choice of either
                 * allocating a fresh MR up to the limit imposed by the
                 * driver, or flushing any dirty unused MRs.
                 * We try to avoid stalling in the send path if possible,
                 * so we allocate as long as we're allowed to.
                 *
                 * We're fussy with enforcing the FMR limit, though. If the driver
                 * tells us we can't use more than N fmrs, we shouldn't start
                 * arguing with it */
                if (atomic_inc_return(&pool->item_count) <= pool->max_items)
                        break;

                atomic_dec(&pool->item_count);

                if (++iter > 2) {
                        rds_iw_stats_inc(s_iw_rdma_mr_pool_depleted);
                        return ERR_PTR(-EAGAIN);
                }

                /* We do have some empty MRs. Flush them out. */
                rds_iw_stats_inc(s_iw_rdma_mr_pool_wait);
                rds_iw_flush_mr_pool(pool, 0);
        }

        ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
        if (!ibmr) {
                err = -ENOMEM;
                goto out_no_cigar;
        }

        spin_lock_init(&ibmr->mapping.m_lock);
        INIT_LIST_HEAD(&ibmr->mapping.m_list);
        ibmr->mapping.m_mr = ibmr;

        err = rds_iw_init_fastreg(pool, ibmr);
        if (err)
                goto out_no_cigar;

        rds_iw_stats_inc(s_iw_rdma_mr_alloc);
        return ibmr;

out_no_cigar:
        if (ibmr) {
                rds_iw_destroy_fastreg(pool, ibmr);
                kfree(ibmr);
        }
        atomic_dec(&pool->item_count);
        return ERR_PTR(err);
}
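
/*
 * DMA-sync the MR's scatterlist so the CPU (DMA_FROM_DEVICE) or the device
 * (DMA_TO_DEVICE) sees a consistent view of the registered memory.
 */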
void rds_iw_sync_mr(void *trans_private, int direction)
{
        struct rds_iw_mr *ibmr = trans_private;
        struct rds_iw_device *rds_iwdev = ibmr->device;

        switch (direction) {
        case DMA_FROM_DEVICE:
                ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list,
                        ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
                break;
        case DMA_TO_DEVICE:
                ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list,
                        ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
                break;
        }
}

static inline unsigned int rds_iw_flush_goal(struct rds_iw_mr_pool *pool, int free_all)
{
        unsigned int item_count;

        item_count = atomic_read(&pool->item_count);
        if (free_all)
                return item_count;

        return 0;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
{
        struct rds_iw_mr *ibmr, *next;
        LIST_HEAD(unmap_list);
        LIST_HEAD(kill_list);
        unsigned long flags;
        unsigned int nfreed = 0, ncleaned = 0, free_goal;
        int ret = 0;

        rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);

        mutex_lock(&pool->flush_lock);

        spin_lock_irqsave(&pool->list_lock, flags);
        /* Get the list of all mappings to be destroyed */
        list_splice_init(&pool->dirty_list, &unmap_list);
        if (free_all)
                list_splice_init(&pool->clean_list, &kill_list);
        spin_unlock_irqrestore(&pool->list_lock, flags);

        free_goal = rds_iw_flush_goal(pool, free_all);

        /* Batched invalidate of dirty MRs.
         * For FMR based MRs, the mappings on the unmap list are
         * actually members of an ibmr (ibmr->mapping). They either
         * migrate to the kill_list, or have been cleaned and should be
         * moved to the clean_list.
         * For fastregs, they will be dynamically allocated, and
         * will be destroyed by the unmap function.
         */
        if (!list_empty(&unmap_list)) {
                ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list);
                /* If we've been asked to destroy all MRs, move those
                 * that were simply cleaned to the kill list */
                if (free_all)
                        list_splice_init(&unmap_list, &kill_list);
        }

        /* Destroy any MRs that are past their best before date */
        list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
                rds_iw_stats_inc(s_iw_rdma_mr_free);
                list_del(&ibmr->mapping.m_list);
                rds_iw_destroy_fastreg(pool, ibmr);
                kfree(ibmr);
                nfreed++;
        }

        /* Anything that remains are laundered ibmrs, which we can add
         * back to the clean list. */
        if (!list_empty(&unmap_list)) {
                spin_lock_irqsave(&pool->list_lock, flags);
                list_splice(&unmap_list, &pool->clean_list);
                spin_unlock_irqrestore(&pool->list_lock, flags);
        }

        atomic_sub(ncleaned, &pool->dirty_count);
        atomic_sub(nfreed, &pool->item_count);

        mutex_unlock(&pool->flush_lock);
        return ret;
}

static void rds_iw_mr_pool_flush_worker(struct work_struct *work)
{
        struct rds_iw_mr_pool *pool = container_of(work, struct rds_iw_mr_pool, flush_worker);

        rds_iw_flush_mr_pool(pool, 0);
}

void rds_iw_free_mr(void *trans_private, int invalidate)
{
        struct rds_iw_mr *ibmr = trans_private;
        struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;

        rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len);
        if (!pool)
                return;

        /* Return it to the pool's free list */
        rds_iw_free_fastreg(pool, ibmr);

        /* If we've pinned too many pages, request a flush */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
            atomic_read(&pool->dirty_count) >= pool->max_items / 10)
                queue_work(rds_wq, &pool->flush_worker);

        if (invalidate) {
                if (likely(!in_interrupt())) {
                        rds_iw_flush_mr_pool(pool, 0);
                } else {
                        /* We get here if the user created a MR marked
                         * as use_once and invalidate at the same time. */
                        queue_work(rds_wq, &pool->flush_worker);
                }
        }
}

void rds_iw_flush_mrs(void)
{
        struct rds_iw_device *rds_iwdev;

        list_for_each_entry(rds_iwdev, &rds_iw_devices, list) {
                struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

                if (pool)
                        rds_iw_flush_mr_pool(pool, 0);
        }
}
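
/*
 * Entry point for registering user memory: look up the device and cm_id for
 * this socket, allocate (or reuse) an MR, map the caller's scatterlist into
 * it, and return the resulting rkey through *key_ret.
 */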
void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
                    struct rds_sock *rs, u32 *key_ret)
{
        struct rds_iw_device *rds_iwdev;
        struct rds_iw_mr *ibmr = NULL;
        struct rdma_cm_id *cm_id;
        int ret;

        ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id);
        if (ret || !cm_id) {
                ret = -ENODEV;
                goto out;
        }

        if (!rds_iwdev->mr_pool) {
                ret = -ENODEV;
                goto out;
        }

        ibmr = rds_iw_alloc_mr(rds_iwdev);
        if (IS_ERR(ibmr))
                return ibmr;

        ibmr->cm_id = cm_id;
        ibmr->device = rds_iwdev;

        ret = rds_iw_map_fastreg(rds_iwdev->mr_pool, ibmr, sg, nents);
        if (ret == 0)
                *key_ret = ibmr->mr->rkey;
        else
                printk(KERN_WARNING "RDS/IW: failed to map mr (errno=%d)\n", ret);

out:
        if (ret) {
                if (ibmr)
                        rds_iw_free_mr(ibmr, 0);
                ibmr = ERR_PTR(ret);
        }
        return ibmr;
}

/*
 * iWARP fastreg handling
 *
 * The life cycle of a fastreg registration is a bit different from
 * FMRs.
 * The idea behind fastreg is to have one MR, to which we bind different
 * mappings over time. To avoid stalling on the expensive map and invalidate
 * operations, these operations are pipelined on the same send queue on
 * which we want to send the message containing the r_key.
 *
 * This creates a bit of a problem for us, as we do not have the destination
 * IP in GET_MR, so the connection must be set up prior to the GET_MR call
 * for RDMA to be correctly set up.  If a fastreg request is present,
 * rds_iw_xmit will try to queue a LOCAL_INV (if needed) and a FAST_REG_MR
 * work request before queuing the SEND. When completions for these arrive,
 * they are dispatched and the MR has a bit set showing that RDMA can be
 * performed.
 *
 * There is another interesting aspect that's related to invalidation.
 * The application can request that a mapping is invalidated in FREE_MR.
 * The expectation there is that this invalidation step includes ALL
 * PREVIOUSLY FREED MRs.
 */
static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
                               struct rds_iw_mr *ibmr)
{
        struct rds_iw_device *rds_iwdev = pool->device;
        struct ib_fast_reg_page_list *page_list = NULL;
        struct ib_mr *mr;
        int err;

        mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size);
        if (IS_ERR(mr)) {
                err = PTR_ERR(mr);

                printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed (err=%d)\n", err);
                return err;
        }

        /* FIXME - this is overkill, but mapping->m_sg.dma_len/mapping->m_sg.dma_npages
         * is not filled in.
         */
        page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev, pool->max_message_size);
        if (IS_ERR(page_list)) {
                err = PTR_ERR(page_list);

                printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed (err=%d)\n", err);
                ib_dereg_mr(mr);
                return err;
        }

        ibmr->page_list = page_list;
        ibmr->mr = mr;
        return 0;
}

static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
{
        struct rds_iw_mr *ibmr = mapping->m_mr;
        struct ib_send_wr f_wr, *failed_wr;
        int ret;

        /*
         * Perform a WR for the fast_reg_mr. Each individual page
         * in the sg list is added to the fast reg page list and placed
         * inside the fast_reg_mr WR.  The key used is a rolling 8bit
         * counter, which should guarantee uniqueness.
         */
        ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
        mapping->m_rkey = ibmr->mr->rkey;

        memset(&f_wr, 0, sizeof(f_wr));
        f_wr.wr_id = RDS_IW_FAST_REG_WR_ID;
        f_wr.opcode = IB_WR_FAST_REG_MR;
        f_wr.wr.fast_reg.length = mapping->m_sg.bytes;
        f_wr.wr.fast_reg.rkey = mapping->m_rkey;
        f_wr.wr.fast_reg.page_list = ibmr->page_list;
        f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len;
        f_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
        f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
                                        IB_ACCESS_REMOTE_READ |
                                        IB_ACCESS_REMOTE_WRITE;
        f_wr.wr.fast_reg.iova_start = 0;
        f_wr.send_flags = IB_SEND_SIGNALED;

        failed_wr = &f_wr;
        ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr);
        BUG_ON(failed_wr != &f_wr);
        if (ret && printk_ratelimit())
                printk(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
                       __func__, __LINE__, ret);
        return ret;
}

static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)
{
        struct ib_send_wr s_wr, *failed_wr;
        int ret = 0;

        if (!ibmr->cm_id->qp || !ibmr->mr)
                goto out;

        memset(&s_wr, 0, sizeof(s_wr));
        s_wr.wr_id = RDS_IW_LOCAL_INV_WR_ID;
        s_wr.opcode = IB_WR_LOCAL_INV;
        s_wr.ex.invalidate_rkey = ibmr->mr->rkey;
        s_wr.send_flags = IB_SEND_SIGNALED;

        failed_wr = &s_wr;
        ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
        if (ret && printk_ratelimit()) {
                printk(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
                       __func__, __LINE__, ret);
                goto out;
        }
out:
        return ret;
}
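
/*
 * Bind a scatterlist to the MR: DMA-map the pages, copy their addresses into
 * the MR's fast_reg page list, and post the FAST_REG_MR work request that
 * makes the new rkey usable.
 */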
static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
                              struct rds_iw_mr *ibmr,
                              struct scatterlist *sg,
                              unsigned int sg_len)
{
        struct rds_iw_device *rds_iwdev = pool->device;
        struct rds_iw_mapping *mapping = &ibmr->mapping;
        u64 *dma_pages;
        int i, ret = 0;

        rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);

        dma_pages = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
        if (IS_ERR(dma_pages)) {
                ret = PTR_ERR(dma_pages);
                dma_pages = NULL;
                goto out;
        }

        if (mapping->m_sg.dma_len > pool->max_message_size) {
                ret = -EMSGSIZE;
                goto out;
        }

        for (i = 0; i < mapping->m_sg.dma_npages; ++i)
                ibmr->page_list->page_list[i] = dma_pages[i];

        ret = rds_iw_rdma_build_fastreg(mapping);
        if (ret)
                goto out;

        rds_iw_stats_inc(s_iw_rdma_mr_used);

out:
        kfree(dma_pages);

        return ret;
}
805 * "Free" a fastreg MR.
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
                                struct rds_iw_mr *ibmr)
{
        unsigned long flags;
        int ret;

        if (!ibmr->mapping.m_sg.dma_len)
                return;

        ret = rds_iw_rdma_fastreg_inv(ibmr);
        if (ret)
                return;

        /* Try to post the LOCAL_INV WR to the queue. */
        spin_lock_irqsave(&pool->list_lock, flags);

        list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
        atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);

        spin_unlock_irqrestore(&pool->list_lock, flags);
}

static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
                                              struct list_head *unmap_list,
                                              struct list_head *kill_list)
{
        struct rds_iw_mapping *mapping, *next;
        unsigned int ncleaned = 0;
        LIST_HEAD(laundered);

        /* Batched invalidation of fastreg MRs.
         * Why do we do it this way, even though we could pipeline unmap
         * and remap? The reason is the application semantics - when the
         * application requests an invalidation of MRs, it expects all
         * previously released R_Keys to become invalid.
         *
         * If we implement MR reuse naively, we risk memory corruption
         * (this has actually been observed). So the default behavior
         * requires that an MR goes through an explicit unmap operation
         * before we can reuse it again.
         *
         * We could probably improve on this a little, by allowing immediate
         * reuse of an MR on the same socket (e.g. you could add a small
         * cache of unused MRs to struct rds_socket - GET_MR could grab one
         * of these without requiring an explicit invalidate).
         */
        while (!list_empty(unmap_list)) {
                unsigned long flags;

                spin_lock_irqsave(&pool->list_lock, flags);
                list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
                        list_move(&mapping->m_list, &laundered);
                        ncleaned++;
                }
                spin_unlock_irqrestore(&pool->list_lock, flags);
        }

        /* Move all laundered mappings back to the unmap list.
         * We do not kill any WRs right now - it doesn't seem the
         * fastreg API has a max_remap limit. */
        list_splice_init(&laundered, unmap_list);

        return ncleaned;
}

static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,
                                   struct rds_iw_mr *ibmr)
{
        if (ibmr->page_list)
                ib_free_fast_reg_page_list(ibmr->page_list);
        if (ibmr->mr)
                ib_dereg_mr(ibmr->mr);
}