/*
 * Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rds.h"
/*
 * XXX
 *  - should we detect duplicate keys on a socket?  hmm.
 *  - an rdma is an mlock, apply rlimit?
 */
/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}
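/*
 * Worked example (illustrative, assuming 4 KiB pages, PAGE_SHIFT == 12):
 * a vec with addr = 0x1ff0 and bytes = 0x20 ends at 0x2010 and so touches
 * page indices 1 and 2.  ((0x2010 + 0xfff) >> 12) - (0x1ff0 >> 12)
 * = 3 - 1 = 2 pages, even though only 0x20 bytes are described.
 */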
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				       struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		refcount_inc(&insert->r_refcount);
	}
	return NULL;
}
/*
 * Destroy the transport-specific part of a MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
		 mr->r_key, refcount_read(&mr->r_refcount));

	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
		return;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}
void __rds_put_mr_final(struct rds_mr *mr)
{
	rds_destroy_mr(mr);
	kfree(mr);
}
/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;
	unsigned long flags;

	/* Release any MRs associated with this socket */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = rb_entry(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		rds_destroy_mr(mr);
		rds_mr_put(mr);
		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}
/*
 * Helper function to pin user pages.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			 struct page **pages, int write)
{
	unsigned int gup_flags = FOLL_LONGTERM;
	int ret;

	if (write)
		gup_flags |= FOLL_WRITE;

	ret = pin_user_pages_fast(user_addr, nr_pages, gup_flags, pages);
	if (ret >= 0 && ret < nr_pages) {
		unpin_user_pages(pages, ret);
		ret = -EFAULT;
	}

	return ret;
}
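/*
 * pin_user_pages_fast() may pin fewer pages than requested.  Treating a
 * short pin as a failure and releasing whatever was pinned keeps a single
 * cleanup path for callers: apart from errors passed straight through
 * (e.g. -EOPNOTSUPP from ODP-only mappings), they only ever observe
 * "all nr_pages pinned" or "nothing pinned".
 */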
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
			  u64 *cookie_ret, struct rds_mr **mr_ret,
			  struct rds_conn_path *cp)
{
	struct rds_mr *mr = NULL, *found;
	struct scatterlist *sg = NULL;
	unsigned int nr_pages;
	struct page **pages = NULL;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents = 0;
	int need_odp = 0;
	long i;
	int ret;

	if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	/* If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((args->vec.addr + args->vec.bytes) < args->vec.addr) ||
	    PAGE_ALIGN(args->vec.addr + args->vec.bytes) <
	    (args->vec.addr + args->vec.bytes)) {
		ret = -EINVAL;
		goto out;
	}

	if (!can_do_mlock()) {
		ret = -EPERM;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	/* Restrict the size of mr irrespective of underlying transport
	 * To account for unaligned mr regions, subtract one from nr_pages
	 */
	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
		ret = -EMSGSIZE;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		 args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}
	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	refcount_set(&mr->r_refcount, 1);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array.  We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
	if (ret == -EOPNOTSUPP) {
		need_odp = 1;
	} else if (ret <= 0) {
		goto out;
	} else {
		nents = ret;
		sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
		if (!sg) {
			ret = -ENOMEM;
			goto out;
		}
		sg_init_table(sg, nents);

		/* Stick all pages into the scatterlist */
		for (i = 0 ; i < nents; i++)
			sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

		rdsdebug("RDS: trans_private nents is %u\n", nents);
	}

	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(
		sg, nents, rs, &mr->r_key, cp ? cp->cp_conn : NULL,
		args->vec.addr, args->vec.bytes,
		need_odp ? ODP_ZEROBASED : ODP_NOT_NEEDED);
	if (IS_ERR(trans_private)) {
		/* In ODP case, we don't GUP pages, so don't need
		 * to release anything.
		 */
		if (!need_odp) {
			unpin_user_pages(pages, nr_pages);
			kfree(sg);
		}
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
		 mr->r_key, (void *)(unsigned long) args->cookie_addr);
	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
	if (need_odp)
		cookie = rds_rdma_make_cookie(mr->r_key, 0);
	else
		cookie = rds_rdma_make_cookie(mr->r_key,
					      args->vec.addr & ~PAGE_MASK);
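	/*
	 * Cookie layout, for illustration: rds_rdma_make_cookie() packs the
	 * 32-bit R_Key and the sub-page offset into a single u64.  E.g. with
	 * 4 KiB pages and addr = 0x2010, the offset kept is 0x10, letting
	 * the peer address the MR from the exact starting byte even though
	 * only page-aligned regions can be mapped.
	 */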
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr &&
	    put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
		if (!need_odp) {
			unpin_user_pages(pages, nr_pages);
			kfree(sg);
		}
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		refcount_inc(&mr->r_refcount);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		rds_mr_put(mr);
	return ret;
}
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
			   sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
}
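/*
 * Userspace sketch (illustrative, not part of this file): an application
 * registers a buffer with setsockopt() on an RDS socket, roughly
 *
 *	struct rds_get_mr_args marg = {
 *		.vec         = { .addr = (uint64_t)buf, .bytes = len },
 *		.cookie_addr = (uint64_t)&cookie,
 *		.flags       = RDS_RDMA_READWRITE,
 *	};
 *	setsockopt(fd, SOL_RDS, RDS_GET_MR, &marg, sizeof(marg));
 *
 * which reaches rds_get_mr() via the socket layer; the cookie written back
 * is then exchanged with the peer to name this MR in later RDMA requests.
 */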
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
			   sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *	 and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
}
/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
			   sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	/*
	 * call rds_destroy_mr() ourselves so that we're sure it's done by the time
	 * we return.  If we let rds_mr_put() do it it might not happen until
	 * someone else drops their ref.
	 */
	rds_destroy_mr(mr);
	rds_mr_put(mr);
	return 0;
}
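/*
 * Userspace sketch (illustrative): the matching teardown call is
 *
 *	struct rds_free_mr_args farg = { .cookie = cookie, .flags = 0 };
 *	setsockopt(fd, SOL_RDS, RDS_FREE_MR, &farg, sizeof(farg));
 *
 * and passing a zero cookie instead asks the transport to flush all
 * unused MRs, as handled by the special case above.
 */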
/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
		pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
			 r_key);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		return;
	}

	if (mr->r_use_once || force) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was a RDMA READ,
	 * but at this point we can't tell. */
	if (mr->r_trans->sync_mr)
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

	/* If the MR was marked as invalidate, this will
	 * trigger an async flush. */
	if (zot_me) {
		rds_destroy_mr(mr);
		rds_mr_put(mr);
	}
}
void rds_rdma_free_op(struct rm_rdma_op *ro)
{
	unsigned int i;

	if (ro->op_odp_mr) {
		rds_mr_put(ro->op_odp_mr);
	} else {
		for (i = 0; i < ro->op_nents; i++) {
			struct page *page = sg_page(&ro->op_sg[i]);

			/* Mark page dirty if it was possibly modified, which
			 * is the case for a RDMA_READ which copies from remote
			 * to local memory
			 */
			unpin_user_pages_dirty_lock(&page, 1, !ro->op_write);
		}
	}

	kfree(ro->op_notifier);
	ro->op_notifier = NULL;
	ro->op_active = 0;
	ro->op_odp_mr = NULL;
}
void rds_atomic_free_op(struct rm_atomic_op *ao)
{
	struct page *page = sg_page(ao->op_sg);

	/* Mark page dirty if it was possibly modified, which
	 * is the case for a RDMA_READ which copies from remote
	 * to local memory */
	unpin_user_pages_dirty_lock(&page, 1, true);

	kfree(ao->op_notifier);
	ao->op_notifier = NULL;
	ao->op_active = 0;
}
/*
 * Count the number of pages needed to describe an incoming iovec array.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	/* figure out the number of pages in the vector */
	for (i = 0; i < nr_iovecs; i++) {
		nr_pages = rds_pages_in_vec(&iov[i]);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages;
}
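/*
 * Why the overflow check in rds_rdma_pages() is sufficient (an illustration,
 * assuming 4 KiB pages): each iovec contributes at most
 * (UINT_MAX >> 12) + 1, roughly 2^20 pages, per iteration.  A signed int
 * accumulator stepping in increments that small cannot jump from one valid
 * positive value past INT_MAX back into positive range, so "tot_pages < 0"
 * reliably catches the wrap before the total is used as an allocation size.
 */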
int rds_rdma_extra_size(struct rds_rdma_args *args,
			struct rds_iov_vector *iov)
{
	struct rds_iovec *vec;
	struct rds_iovec __user *local_vec;
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	if (args->nr_local == 0)
		return -EINVAL;

	iov->iov = kcalloc(args->nr_local,
			   sizeof(struct rds_iovec),
			   GFP_KERNEL);
	if (!iov->iov)
		return -ENOMEM;

	vec = &iov->iov[0];

	if (copy_from_user(vec, local_vec, args->nr_local *
			   sizeof(struct rds_iovec)))
		return -EFAULT;
	iov->len = args->nr_local;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++, vec++) {
		nr_pages = rds_pages_in_vec(vec);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages * sizeof(struct scatterlist);
}
/*
 * The application asks for a RDMA transfer.
 * Extract all arguments and set up the rdma_op
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg,
		       struct rds_iov_vector *vec)
{
	struct rds_rdma_args *args;
	struct rm_rdma_op *op = &rm->rdma;
	int nr_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec *iovs;
	unsigned int i, j;
	int ret = 0;
	bool odp_supported = true;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
	    || rm->rdma.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	if (ipv6_addr_any(&rs->rs_bound_addr)) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out_ret;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out_ret;
	}

	if (vec->len != args->nr_local) {
		ret = -EINVAL;
		goto out_ret;
	}
	/* odp-mr is not supported for multiple requests within one message */
	if (args->nr_local != 1)
		odp_supported = false;

	iovs = vec->iov;

	nr_pages = rds_rdma_pages(iovs, args->nr_local);
	if (nr_pages < 0) {
		ret = -EINVAL;
		goto out_ret;
	}

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ret;
	}
	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
	op->op_active = 1;
	op->op_recverr = rs->rs_recverr;
	op->op_odp_mr = NULL;

	op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret);
	if (!op->op_sg)
		goto out_pages;

	if (op->op_notify || op->op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->op_notifier) {
			ret = -ENOMEM;
			goto out_pages;
		}
		op->op_notifier->n_user_token = args->user_token;
		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
	}
	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR)
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->op_rkey = rds_rdma_cookie_key(args->cookie);
	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
		 (unsigned long long)args->nr_local,
		 (unsigned long long)args->remote_vec.addr,
		 op->op_rkey);
	for (i = 0; i < args->nr_local; i++) {
		struct rds_iovec *iov = &iovs[i];
		/* don't need to check, rds_rdma_pages() verified nr will be +nonzero */
		unsigned int nr = rds_pages_in_vec(iov);

		rs->rs_user_addr = iov->addr;
		rs->rs_user_bytes = iov->bytes;

		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
		if ((!odp_supported && ret <= 0) ||
		    (odp_supported && ret <= 0 && ret != -EOPNOTSUPP))
			goto out_pages;

		if (ret == -EOPNOTSUPP) {
			struct rds_mr *local_odp_mr;

			if (!rs->rs_transport->get_mr) {
				ret = -EOPNOTSUPP;
				goto out_pages;
			}
			local_odp_mr =
				kzalloc(sizeof(*local_odp_mr), GFP_KERNEL);
			if (!local_odp_mr) {
				ret = -ENOMEM;
				goto out_pages;
			}
			RB_CLEAR_NODE(&local_odp_mr->r_rb_node);
			refcount_set(&local_odp_mr->r_refcount, 1);
			local_odp_mr->r_trans = rs->rs_transport;
			local_odp_mr->r_sock = rs;
			local_odp_mr->r_trans_private =
				rs->rs_transport->get_mr(
					NULL, 0, rs, &local_odp_mr->r_key, NULL,
					iov->addr, iov->bytes, ODP_VIRTUAL);
			if (IS_ERR(local_odp_mr->r_trans_private)) {
				ret = PTR_ERR(local_odp_mr->r_trans_private);
				rdsdebug("get_mr ret %d %p\n", ret,
					 local_odp_mr->r_trans_private);
				kfree(local_odp_mr);
				ret = -EOPNOTSUPP;
				goto out_pages;
			}
			rdsdebug("Need odp; local_odp_mr %p trans_private %p\n",
				 local_odp_mr, local_odp_mr->r_trans_private);
			op->op_odp_mr = local_odp_mr;
			op->op_odp_addr = iov->addr;
		}

		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
			 nr_bytes, nr, iov->bytes, iov->addr);

		nr_bytes += iov->bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = iov->addr & ~PAGE_MASK;
			struct scatterlist *sg;

			sg = &op->op_sg[op->op_nents + j];
			sg_set_page(sg, pages[j],
				    min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
				    offset);

			sg_dma_len(sg) = sg->length;
			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
				 sg->offset, sg->length, iov->addr, iov->bytes);

			iov->addr += sg->length;
			iov->bytes -= sg->length;
		}

		op->op_nents += nr;
	}
	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
			 nr_bytes,
			 (unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out_pages;
	}

	op->op_bytes = nr_bytes;
	ret = 0;

out_pages:
	kfree(pages);
out_ret:
	if (ret)
		rds_rdma_free_op(op);
	else
		rds_stats_inc(s_send_rdma);

	return ret;
}
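/*
 * Userspace sketch (illustrative): the RDMA request handled above arrives
 * as a control message on sendmsg(), roughly
 *
 *	struct rds_rdma_args rargs = { ... };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *	c->cmsg_level = SOL_RDS;
 *	c->cmsg_type  = RDS_CMSG_RDMA_ARGS;
 *	c->cmsg_len   = CMSG_LEN(sizeof(rargs));
 *	memcpy(CMSG_DATA(c), &rargs, sizeof(rargs));
 *	sendmsg(fd, &msg, 0);
 */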
/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr)
		err = -EINVAL;	/* invalid r_key */
	else
		refcount_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private,
				     DMA_TO_DEVICE);
		rm->rdma.op_rdma_mr = mr;
	}
	return err;
}
/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
			      &rm->rdma.op_rdma_mr, rm->m_conn_path);
}
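/*
 * Note the division of labor between the two handlers above:
 * RDS_CMSG_RDMA_MAP creates a fresh MR for this message, while
 * RDS_CMSG_RDMA_DEST reuses one previously registered via RDS_GET_MR.
 * Either way, the resulting <R_Key, offset> cookie in rm->m_rdma_cookie
 * travels to the peer in an extension header.
 */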
/*
 * Fill in rds_message for an atomic request.
 */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg)
{
	struct page *page = NULL;
	struct rds_atomic_args *args;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
	    || rm->atomic.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);
	/* Nonmasked & masked cmsg ops converted to masked hw ops */
	switch (cmsg->cmsg_type) {
	case RDS_CMSG_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = 0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->m_fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
		break;
	case RDS_CMSG_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->cswp.compare;
		rm->atomic.op_m_cswp.swap = args->cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = ~0;
		rm->atomic.op_m_cswp.swap_mask = ~0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
		break;
	default:
		BUG(); /* should never happen */
	}
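	/*
	 * Masked semantics, for illustration: a masked CSWP compares and
	 * swaps only the bits selected by compare_mask/swap_mask, which is
	 * why the non-masked cases above simply set both masks to ~0.  For
	 * FADD, a set bit in nocarry_mask stops the carry out of that bit
	 * position, so one 64-bit word can hold several independent fields.
	 */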
	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
	rm->atomic.op_active = 1;
	rm->atomic.op_recverr = rs->rs_recverr;
	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1, &ret);
	if (!rm->atomic.op_sg)
		goto err;

	/* verify 8 byte-aligned */
	if (args->local_addr & 0x7) {
		ret = -EFAULT;
		goto err;
	}

	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
	if (ret != 1)
		goto err;
	ret = 0;

	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));

	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
		if (!rm->atomic.op_notifier) {
			ret = -ENOMEM;
			goto err;
		}

		rm->atomic.op_notifier->n_user_token = args->user_token;
		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

	return ret;
err:
	if (page)
		unpin_user_page(page);
	rm->atomic.op_active = 0;
	kfree(rm->atomic.op_notifier);

	return ret;
}