/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include <rdma/rdma_vt.h>
#include "vt.h"
#include "mr.h"

/**
 * rvt_driver_mr_init - Init MR resources per driver
 * @rdi: rvt dev struct
 *
 * Do any initialization needed when a driver registers with rdmavt.
 *
 * Return: 0 on success or errno on failure
 */
int rvt_driver_mr_init(struct rvt_dev_info *rdi)
{
	unsigned int lkey_table_size = rdi->dparms.lkey_table_size;
	unsigned int lk_tab_size;
	int i;

	/*
	 * The top lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
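	/*
	 * Sizing illustration (added; not in the original source): a
	 * driver passing lkey_table_size == 16 gets a 65536-entry table
	 * below, i.e. 65536 * sizeof(struct rvt_mregion __rcu *), which
	 * is 512 KiB of pointers on a 64-bit kernel.
	 */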
	if (!lkey_table_size)
		return -EINVAL;

	spin_lock_init(&rdi->lkey_table.lock);

	/* ensure generation is at least 4 bits */
	if (lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) {
		rvt_pr_warn(rdi, "lkey bits %u too large, reduced to %u\n",
			    lkey_table_size, RVT_MAX_LKEY_TABLE_BITS);
		rdi->dparms.lkey_table_size = RVT_MAX_LKEY_TABLE_BITS;
		lkey_table_size = rdi->dparms.lkey_table_size;
	}
	rdi->lkey_table.max = 1 << lkey_table_size;
	lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
	rdi->lkey_table.table = (struct rvt_mregion __rcu **)
			       vmalloc_node(lk_tab_size, rdi->dparms.node);
	if (!rdi->lkey_table.table)
		return -ENOMEM;

	RCU_INIT_POINTER(rdi->dma_mr, NULL);
	for (i = 0; i < rdi->lkey_table.max; i++)
		RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);

	return 0;
}

/**
 * rvt_mr_exit - clean up MR
 * @rdi: rvt dev structure
 *
 * Called when drivers have unregistered, or perhaps failed to register
 * with us.
 */
void rvt_mr_exit(struct rvt_dev_info *rdi)
{
	if (rdi->dma_mr)
		rvt_pr_err(rdi, "DMA MR not null!\n");

	vfree(rdi->lkey_table.table);
}

static void rvt_deinit_mregion(struct rvt_mregion *mr)
{
	int i = mr->mapsz;

	mr->mapsz = 0;
	while (i)
		kfree(mr->map[--i]);
}

static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
			    int count)
{
	int m, i = 0;
	struct rvt_dev_info *dev = ib_to_rvt(pd->device);

	mr->mapsz = 0;
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
	for (; i < m; i++) {
		mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
					  dev->dparms.node);
		if (!mr->map[i]) {
			rvt_deinit_mregion(mr);
			return -ENOMEM;
		}
		mr->mapsz++;
	}
	init_completion(&mr->comp);
	/* count returning the ptr to user */
	atomic_set(&mr->refcount, 1);
	atomic_set(&mr->lkey_invalid, 0);
	mr->pd = pd;
	mr->max_segs = count;
	return 0;
}

/**
 * rvt_alloc_lkey - allocate an lkey
 * @mr: memory region that this lkey protects
 * @dma_region: 0->normal key, 1->restricted DMA key
 *
 * Returns 0 if successful, otherwise returns -errno.
 *
 * Increments mr reference count as required.
 *
 * Sets the lkey field of mr for non-DMA regions.
 */
static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret = 0;
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;

	rvt_get_mr(mr);
	spin_lock_irqsave(&rkt->lock, flags);

	/* special case for dma_mr lkey == 0 */
	if (dma_region) {
		struct rvt_mregion *tmr;

		tmr = rcu_access_pointer(dev->dma_mr);
		if (!tmr) {
			rcu_assign_pointer(dev->dma_mr, mr);
			mr->lkey_published = 1;
		} else {
			rvt_put_mr(mr);
		}
		goto success;
	}

	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (!rcu_access_pointer(rkt->table[r]))
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n)
			goto bail;
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	/*
	 * bits are capped to ensure enough bits for the generation number
	 */
	mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) |
		((((1 << (24 - dev->dparms.lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	if (mr->lkey == 0) {
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
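	/*
	 * Key layout illustration (added; not in the original source):
	 * with lkey_table_size == 16, index r == 5 and rkt->gen == 3,
	 * the expression above yields (5 << 16) | ((3 & 0xFF) << 8),
	 * i.e. 0x00050300: bits 31..16 index the table, bits 15..8 hold
	 * the generation tag, and bits 7..0 stay zero as the user-owned
	 * byte of the LKEY.
	 */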
	rcu_assign_pointer(rkt->table[r], mr);
	mr->lkey_published = 1;
success:
	spin_unlock_irqrestore(&rkt->lock, flags);
out:
	return ret;
bail:
	rvt_put_mr(mr);
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = -ENOMEM;
	goto out;
}

/**
 * rvt_free_lkey - free an lkey
 * @mr: mr to free from tables
 */
static void rvt_free_lkey(struct rvt_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	int freed = 0;

	spin_lock_irqsave(&rkt->lock, flags);
	if (!mr->lkey_published)
		goto out;
	if (lkey == 0) {
		RCU_INIT_POINTER(dev->dma_mr, NULL);
	} else {
		r = lkey >> (32 - dev->dparms.lkey_table_size);
		RCU_INIT_POINTER(rkt->table[r], NULL);
	}
	mr->lkey_published = 0;
	freed++;
out:
	spin_unlock_irqrestore(&rkt->lock, flags);
	if (freed) {
		synchronize_rcu();
		rvt_put_mr(mr);
	}
}

static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
{
	struct rvt_mr *mr;
	int rval = -ENOMEM;
	int m;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
	mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
	if (!mr)
		goto bail;

	rval = rvt_init_mregion(&mr->mr, pd, count);
	if (rval)
		goto bail;
	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	rval = rvt_alloc_lkey(&mr->mr, 0);
	if (rval)
		goto bail_mregion;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
done:
	return mr;

bail_mregion:
	rvt_deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	mr = ERR_PTR(rval);
	goto done;
}

static void __rvt_free_mr(struct rvt_mr *mr)
{
	rvt_deinit_mregion(&mr->mr);
	rvt_free_lkey(&mr->mr);
	kfree(mr);
}

/**
 * rvt_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Return: the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see dma.c).
 */
struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct rvt_mr *mr;
	struct ib_mr *ret;
	int rval;

	if (ibpd_to_rvtpd(pd)->user)
		return ERR_PTR(-EPERM);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	rval = rvt_init_mregion(&mr->mr, pd, 0);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail;
	}

	rval = rvt_alloc_lkey(&mr->mr, 1);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail_mregion;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;
done:
	return ret;

bail_mregion:
	rvt_deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	goto done;
}

/**
 * rvt_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: associated virtual address
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the driver
 *
 * Return: the memory region on success, otherwise returns an errno.
 */
struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct rvt_mr *mr;
	struct ib_umem *umem;
	struct scatterlist *sg;
	int n, m, entry;
	struct ib_mr *ret;

	if (length == 0)
		return ERR_PTR(-EINVAL);

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *)umem;

	n = umem->nmap;

	mr = __rvt_alloc_mr(n, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		goto bail_umem;
	}

	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);
	m = 0;
	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		void *vaddr;

		vaddr = page_address(sg_page(sg));
		if (!vaddr) {
			ret = ERR_PTR(-EINVAL);
			goto bail_inval;
		}
		mr->mr.map[m]->segs[n].vaddr = vaddr;
		mr->mr.map[m]->segs[n].length = umem->page_size;
		n++;
		if (n == RVT_SEGSZ) {
			m++;
			n = 0;
		}
	}
	return &mr->ibmr;

bail_inval:
	__rvt_free_mr(mr);

bail_umem:
	ib_umem_release(umem);

	return ret;
}

/**
 * rvt_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Note that this is called to free MRs created by rvt_get_dma_mr()
 * or rvt_reg_user_mr().
 *
 * Returns 0 on success.
 */
int rvt_dereg_mr(struct ib_mr *ibmr)
{
	struct rvt_mr *mr = to_imr(ibmr);
	struct rvt_dev_info *rdi = ib_to_rvt(ibmr->pd->device);
	int ret = 0;
	unsigned long timeout;

	rvt_free_lkey(&mr->mr);

	rvt_put_mr(&mr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&mr->mr.comp, 5 * HZ);
	if (!timeout) {
		rvt_pr_err(rdi,
			   "rvt_dereg_mr timeout mr %p pd %p refcount %u\n",
			   mr, mr->mr.pd, atomic_read(&mr->mr.refcount));
		rvt_get_mr(&mr->mr);
		ret = -EBUSY;
		goto out;
	}
	rvt_deinit_mregion(&mr->mr);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
out:
	return ret;
}

/**
 * rvt_alloc_mr - Allocate a memory region usable for fast registration
 * @pd: protection domain for this memory region
 * @mr_type: mem region type
 * @max_num_sg: Max number of segments allowed
 *
 * Return: the memory region on success, otherwise return an errno.
 */
struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
			   enum ib_mr_type mr_type,
			   u32 max_num_sg)
{
	struct rvt_mr *mr;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = __rvt_alloc_mr(max_num_sg, pd);
	if (IS_ERR(mr))
		return (struct ib_mr *)mr;

	return &mr->ibmr;
}

/**
 * rvt_set_page - page assignment function called by ib_sg_to_pages
 * @ibmr: memory region
 * @addr: dma address of mapped page
 *
 * Return: 0 on success
 */
static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rvt_mr *mr = to_imr(ibmr);
	u32 ps = 1 << mr->mr.page_shift;
	u32 mapped_segs = mr->mr.length >> mr->mr.page_shift;
	int m, n;

	if (unlikely(mapped_segs == mr->mr.max_segs))
		return -ENOMEM;

	if (mr->mr.length == 0) {
		mr->mr.user_base = addr;
		mr->mr.iova = addr;
	}

	m = mapped_segs / RVT_SEGSZ;
	n = mapped_segs % RVT_SEGSZ;
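	/*
	 * Added note: mapped_segs is a flat segment index split into a
	 * first-level slot m and a position n within map[m]->segs[].
	 * Purely for illustration (RVT_SEGSZ is defined elsewhere): if
	 * RVT_SEGSZ were 8, segment 13 would land in map[1]->segs[5].
	 */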
	mr->mr.map[m]->segs[n].vaddr = (void *)addr;
	mr->mr.map[m]->segs[n].length = ps;
	mr->mr.length += ps;

	return 0;
}

/**
 * rvt_map_mr_sg - map sg list and set it in the memory region
 * @ibmr: memory region
 * @sg: dma mapped scatterlist
 * @sg_nents: number of entries in sg
 * @sg_offset: offset in bytes into sg
 *
 * Return: number of sg elements mapped to the memory region
 */
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		  int sg_nents, unsigned int *sg_offset)
{
	struct rvt_mr *mr = to_imr(ibmr);

	mr->mr.length = 0;
	mr->mr.page_shift = PAGE_SHIFT;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
			      rvt_set_page);
}

/**
 * rvt_fast_reg_mr - fast register physical MR
 * @qp: the queue pair where the work request comes from
 * @ibmr: the memory region to be registered
 * @key: updated key for this memory region
 * @access: access flags for this memory region
 *
 * Returns 0 on success.
 */
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
		    int access)
{
	struct rvt_mr *mr = to_imr(ibmr);

	if (qp->ibqp.pd != mr->mr.pd)
		return -EACCES;

	/* not applicable to dma MR or user MR */
	if (!mr->mr.lkey || mr->umem)
		return -EINVAL;

	if ((key & 0xFFFFFF00) != (mr->mr.lkey & 0xFFFFFF00))
		return -EINVAL;
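
	/*
	 * Added note: the masked compare above allows only the low 8
	 * bits of the key to change; those are the "user-owned" LKEY
	 * bits laid out in rvt_alloc_lkey().
	 */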
	ibmr->lkey = key;
	ibmr->rkey = key;
	mr->mr.lkey = key;
	mr->mr.access_flags = access;
	atomic_set(&mr->mr.lkey_invalid, 0);

	return 0;
}
EXPORT_SYMBOL(rvt_fast_reg_mr);

/**
 * rvt_invalidate_rkey - invalidate an MR rkey
 * @qp: queue pair associated with the invalidate op
 * @rkey: rkey to invalidate
 *
 * Returns 0 on success.
 */
int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey)
{
	struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	struct rvt_mregion *mr;

	if (rkey == 0)
		return -EINVAL;

	rcu_read_lock();
	mr = rcu_dereference(
		rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	atomic_set(&mr->lkey_invalid, 1);
	rcu_read_unlock();
	return 0;

bail:
	rcu_read_unlock();
	return -EINVAL;
}
EXPORT_SYMBOL(rvt_invalidate_rkey);

/**
 * rvt_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Return: the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct rvt_fmr *fmr;
	int m;
	struct ib_fmr *ret;
	int rval = -ENOMEM;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
	fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
	if (!fmr)
		goto bail;

	rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages);
	if (rval)
		goto bail;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	rval = rvt_alloc_lkey(&fmr->mr, 0);
	if (rval)
		goto bail_mregion;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
done:
	return ret;

bail_mregion:
	rvt_deinit_mregion(&fmr->mr);
bail:
	kfree(fmr);
	ret = ERR_PTR(rval);
	goto done;
}

/**
 * rvt_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success
 */
int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct rvt_fmr *fmr = to_ifmr(ibfmr);
	struct rvt_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);

	i = atomic_read(&fmr->mr.refcount);
	if (i > 2)
		return -EBUSY;

	if (list_len > fmr->mr.max_segs)
		return -EINVAL;

	rkt = &rdi->lkey_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == RVT_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	return 0;
}

/**
 * rvt_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Return: 0 on success.
 */
int rvt_unmap_fmr(struct list_head *fmr_list)
{
	struct rvt_fmr *fmr;
	struct rvt_lkey_table *rkt;
	unsigned long flags;
	struct rvt_dev_info *rdi;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rdi = ib_to_rvt(fmr->ibfmr.device);
		rkt = &rdi->lkey_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}

/**
 * rvt_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Return: 0 on success.
 */
int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct rvt_fmr *fmr = to_ifmr(ibfmr);
	int ret = 0;
	unsigned long timeout;

	rvt_free_lkey(&fmr->mr);
	rvt_put_mr(&fmr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&fmr->mr.comp, 5 * HZ);
	if (!timeout) {
		rvt_get_mr(&fmr->mr);
		ret = -EBUSY;
		goto out;
	}
	rvt_deinit_mregion(&fmr->mr);
	kfree(fmr);
out:
	return ret;
}

/**
 * rvt_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain
 * @isge: outgoing internal SGE
 * @sge: SGE to check
 * @acc: access flags
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 *
 * Increments the reference count upon success.
 *
 * Return: 1 if valid and successful, otherwise returns 0.
 */
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct ib_sge *sge, int acc)
{
	struct rvt_mregion *mr;
	unsigned int n, m;
	size_t off;
	struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr() and dma.c).
	 */
	rcu_read_lock();
	if (sge->lkey == 0) {
		if (pd->user)
			goto bail;
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		atomic_inc(&mr->refcount);
		rcu_read_unlock();

		isge->mr = mr;
		isge->vaddr = (void *)sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	mr = rcu_dereference(
		rkt->table[(sge->lkey >> (32 - dev->dparms.lkey_table_size))]);
	if (unlikely(!mr || atomic_read(&mr->lkey_invalid) ||
		     mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
		goto bail;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail;
	atomic_inc(&mr->refcount);
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * page sizes are uniform power of 2 so no loop is necessary
		 * entries_spanned_by_off is the number of times the loop below
		 * would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
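		/*
		 * Worked example (added; not in the original source):
		 * with mr->page_shift == 12 (4 KiB pages) and
		 * off == 0x6100, entries_spanned_by_off is 6 and off
		 * becomes 0x100, i.e. the SGE starts 0x100 bytes into
		 * the seventh page-sized segment of the region.
		 */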
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= RVT_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rvt_lkey_ok);

/**
 * rvt_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: qp for validation
 * @sge: SGE state
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Increments the reference count upon success.
 *
 * Return: 1 if successful, otherwise 0.
 */
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	struct rvt_mregion *mr;
	unsigned int n, m;
	size_t off;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr() and dma.c).
	 */
	rcu_read_lock();
	if (rkey == 0) {
		struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
		struct rvt_dev_info *rdi = ib_to_rvt(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(rdi->dma_mr);
		if (!mr)
			goto bail;
		atomic_inc(&mr->refcount);
		rcu_read_unlock();

		sge->mr = mr;
		sge->vaddr = (void *)vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}

	mr = rcu_dereference(
		rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
	if (unlikely(!mr || atomic_read(&mr->lkey_invalid) ||
		     mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail;
	atomic_inc(&mr->refcount);
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * page sizes are uniform power of 2 so no loop is necessary
		 * entries_spanned_by_off is the number of times the loop below
		 * would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
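		/* Added note: see the worked example in rvt_lkey_ok(). */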
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= RVT_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rvt_rkey_ok);