/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include <rdma/rdma_vt.h>
#include "vt.h"
#include "mr.h"
#include "trace.h"
/**
 * rvt_driver_mr_init - Init MR resources per driver
 * @rdi: rvt dev struct
 *
 * Do any initialization needed when a driver registers with rdmavt.
 *
 * Return: 0 on success or errno on failure
 */
int rvt_driver_mr_init(struct rvt_dev_info *rdi)
{
	unsigned int lkey_table_size = rdi->dparms.lkey_table_size;
	unsigned lk_tab_size;
	int i;

	/*
	 * The top hfi1_lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
	spin_lock_init(&rdi->lkey_table.lock);

	/* ensure generation is at least 4 bits */
	if (lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) {
		rvt_pr_warn(rdi, "lkey bits %u too large, reduced to %u\n",
			    lkey_table_size, RVT_MAX_LKEY_TABLE_BITS);
		rdi->dparms.lkey_table_size = RVT_MAX_LKEY_TABLE_BITS;
		lkey_table_size = rdi->dparms.lkey_table_size;
	}
	rdi->lkey_table.max = 1 << lkey_table_size;
	rdi->lkey_table.shift = 32 - lkey_table_size;
	lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
	rdi->lkey_table.table = (struct rvt_mregion __rcu **)
			       vmalloc_node(lk_tab_size, rdi->dparms.node);
	if (!rdi->lkey_table.table)
		return -ENOMEM;

	RCU_INIT_POINTER(rdi->dma_mr, NULL);
	for (i = 0; i < rdi->lkey_table.max; i++)
		RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);

	return 0;
}
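
/*
 * Illustrative sketch (not part of the driver): how the table geometry set
 * up above follows from dparms.lkey_table_size.  Assuming a driver asked
 * for 16 bits, the values mirror the assignments in rvt_driver_mr_init():
 *
 *	unsigned int bits = 16;			// rdi->dparms.lkey_table_size
 *	u32 max = 1U << bits;			// 65536 table slots
 *	u32 shift = 32 - bits;			// table index = lkey >> 16
 *	size_t bytes = max * sizeof(struct rvt_mregion __rcu *);
 *
 * i.e. the top 'bits' bits of an LKEY select the slot, and the table
 * itself costs one pointer per slot.
 */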

/**
 * rvt_mr_exit - clean up MR
 * @rdi: rvt dev structure
 *
 * called when drivers have unregistered or perhaps failed to register with us
 */
void rvt_mr_exit(struct rvt_dev_info *rdi)
{
	if (rdi->dma_mr)
		rvt_pr_err(rdi, "DMA MR not null!\n");

	vfree(rdi->lkey_table.table);
}

static void rvt_deinit_mregion(struct rvt_mregion *mr)
{
	int i = mr->mapsz;

	mr->mapsz = 0;
	while (i)
		kfree(mr->map[--i]);
	percpu_ref_exit(&mr->refcount);
}

static void __rvt_mregion_complete(struct percpu_ref *ref)
{
	struct rvt_mregion *mr = container_of(ref, struct rvt_mregion,
					      refcount);

	complete(&mr->comp);
}

static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
			    int count, unsigned int percpu_flags)
{
	int m, i = 0;
	struct rvt_dev_info *dev = ib_to_rvt(pd->device);

	mr->mapsz = 0;
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
	for (; i < m; i++) {
		mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
					  dev->dparms.node);
		if (!mr->map[i])
			goto bail;
		mr->mapsz++;
	}
	init_completion(&mr->comp);
	/* count returning the ptr to user */
	if (percpu_ref_init(&mr->refcount, &__rvt_mregion_complete,
			    percpu_flags, GFP_KERNEL))
		goto bail;

	atomic_set(&mr->lkey_invalid, 0);
	mr->pd = pd;
	mr->max_segs = count;
	return 0;
bail:
	rvt_deinit_mregion(mr);
	return -ENOMEM;
}

/**
 * rvt_alloc_lkey - allocate an lkey
 * @mr: memory region that this lkey protects
 * @dma_region: 0->normal key, 1->restricted DMA key
 *
 * Returns 0 if successful, otherwise returns -errno.
 *
 * Increments mr reference count as required.
 *
 * Sets the lkey field of mr for non-dma regions.
 */
static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret = 0;
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;

	rvt_get_mr(mr);
	spin_lock_irqsave(&rkt->lock, flags);

	/* special case for dma_mr lkey == 0 */
	if (dma_region) {
		struct rvt_mregion *tmr;

		tmr = rcu_access_pointer(dev->dma_mr);
		if (!tmr) {
			mr->lkey_published = 1;
			/* Ensure published is written first */
			rcu_assign_pointer(dev->dma_mr, mr);
			rvt_get_mr(mr);
		}
		goto success;
	}

	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (!rcu_access_pointer(rkt->table[r]))
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n)
			goto bail;
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	/*
	 * bits are capped to ensure enough bits for generation number
	 */
	mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) |
		((((1 << (24 - dev->dparms.lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	if (mr->lkey == 0) {
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
	mr->lkey_published = 1;
	/* Ensure published is written first */
	rcu_assign_pointer(rkt->table[r], mr);
success:
	spin_unlock_irqrestore(&rkt->lock, flags);
out:
	return ret;
bail:
	rvt_put_mr(mr);
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = -ENOMEM;
	goto out;
}
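
/*
 * Worked example (illustrative only) of the lkey composition above:
 * assuming lkey_table_size = 16, table slot r = 5 and rkt->gen = 3,
 *
 *	u32 lkey = (5 << (32 - 16)) |
 *		   ((((1 << (24 - 16)) - 1) & 3) << 8);   // 0x00050300
 *
 * the top 16 bits carry the table index, bits 8..23 carry the capped
 * generation number, and the low 8 bits are left to the consumer, matching
 * the layout described in rvt_driver_mr_init().
 */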

/**
 * rvt_free_lkey - free an lkey
 * @mr: mr to free from tables
 */
static void rvt_free_lkey(struct rvt_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	int freed = 0;

	spin_lock_irqsave(&rkt->lock, flags);
	if (!lkey) {
		if (mr->lkey_published) {
			mr->lkey_published = 0;
			/* ensure published is written before pointer */
			rcu_assign_pointer(dev->dma_mr, NULL);
			rvt_put_mr(mr);
		}
	} else {
		if (!mr->lkey_published)
			goto out;
		r = lkey >> (32 - dev->dparms.lkey_table_size);
		mr->lkey_published = 0;
		/* ensure published is written before pointer */
		rcu_assign_pointer(rkt->table[r], NULL);
	}
	freed++;
out:
	spin_unlock_irqrestore(&rkt->lock, flags);
	if (freed)
		percpu_ref_kill(&mr->refcount);
}

static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
{
	struct rvt_mr *mr;
	int rval = -ENOMEM;
	int m;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
	mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
	if (!mr)
		goto bail;

	rval = rvt_init_mregion(&mr->mr, pd, count, 0);
	if (rval)
		goto bail;
	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	rval = rvt_alloc_lkey(&mr->mr, 0);
	if (rval)
		goto bail_mregion;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
done:
	return mr;

bail_mregion:
	rvt_deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	mr = ERR_PTR(rval);
	goto done;
}

static void __rvt_free_mr(struct rvt_mr *mr)
{
	rvt_free_lkey(&mr->mr);
	rvt_deinit_mregion(&mr->mr);
	kfree(mr);
}

/**
 * rvt_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Return: the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the functions in
 * struct dma_virt_ops.
 */
struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct rvt_mr *mr;
	struct ib_mr *ret;
	int rval;

	if (ibpd_to_rvtpd(pd)->user)
		return ERR_PTR(-EPERM);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	rval = rvt_init_mregion(&mr->mr, pd, 0, 0);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail;
	}

	rval = rvt_alloc_lkey(&mr->mr, 1);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail_mregion;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;
done:
	return ret;

bail_mregion:
	rvt_deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	goto done;
}

/**
 * rvt_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: associated virtual address
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the driver
 *
 * Return: the memory region on success, otherwise returns an errno.
 */
struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct rvt_mr *mr;
	struct ib_umem *umem;
	struct scatterlist *sg;
	int n, m, entry;
	struct ib_mr *ret;

	if (length == 0)
		return ERR_PTR(-EINVAL);

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *)umem;

	n = umem->nmap;

	mr = __rvt_alloc_mr(n, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		goto bail_umem;
	}

	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	mr->mr.page_shift = umem->page_shift;
	m = 0;
	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		void *vaddr;

		vaddr = page_address(sg_page(sg));
		if (!vaddr) {
			ret = ERR_PTR(-EINVAL);
			goto bail_inval;
		}
		mr->mr.map[m]->segs[n].vaddr = vaddr;
		mr->mr.map[m]->segs[n].length = BIT(umem->page_shift);
		trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr,
				      BIT(umem->page_shift));
		n++;
		if (n == RVT_SEGSZ) {
			m++;
			n = 0;
		}
	}
	return &mr->ibmr;

bail_inval:
	__rvt_free_mr(mr);
bail_umem:
	ib_umem_release(umem);
	return ret;
}
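
/*
 * Sketch (assumptions noted, not driver code): the m/n bookkeeping used in
 * rvt_reg_user_mr() above simply spreads page i of the umem across the
 * two-level map, RVT_SEGSZ segments per first-level entry:
 *
 *	size_t i, m, n;
 *	for (i = 0; i < npages; i++) {		// npages is hypothetical
 *		m = i / RVT_SEGSZ;		// first-level map index
 *		n = i % RVT_SEGSZ;		// segment slot within map[m]
 *		// map[m]->segs[n] then records the page's vaddr and size
 *	}
 */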

/**
 * rvt_dereg_clean_qp_cb - callback from iterator
 * @qp - the qp
 * @v - the mregion (as u64)
 *
 * This routine fields the callback for all QPs and
 * for QPs in the same PD as the MR will call the
 * rvt_qp_mr_clean() to potentially cleanup references.
 */
static void rvt_dereg_clean_qp_cb(struct rvt_qp *qp, u64 v)
{
	struct rvt_mregion *mr = (struct rvt_mregion *)v;

	/* skip PDs that are not ours */
	if (mr->pd != qp->ibqp.pd)
		return;
	rvt_qp_mr_clean(qp, mr->lkey);
}

/**
 * rvt_dereg_clean_qps - find QPs for reference cleanup
 * @mr - the MR that is being deregistered
 *
 * This routine iterates RC QPs looking for references
 * to the lkey noted in mr.
 */
static void rvt_dereg_clean_qps(struct rvt_mregion *mr)
{
	struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);

	rvt_qp_iter(rdi, (u64)mr, rvt_dereg_clean_qp_cb);
}

/**
 * rvt_check_refs - check references
 * @mr - the mregion
 * @t - the caller identification
 *
 * This routine checks MRs holding a reference while
 * being de-registered.
 *
 * If the count is non-zero, the code calls a clean routine then
 * waits for the timeout for the count to zero.
 */
static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
{
	unsigned long timeout;
	struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);

	if (percpu_ref_is_zero(&mr->refcount))
		return 0;
	/* avoid dma mr */
	if (mr->lkey)
		rvt_dereg_clean_qps(mr);
	timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ);
	if (!timeout) {
		rvt_pr_err(rdi,
			   "%s timeout mr %p pd %p lkey %x refcount %ld\n",
			   t, mr, mr->pd, mr->lkey,
			   atomic_long_read(&mr->refcount.count));
		rvt_dereg_clean_qps(mr);
		return -EBUSY;
	}
	return 0;
}

/**
 * rvt_mr_has_lkey - does this MR own the given lkey
 * @mr - the mregion
 * @lkey - the lkey
 */
bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey)
{
	return mr && lkey == mr->lkey;
}

/**
 * rvt_ss_has_lkey - is the lkey referenced in this sge state
 * @ss - the sge state
 * @lkey - the lkey
 *
 * This code tests for an MR in the indicated
 * sge state.
 */
bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey)
{
	int i;
	bool rval = false;

	if (!ss->num_sge)
		return rval;
	/* first one */
	rval = rvt_mr_has_lkey(ss->sge.mr, lkey);
	/* any others */
	for (i = 0; !rval && i < ss->num_sge - 1; i++)
		rval = rvt_mr_has_lkey(ss->sg_list[i].mr, lkey);
	return rval;
}

/**
 * rvt_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Note that this is called to free MRs created by rvt_get_dma_mr()
 * or rvt_reg_user_mr().
 *
 * Returns 0 on success.
 */
int rvt_dereg_mr(struct ib_mr *ibmr)
{
	struct rvt_mr *mr = to_imr(ibmr);
	int ret;

	rvt_free_lkey(&mr->mr);

	rvt_put_mr(&mr->mr); /* will set completion if last */
	ret = rvt_check_refs(&mr->mr, __func__);
	if (ret)
		goto out;
	rvt_deinit_mregion(&mr->mr);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
out:
	return ret;
}

/**
 * rvt_alloc_mr - Allocate a memory region usable with the kernel verbs API
 * @pd: protection domain for this memory region
 * @mr_type: mem region type
 * @max_num_sg: Max number of segments allowed
 *
 * Return: the memory region on success, otherwise return an errno.
 */
struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
			   enum ib_mr_type mr_type,
			   u32 max_num_sg)
{
	struct rvt_mr *mr;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = __rvt_alloc_mr(max_num_sg, pd);
	if (IS_ERR(mr))
		return (struct ib_mr *)mr;

	return &mr->ibmr;
}

/**
 * rvt_set_page - page assignment function called by ib_sg_to_pages
 * @ibmr: memory region
 * @addr: dma address of mapped page
 *
 * Return: 0 on success
 */
static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rvt_mr *mr = to_imr(ibmr);
	u32 ps = 1 << mr->mr.page_shift;
	u32 mapped_segs = mr->mr.length >> mr->mr.page_shift;
	int m, n;

	if (unlikely(mapped_segs == mr->mr.max_segs))
		return -ENOMEM;

	if (mr->mr.length == 0) {
		mr->mr.user_base = addr;
		mr->mr.iova = addr;
	}

	m = mapped_segs / RVT_SEGSZ;
	n = mapped_segs % RVT_SEGSZ;
	mr->mr.map[m]->segs[n].vaddr = (void *)addr;
	mr->mr.map[m]->segs[n].length = ps;
	trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);
	mr->mr.length += ps;

	return 0;
}

/**
 * rvt_map_mr_sg - map sg list and set it in the memory region
 * @ibmr: memory region
 * @sg: dma mapped scatterlist
 * @sg_nents: number of entries in sg
 * @sg_offset: offset in bytes into sg
 *
 * Return: number of sg elements mapped to the memory region
 */
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		  int sg_nents, unsigned int *sg_offset)
{
	struct rvt_mr *mr = to_imr(ibmr);

	mr->mr.length = 0;
	mr->mr.page_shift = PAGE_SHIFT;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
			      rvt_set_page);
}
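
/*
 * Worked example (illustrative): rvt_set_page() derives the next free
 * segment purely from the running mr->mr.length.  With PAGE_SHIFT = 12 and
 * three 4 KiB pages already mapped, length is 3 * 4096, so
 *
 *	mapped_segs = (3 * 4096) >> 12;		// 3
 *	m = mapped_segs / RVT_SEGSZ;		// first-level map index
 *	n = mapped_segs % RVT_SEGSZ;		// slot within map[m]
 *
 * and the new page becomes segment 3 while length grows by one page size.
 */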

/**
 * rvt_fast_reg_mr - fast register physical MR
 * @qp: the queue pair where the work request comes from
 * @ibmr: the memory region to be registered
 * @key: updated key for this memory region
 * @access: access flags for this memory region
 *
 * Returns 0 on success.
 */
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
		    int access)
{
	struct rvt_mr *mr = to_imr(ibmr);

	if (qp->ibqp.pd != mr->mr.pd)
		return -EACCES;

	/* not applicable to dma MR or user MR */
	if (!mr->mr.lkey || mr->umem)
		return -EINVAL;

	if ((key & 0xFFFFFF00) != (mr->mr.lkey & 0xFFFFFF00))
		return -EINVAL;

	ibmr->lkey = key;
	ibmr->rkey = key;
	mr->mr.lkey = key;
	mr->mr.access_flags = access;
	atomic_set(&mr->mr.lkey_invalid, 0);

	return 0;
}
EXPORT_SYMBOL(rvt_fast_reg_mr);

/**
 * rvt_invalidate_rkey - invalidate an MR rkey
 * @qp: queue pair associated with the invalidate op
 * @rkey: rkey to invalidate
 *
 * Returns 0 on success.
 */
int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey)
{
	struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	struct rvt_mregion *mr;

	if (rkey == 0)
		return -EINVAL;

	rcu_read_lock();
	mr = rcu_dereference(
		rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	atomic_set(&mr->lkey_invalid, 1);
	rcu_read_unlock();
	return 0;

bail:
	rcu_read_unlock();
	return -EINVAL;
}
EXPORT_SYMBOL(rvt_invalidate_rkey);
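
/*
 * Sketch (not driver code): recovering the table slot from an rkey, as the
 * rcu_dereference() above does.  Assuming lkey_table_size = 16 and the
 * example key 0x00050300 from rvt_alloc_lkey():
 *
 *	u32 slot = 0x00050300 >> (32 - 16);	// 5
 *
 * Only the top lkey_table_size bits matter for the lookup; the generation
 * and user bits are validated against mr->lkey afterwards.
 */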

/**
 * rvt_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Return: the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct rvt_fmr *fmr;
	int m;
	struct ib_fmr *ret;
	int rval;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
	fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
	if (!fmr)
		goto bail;

	rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages,
				PERCPU_REF_INIT_ATOMIC);
	if (rval)
		goto bail;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	rval = rvt_alloc_lkey(&fmr->mr, 0);
	if (rval)
		goto bail_mregion;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
done:
	return ret;

bail_mregion:
	rvt_deinit_mregion(&fmr->mr);
bail:
	kfree(fmr);
	ret = ERR_PTR(rval);
	goto done;
}

/**
 * rvt_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success
 */
int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct rvt_fmr *fmr = to_ifmr(ibfmr);
	struct rvt_lkey_table *rkt;
	unsigned long flags;
	int m, n;
	unsigned long i;
	u32 ps;
	struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);

	i = atomic_long_read(&fmr->mr.refcount.count);
	if (i > 2)
		return -EBUSY;

	if (list_len > fmr->mr.max_segs)
		return -EINVAL;

	rkt = &rdi->lkey_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps);
		if (++n == RVT_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	return 0;
}

/**
 * rvt_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Return: 0 on success.
 */
int rvt_unmap_fmr(struct list_head *fmr_list)
{
	struct rvt_fmr *fmr;
	struct rvt_lkey_table *rkt;
	unsigned long flags;
	struct rvt_dev_info *rdi;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rdi = ib_to_rvt(fmr->ibfmr.device);
		rkt = &rdi->lkey_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}

/**
 * rvt_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Return: 0 on success.
 */
int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct rvt_fmr *fmr = to_ifmr(ibfmr);
	int ret = 0;

	rvt_free_lkey(&fmr->mr);
	rvt_put_mr(&fmr->mr); /* will set completion if last */
	ret = rvt_check_refs(&fmr->mr, __func__);
	if (ret)
		goto out;
	rvt_deinit_mregion(&fmr->mr);
	kfree(fmr);
out:
	return ret;
}

/**
 * rvt_sge_adjacent - is isge compressible
 * @last_sge: last outgoing SGE written
 * @sge: SGE to check
 *
 * If adjacent will update last_sge to add length.
 *
 * Return: true if isge is adjacent to last sge
 */
static inline bool rvt_sge_adjacent(struct rvt_sge *last_sge,
				    struct ib_sge *sge)
{
	if (last_sge && sge->lkey == last_sge->mr->lkey &&
	    ((uint64_t)(last_sge->vaddr + last_sge->length) == sge->addr)) {
		if (sge->lkey) {
			if (unlikely((sge->addr - last_sge->mr->user_base +
			      sge->length > last_sge->mr->length)))
				return false; /* overrun, caller will catch */
		} else {
			last_sge->length += sge->length;
		}
		last_sge->sge_length += sge->length;
		trace_rvt_sge_adjacent(last_sge, sge);
		return true;
	}
	return false;
}

/**
 * rvt_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain
 * @isge: outgoing internal SGE
 * @last_sge: last outgoing SGE written
 * @sge: SGE to check
 * @acc: access flags
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 *
 * Increments the reference count when a new sge is stored.
 *
 * Return: 0 if compressed, 1 if added, otherwise returns -errno.
 */
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct rvt_sge *last_sge,
		struct ib_sge *sge, int acc)
{
	struct rvt_mregion *mr;
	unsigned n, m;
	size_t off;

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr() and dma_virt_ops).
	 */
	if (sge->lkey == 0) {
		struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);

		if (pd->user)
			return -EINVAL;
		if (rvt_sge_adjacent(last_sge, sge))
			return 0;
		rcu_read_lock();
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		rvt_get_mr(mr);
		rcu_read_unlock();

		isge->mr = mr;
		isge->vaddr = (void *)sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	if (rvt_sge_adjacent(last_sge, sge))
		return 0;
	rcu_read_lock();
	mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]);
	if (!mr)
		goto bail;
	rvt_get_mr(mr);
	if (!READ_ONCE(mr->lkey_published))
		goto bail_unref;

	if (unlikely(atomic_read(&mr->lkey_invalid) ||
		     mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
		goto bail_unref;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail_unref;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * page sizes are uniform power of 2 so no loop is necessary
		 * entries_spanned_by_off is the number of times the loop below
		 * would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= RVT_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	trace_rvt_sge_new(isge, sge);
	return 1;
bail_unref:
	rvt_put_mr(mr);
bail:
	rcu_read_unlock();
	return -EINVAL;
}
EXPORT_SYMBOL(rvt_lkey_ok);
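
/*
 * Worked example (illustrative) of the page_shift fast path above: for a
 * byte offset of 0x6010 into an MR built from 4 KiB pages (page_shift = 12)
 * and assuming RVT_SEGSZ were 8,
 *
 *	entries_spanned_by_off = 0x6010 >> 12;	// 6 whole segments skipped
 *	off = 0x6010 - (6 << 12);		// 0x10 bytes into segment 6
 *	m = 6 / 8;				// 0 -> map[0]
 *	n = 6 % 8;				// 6 -> segs[6]
 *
 * which lands on the same segment the generic while loop would reach by
 * walking one entry at a time.
 */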

/**
 * rvt_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: qp for validation
 * @sge: SGE state
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Return: 1 if successful, otherwise 0.
 *
 * increments the reference count upon success
 */
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	struct rvt_mregion *mr;
	unsigned n, m;
	size_t off;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr() and dma_virt_ops).
	 */
	rcu_read_lock();
	if (rkey == 0) {
		struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
		struct rvt_dev_info *rdi = ib_to_rvt(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(rdi->dma_mr);
		if (!mr)
			goto bail;
		rvt_get_mr(mr);
		rcu_read_unlock();

		sge->mr = mr;
		sge->vaddr = (void *)vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}

	mr = rcu_dereference(rkt->table[rkey >> rkt->shift]);
	if (!mr)
		goto bail;
	rvt_get_mr(mr);
	/* ensure mr read is before test */
	if (!READ_ONCE(mr->lkey_published))
		goto bail_unref;
	if (unlikely(atomic_read(&mr->lkey_invalid) ||
		     mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail_unref;

	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail_unref;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * page sizes are uniform power of 2 so no loop is necessary
		 * entries_spanned_by_off is the number of times the loop below
		 * would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= RVT_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	return 1;
bail_unref:
	rvt_put_mr(mr);
bail:
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rvt_rkey_ok);