/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include <rdma/rdma_vt.h>
#include "vt.h"
#include "mr.h"

/**
 * rvt_driver_mr_init - Init MR resources per driver
 * @rdi: rvt dev struct
 *
 * Do any initialization needed when a driver registers with rdmavt.
 *
 * Return: 0 on success or errno on failure
 */
int rvt_driver_mr_init(struct rvt_dev_info *rdi)
{
	unsigned int lkey_table_size = rdi->dparms.lkey_table_size;
	unsigned lk_tab_size;
	int i;

	/*
	 * The top lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
	if (!lkey_table_size)
		return -EINVAL;

	spin_lock_init(&rdi->lkey_table.lock);

	/* ensure generation is at least 4 bits */
	if (lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) {
		rvt_pr_warn(rdi, "lkey bits %u too large, reduced to %u\n",
			    lkey_table_size, RVT_MAX_LKEY_TABLE_BITS);
		rdi->dparms.lkey_table_size = RVT_MAX_LKEY_TABLE_BITS;
		lkey_table_size = rdi->dparms.lkey_table_size;
	}
	rdi->lkey_table.max = 1 << lkey_table_size;
	lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
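	/*
	 * The table can hold up to 2^RVT_MAX_LKEY_TABLE_BITS pointers, so
	 * allocate it with vmalloc on the device's local NUMA node.
	 */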
	rdi->lkey_table.table = (struct rvt_mregion __rcu **)
			       vmalloc_node(lk_tab_size, rdi->dparms.node);
	if (!rdi->lkey_table.table)
		return -ENOMEM;

	RCU_INIT_POINTER(rdi->dma_mr, NULL);
	for (i = 0; i < rdi->lkey_table.max; i++)
		RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);

	return 0;
}

/**
 * rvt_mr_exit - clean up MR
 * @rdi: rvt dev structure
 *
 * Called when drivers have unregistered or perhaps failed to register with us
 */
void rvt_mr_exit(struct rvt_dev_info *rdi)
{
	if (rdi->dma_mr)
		rvt_pr_err(rdi, "DMA MR not null!\n");

	vfree(rdi->lkey_table.table);
}

static void rvt_deinit_mregion(struct rvt_mregion *mr)
{
	int i = mr->mapsz;

	mr->mapsz = 0;
	while (i)
		kfree(mr->map[--i]);
}

static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
			    int count)
{
	int m, i = 0;

	mr->mapsz = 0;
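	/* Each map entry holds RVT_SEGSZ segments; round up to cover count. */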
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
	for (; i < m; i++) {
		mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
		if (!mr->map[i]) {
			rvt_deinit_mregion(mr);
			return -ENOMEM;
		}
		mr->mapsz++;
	}
	init_completion(&mr->comp);
	/* count returning the ptr to user */
	atomic_set(&mr->refcount, 1);
	mr->pd = pd;
	mr->max_segs = count;
	return 0;
}

/**
 * rvt_alloc_lkey - allocate an lkey
 * @mr: memory region that this lkey protects
 * @dma_region: 0->normal key, 1->restricted DMA key
 *
 * Returns 0 if successful, otherwise returns -errno.
 *
 * Increments mr reference count as required.
 *
 * Sets the lkey field of mr for non-dma regions.
 */
static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret = 0;
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;

	rvt_get_mr(mr);
	spin_lock_irqsave(&rkt->lock, flags);

	/* special case for dma_mr lkey == 0 */
	if (dma_region) {
		struct rvt_mregion *tmr;

		tmr = rcu_access_pointer(dev->dma_mr);
		if (!tmr) {
			rcu_assign_pointer(dev->dma_mr, mr);
			mr->lkey_published = 1;
		} else {
			rvt_put_mr(mr);
		}
		goto success;
	}

	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (!rcu_access_pointer(rkt->table[r]))
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n)
			goto bail;
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	/*
	 * bits are capped to ensure enough bits for generation number
	 */
	mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) |
		((((1 << (24 - dev->dparms.lkey_table_size)) - 1) & rkt->gen)
		 << 8);
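	/*
	 * For example, with lkey_table_size == 16 the resulting lkey is
	 * laid out as: bits 31:16 = table index r, bits 15:8 = low bits
	 * of the generation number, bits 7:0 = user-owned bits (zero here).
	 */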
	if (mr->lkey == 0) {
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
	rcu_assign_pointer(rkt->table[r], mr);
	mr->lkey_published = 1;
success:
	spin_unlock_irqrestore(&rkt->lock, flags);
out:
	return ret;
bail:
	rvt_put_mr(mr);
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = -ENOMEM;
	goto out;
}

/**
 * rvt_free_lkey - free an lkey
 * @mr: mr to free from tables
 */
static void rvt_free_lkey(struct rvt_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	int freed = 0;

	spin_lock_irqsave(&rkt->lock, flags);
	if (!mr->lkey_published)
		goto out;
	if (lkey == 0) {
		RCU_INIT_POINTER(dev->dma_mr, NULL);
	} else {
		r = lkey >> (32 - dev->dparms.lkey_table_size);
		RCU_INIT_POINTER(rkt->table[r], NULL);
	}
	mr->lkey_published = 0;
	freed++;
out:
	spin_unlock_irqrestore(&rkt->lock, flags);
	if (freed) {
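		/*
		 * Wait for any RCU readers that may still see the old
		 * table entry before dropping the table's reference.
		 */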
		synchronize_rcu();
		rvt_put_mr(mr);
	}
}

static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
{
	struct rvt_mr *mr;
	int rval = -ENOMEM;
	int m;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
	mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
	if (!mr)
		goto bail;

	rval = rvt_init_mregion(&mr->mr, pd, count);
	if (rval)
		goto bail;
	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	rval = rvt_alloc_lkey(&mr->mr, 0);
	if (rval)
		goto bail_mregion;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
done:
	return mr;

bail_mregion:
	rvt_deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	mr = ERR_PTR(rval);
	goto done;
}

static void __rvt_free_mr(struct rvt_mr *mr)
{
	rvt_deinit_mregion(&mr->mr);
	rvt_free_lkey(&mr->mr);
	/* mr was allocated with kzalloc() in __rvt_alloc_mr(), so kfree() it */
	kfree(mr);
}

/**
 * rvt_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Return: the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see dma.c).
 */
struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct rvt_mr *mr;
	struct ib_mr *ret;
	int rval;
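	/*
	 * The unrestricted DMA MR gives access to kernel virtual
	 * addresses, so it must never be created for a user context.
	 */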
	if (ibpd_to_rvtpd(pd)->user)
		return ERR_PTR(-EPERM);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	rval = rvt_init_mregion(&mr->mr, pd, 0);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail;
	}

	rval = rvt_alloc_lkey(&mr->mr, 1);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail_mregion;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;
done:
	return ret;

bail_mregion:
	rvt_deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	goto done;
}

/**
 * rvt_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: associated virtual address
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the driver
 *
 * Return: the memory region on success, otherwise returns an errno.
 */
struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct rvt_mr *mr;
	struct ib_umem *umem;
	struct scatterlist *sg;
	int n, m, entry;
	struct ib_mr *ret;

	if (length == 0)
		return ERR_PTR(-EINVAL);

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *)umem;

	n = umem->nmap;

	mr = __rvt_alloc_mr(n, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		goto bail_umem;
	}

	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);
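	/*
	 * page_shift is only set for power-of-2 page sizes; it lets
	 * rvt_lkey_ok()/rvt_rkey_ok() locate a segment with shift/mask
	 * arithmetic instead of walking the segment map.
	 */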
	m = 0;
	n = 0;
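	/* Walk the pinned pages and record each page's kernel virtual address. */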
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		void *vaddr;

		vaddr = page_address(sg_page(sg));
		if (!vaddr) {
			ret = ERR_PTR(-EINVAL);
			goto bail_inval;
		}
		mr->mr.map[m]->segs[n].vaddr = vaddr;
		mr->mr.map[m]->segs[n].length = umem->page_size;
		n++;
		if (n == RVT_SEGSZ) {
			m++;
			n = 0;
		}
	}
	return &mr->ibmr;

bail_inval:
	__rvt_free_mr(mr);

bail_umem:
	ib_umem_release(umem);

	return ret;
}

/**
 * rvt_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Note that this is called to free MRs created by rvt_get_dma_mr()
 * or rvt_reg_user_mr().
 *
 * Returns 0 on success.
 */
int rvt_dereg_mr(struct ib_mr *ibmr)
{
	struct rvt_mr *mr = to_imr(ibmr);
	struct rvt_dev_info *rdi = ib_to_rvt(ibmr->pd->device);
	int ret = 0;
	unsigned long timeout;

	rvt_free_lkey(&mr->mr);

	rvt_put_mr(&mr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&mr->mr.comp, 5 * HZ);
	if (!timeout) {
		rvt_pr_err(rdi,
			   "rvt_dereg_mr timeout mr %p pd %p refcount %u\n",
			   mr, mr->mr.pd, atomic_read(&mr->mr.refcount));
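		/* The MR is still in use; restore the reference we dropped and fail. */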
		rvt_get_mr(&mr->mr);
		ret = -EBUSY;
		goto out;
	}
	rvt_deinit_mregion(&mr->mr);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
out:
	return ret;
}

/**
 * rvt_alloc_mr - Allocate a memory region usable with the adapter
 * @pd: protection domain for this memory region
 * @mr_type: mem region type
 * @max_num_sg: Max number of segments allowed
 *
 * Return: the memory region on success, otherwise return an errno.
 */
struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
			   enum ib_mr_type mr_type,
			   u32 max_num_sg)
{
	struct rvt_mr *mr;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = __rvt_alloc_mr(max_num_sg, pd);
	if (IS_ERR(mr))
		return (struct ib_mr *)mr;

	return &mr->ibmr;
}

/**
 * rvt_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Return: the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct rvt_fmr *fmr;
	int m;
	struct ib_fmr *ret;
	int rval = -ENOMEM;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
	fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
	if (!fmr)
		goto bail;

	rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages);
	if (rval)
		goto bail;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	rval = rvt_alloc_lkey(&fmr->mr, 0);
	if (rval)
		goto bail_mregion;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
done:
	return ret;

bail_mregion:
	rvt_deinit_mregion(&fmr->mr);
bail:
	kfree(fmr);
	ret = ERR_PTR(rval);
	goto done;
}

/**
 * rvt_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success
 */
int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct rvt_fmr *fmr = to_ifmr(ibfmr);
	struct rvt_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);
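	/*
	 * The refcount is 1 from rvt_init_mregion() plus 1 for the
	 * published lkey; anything above 2 means the FMR is still
	 * referenced by outstanding work and cannot be remapped.
	 */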
	i = atomic_read(&fmr->mr.refcount);
	if (i > 2)
		return -EBUSY;

	if (list_len > fmr->mr.max_segs)
		return -EINVAL;

	rkt = &rdi->lkey_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == RVT_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	return 0;
}

/**
 * rvt_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Return: 0 on success.
 */
int rvt_unmap_fmr(struct list_head *fmr_list)
{
	struct rvt_fmr *fmr;
	struct rvt_lkey_table *rkt;
	unsigned long flags;
	struct rvt_dev_info *rdi;
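	/* Invalidate each FMR's mapping; the lkey itself stays allocated. */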
	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rdi = ib_to_rvt(fmr->ibfmr.device);
		rkt = &rdi->lkey_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}

/**
 * rvt_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Return: 0 on success.
 */
int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct rvt_fmr *fmr = to_ifmr(ibfmr);
	int ret = 0;
	unsigned long timeout;

	rvt_free_lkey(&fmr->mr);
	rvt_put_mr(&fmr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&fmr->mr.comp, 5 * HZ);
	if (!timeout) {
		rvt_get_mr(&fmr->mr);
		ret = -EBUSY;
		goto out;
	}
	rvt_deinit_mregion(&fmr->mr);
	kfree(fmr);
out:
	return ret;
}

/**
 * rvt_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain
 * @isge: outgoing internal SGE
 * @sge: SGE to check
 * @acc: access flags
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 *
 * Return: 1 if valid and successful, otherwise returns 0.
 *
 * increments the reference count upon success
 */
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct ib_sge *sge, int acc)
{
	struct rvt_mregion *mr;
	unsigned n, m;
	size_t off;
	struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr and dma.c).
	 */
	rcu_read_lock();
	if (sge->lkey == 0) {
		if (pd->user)
			goto bail;
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		atomic_inc(&mr->refcount);
		rcu_read_unlock();

		isge->mr = mr;
		isge->vaddr = (void *)sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	mr = rcu_dereference(
		rkt->table[(sge->lkey >> (32 - dev->dparms.lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
		goto bail;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail;
	atomic_inc(&mr->refcount);
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * page sizes are uniform power of 2 so no loop is necessary
		 * entries_spanned_by_off is the number of times the loop below
		 * would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
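		/* m indexes the first-level map block, n the segment within it. */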
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= RVT_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rvt_lkey_ok);

/**
 * rvt_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: qp for validation
 * @sge: SGE state
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Return: 1 if successful, otherwise 0.
 *
 * increments the reference count upon success
 */
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	struct rvt_mregion *mr;
	unsigned n, m;
	size_t off;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr and dma.c).
	 */
	rcu_read_lock();
	if (rkey == 0) {
		struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
		struct rvt_dev_info *rdi = ib_to_rvt(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(rdi->dma_mr);
		if (!mr)
			goto bail;
		atomic_inc(&mr->refcount);
		rcu_read_unlock();

		sge->mr = mr;
		sge->vaddr = (void *)vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}

	mr = rcu_dereference(
		rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail;
	atomic_inc(&mr->refcount);
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * page sizes are uniform power of 2 so no loop is necessary
		 * entries_spanned_by_off is the number of times the loop below
		 * would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= RVT_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rvt_rkey_ok);