/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <linux/kernel.h>

#include "mlx5_ib.h"
#include "cmd.h"

#define MAX_PREFETCH_LEN (4*1024*1024U)

/* Timeout in ms to wait for an active mmu notifier to complete when handling
 * a pagefault. */
#define MMU_NOTIFIER_TIMEOUT 1000
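
/*
 * Implicit (indirect) ODP MRs are built from fixed-size leaves: each child
 * MTT MR covers MLX5_IMR_MTT_SIZE (1GB) of virtual address space, and the
 * parent MR describes the leaves with KSM entries of the same size.
 */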
#define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
#define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
#define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
#define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
#define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))

#define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT

static u64 mlx5_imr_ksm_entries;
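
/*
 * The helpers below walk the per-ucontext interval tree of ODP umems to find
 * the leaf umems that belong to a given implicit MR.
 */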
static int check_parent(struct ib_umem_odp *odp,
			struct mlx5_ib_mr *parent)
{
	struct mlx5_ib_mr *mr = odp->private;

	return mr && mr->parent == parent && !odp->dying;
}

static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
{
	struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
	struct ib_ucontext *ctx = odp->umem->context;
	struct rb_node *rb;

	down_read(&ctx->umem_rwsem);
	while (1) {
		rb = rb_next(&odp->interval_tree.rb);
		if (!rb)
			goto not_found;
		odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
		if (check_parent(odp, parent))
			goto end;
	}
not_found:
	odp = NULL;
end:
	up_read(&ctx->umem_rwsem);
	return odp;
}

static struct ib_umem_odp *odp_lookup(struct ib_ucontext *ctx,
				      u64 start, u64 length,
				      struct mlx5_ib_mr *parent)
{
	struct ib_umem_odp *odp;
	struct rb_node *rb;

	down_read(&ctx->umem_rwsem);
	odp = rbt_ib_umem_lookup(&ctx->umem_tree, start, length);
	if (!odp)
		goto end;

	while (1) {
		if (check_parent(odp, parent))
			goto end;
		rb = rb_next(&odp->interval_tree.rb);
		if (!rb)
			goto not_found;
		odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
		if (ib_umem_start(odp->umem) > start + length)
			goto not_found;
	}
not_found:
	odp = NULL;
end:
	up_read(&ctx->umem_rwsem);
	return odp;
}
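
/*
 * Fill the KLM entries of an implicit MR's indirect mkey: entries whose leaf
 * umem is present point at the child MR's lkey; all others point at the
 * device's null mkey, so that accesses to unpopulated ranges generate page
 * faults.
 */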
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
			   size_t nentries, struct mlx5_ib_mr *mr, int flags)
{
	struct ib_pd *pd = mr->ibmr.pd;
	struct ib_ucontext *ctx = pd->uobject->context;
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem_odp *odp;
	unsigned long va;
	int i;

	if (flags & MLX5_IB_UPD_XLT_ZAP) {
		for (i = 0; i < nentries; i++, pklm++) {
			pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
			pklm->key = cpu_to_be32(dev->null_mkey);
			pklm->va = 0;
		}
		return;
	}

	odp = odp_lookup(ctx, offset * MLX5_IMR_MTT_SIZE,
			 nentries * MLX5_IMR_MTT_SIZE, mr);

	for (i = 0; i < nentries; i++, pklm++) {
		pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
		va = (offset + i) * MLX5_IMR_MTT_SIZE;
		if (odp && odp->umem->address == va) {
			struct mlx5_ib_mr *mtt = odp->private;

			pklm->key = cpu_to_be32(mtt->ibmr.lkey);
			odp = odp_next(odp);
		} else {
			pklm->key = cpu_to_be32(dev->null_mkey);
		}
		mlx5_ib_dbg(dev, "[%d] va %lx key %x\n",
			    i, va, be32_to_cpu(pklm->key));
	}
}
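
/*
 * Deferred teardown of one leaf MR of an implicit MR: wait for in-flight
 * page-fault handlers (SRCU readers), release the leaf umem, refresh the
 * parent's KSM entry (which now resolves to the null mkey) and return the
 * child MR to the MR cache. Dropping the last pending leaf wakes any waiter
 * in mlx5_ib_free_implicit_mr().
 */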
static void mr_leaf_free_action(struct work_struct *work)
{
	struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
	int idx = ib_umem_start(odp->umem) >> MLX5_IMR_MTT_SHIFT;
	struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;

	mr->parent = NULL;
	synchronize_srcu(&mr->dev->mr_srcu);

	ib_umem_release(odp->umem);
	if (imr->live)
		mlx5_ib_update_xlt(imr, idx, 1, 0,
				   MLX5_IB_UPD_XLT_INDIRECT |
				   MLX5_IB_UPD_XLT_ATOMIC);
	mlx5_mr_cache_free(mr->dev, mr);

	if (atomic_dec_and_test(&imr->num_leaf_free))
		wake_up(&imr->q_leaf_free);
}
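
/*
 * MMU-notifier invalidation callback: zap the affected device MTTs (in
 * chunks, to limit the number of UMR operations), then unmap and release the
 * DMA mappings. If this leaves an implicit-MR leaf without any mapped pages,
 * schedule the leaf for deferred teardown.
 */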
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end)
{
	struct mlx5_ib_mr *mr;
	const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
				    sizeof(struct mlx5_mtt)) - 1;
	u64 idx = 0, blk_start_idx = 0;
	int in_block = 0;
	u64 addr;

	if (!umem || !umem->odp_data) {
		pr_err("invalidation called on NULL umem or non-ODP umem\n");
		return;
	}

	mr = umem->odp_data->private;

	if (!mr || !mr->ibmr.pd)
		return;

	start = max_t(u64, ib_umem_start(umem), start);
	end = min_t(u64, ib_umem_end(umem), end);

	/*
	 * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
	 * while we are doing the invalidation, no page fault will attempt to
	 * overwrite the same MTTs. Concurrent invalidations might race us,
	 * but they will write 0s as well, so no difference in the end result.
	 */

	for (addr = start; addr < end; addr += BIT(umem->page_shift)) {
		idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
		/*
		 * Strive to write the MTTs in chunks, but avoid overwriting
		 * non-existing MTTs. The heuristic here can be improved to
		 * estimate the cost of another UMR vs. the cost of bigger
		 * UMR.
		 */
		if (umem->odp_data->dma_list[idx] &
		    (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
			if (!in_block) {
				blk_start_idx = idx;
				in_block = 1;
			}
		} else {
			u64 umr_offset = idx & umr_block_mask;

			if (in_block && umr_offset == 0) {
				mlx5_ib_update_xlt(mr, blk_start_idx,
						   idx - blk_start_idx, 0,
						   MLX5_IB_UPD_XLT_ZAP |
						   MLX5_IB_UPD_XLT_ATOMIC);
				in_block = 0;
			}
		}
	}
	if (in_block)
		mlx5_ib_update_xlt(mr, blk_start_idx,
				   idx - blk_start_idx + 1, 0,
				   MLX5_IB_UPD_XLT_ZAP |
				   MLX5_IB_UPD_XLT_ATOMIC);
	/*
	 * We are now sure that the device will not access the
	 * memory. We can safely unmap it, and mark it as dirty if
	 * needed.
	 */

	ib_umem_odp_unmap_dma_pages(umem, start, end);

	if (unlikely(!umem->npages && mr->parent &&
		     !umem->odp_data->dying)) {
		WRITE_ONCE(umem->odp_data->dying, 1);
		atomic_inc(&mr->parent->num_leaf_free);
		schedule_work(&umem->odp_data->work);
	}
}

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	struct ib_odp_caps *caps = &dev->odp_caps;

	memset(caps, 0, sizeof(*caps));

	if (!MLX5_CAP_GEN(dev->mdev, pg))
		return;

	caps->general_caps = IB_ODP_SUPPORT;

	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		dev->odp_max_size = U64_MAX;
	else
		dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);

	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;

	if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
	    MLX5_CAP_GEN(dev->mdev, null_mkey) &&
	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;

	return;
}

static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
				      struct mlx5_pagefault *pfault,
				      int error)
{
	int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
		     pfault->wqe.wq_num : pfault->token;
	int ret = mlx5_core_page_fault_resume(dev->mdev,
					      pfault->token,
					      wq_num,
					      pfault->type,
					      error);
	if (ret)
		mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x\n",
			    wq_num);
}
static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
					    struct ib_umem *umem,
					    bool ksm, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err;

	mr = mlx5_mr_cache_alloc(dev, ksm ? MLX5_IMR_KSM_CACHE_ENTRY :
					    MLX5_IMR_MTT_CACHE_ENTRY);

	if (IS_ERR(mr))
		return mr;

	mr->ibmr.pd = pd;

	mr->dev = dev;
	mr->access_flags = access_flags;
	mr->mmkey.iova = 0;
	mr->umem = umem;

	if (ksm) {
		err = mlx5_ib_update_xlt(mr, 0,
					 mlx5_imr_ksm_entries,
					 MLX5_KSM_PAGE_SHIFT,
					 MLX5_IB_UPD_XLT_INDIRECT |
					 MLX5_IB_UPD_XLT_ZAP |
					 MLX5_IB_UPD_XLT_ENABLE);

	} else {
		err = mlx5_ib_update_xlt(mr, 0,
					 MLX5_IMR_MTT_ENTRIES,
					 PAGE_SHIFT,
					 MLX5_IB_UPD_XLT_ZAP |
					 MLX5_IB_UPD_XLT_ENABLE |
					 MLX5_IB_UPD_XLT_ATOMIC);
	}

	if (err)
		goto fail;

	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;

	mr->live = 1;

	mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
		    mr->mmkey.key, dev->mdev, mr);

	return mr;

fail:
	mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
	mlx5_mr_cache_free(dev, mr);

	return ERR_PTR(err);
}
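
/*
 * Look up, or create on demand, the leaf umems covering
 * [io_virt, io_virt + bcnt) under an implicit MR, and update the parent's
 * KSM entries for any newly created leaves. The parent umem's mutex
 * serializes leaf creation against concurrent faults.
 */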
static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
						u64 io_virt, size_t bcnt)
{
	struct ib_ucontext *ctx = mr->ibmr.pd->uobject->context;
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
	struct ib_umem_odp *odp, *result = NULL;
	u64 addr = io_virt & MLX5_IMR_MTT_MASK;
	int nentries = 0, start_idx = 0, ret;
	struct mlx5_ib_mr *mtt;
	struct ib_umem *umem;

	mutex_lock(&mr->umem->odp_data->umem_mutex);
	odp = odp_lookup(ctx, addr, 1, mr);

	mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
		    io_virt, bcnt, addr, odp);

next_mr:
	if (likely(odp)) {
		if (nentries)
			nentries++;
	} else {
		umem = ib_alloc_odp_umem(ctx, addr, MLX5_IMR_MTT_SIZE);
		if (IS_ERR(umem)) {
			mutex_unlock(&mr->umem->odp_data->umem_mutex);
			return ERR_CAST(umem);
		}

		mtt = implicit_mr_alloc(mr->ibmr.pd, umem, 0, mr->access_flags);
		if (IS_ERR(mtt)) {
			mutex_unlock(&mr->umem->odp_data->umem_mutex);
			ib_umem_release(umem);
			return ERR_CAST(mtt);
		}

		odp = umem->odp_data;
		odp->private = mtt;
		mtt->umem = umem;
		mtt->mmkey.iova = addr;
		mtt->parent = mr;
		INIT_WORK(&odp->work, mr_leaf_free_action);

		if (!nentries)
			start_idx = addr >> MLX5_IMR_MTT_SHIFT;
		nentries++;
	}

	/* Return first odp if region not covered by single one */
	if (likely(!result))
		result = odp;

	addr += MLX5_IMR_MTT_SIZE;
	if (unlikely(addr < io_virt + bcnt)) {
		odp = odp_next(odp);
		if (odp && odp->umem->address != addr)
			odp = NULL;
		goto next_mr;
	}

	if (unlikely(nentries)) {
		ret = mlx5_ib_update_xlt(mr, start_idx, nentries, 0,
					 MLX5_IB_UPD_XLT_INDIRECT |
					 MLX5_IB_UPD_XLT_ATOMIC);
		if (ret) {
			mlx5_ib_err(dev, "Failed to update PAS\n");
			result = ERR_PTR(ret);
		}
	}

	mutex_unlock(&mr->umem->odp_data->umem_mutex);
	return result;
}
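
/*
 * Create the parent MR of an implicit ODP registration: an indirect,
 * KSM-based mkey that spans the whole process address space and is populated
 * lazily by the page-fault handler.
 */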
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags)
{
	struct ib_ucontext *ctx = pd->ibpd.uobject->context;
	struct mlx5_ib_mr *imr;
	struct ib_umem *umem;

	umem = ib_umem_get(ctx, 0, 0, IB_ACCESS_ON_DEMAND, 0);
	if (IS_ERR(umem))
		return ERR_CAST(umem);

	imr = implicit_mr_alloc(&pd->ibpd, umem, 1, access_flags);
	if (IS_ERR(imr)) {
		ib_umem_release(umem);
		return ERR_CAST(imr);
	}

	imr->umem = umem;
	init_waitqueue_head(&imr->q_leaf_free);
	atomic_set(&imr->num_leaf_free, 0);

	return imr;
}
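
/*
 * Per-umem callback used by mlx5_ib_free_implicit_mr() to unmap every leaf
 * of an implicit MR and queue it for deferred teardown.
 */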
static int mr_leaf_free(struct ib_umem *umem, u64 start,
			u64 end, void *cookie)
{
	struct mlx5_ib_mr *mr = umem->odp_data->private, *imr = cookie;

	if (mr->parent != imr)
		return 0;

	ib_umem_odp_unmap_dma_pages(umem,
				    ib_umem_start(umem),
				    ib_umem_end(umem));

	if (umem->odp_data->dying)
		return 0;

	WRITE_ONCE(umem->odp_data->dying, 1);
	atomic_inc(&imr->num_leaf_free);
	schedule_work(&umem->odp_data->work);

	return 0;
}

void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
{
	struct ib_ucontext *ctx = imr->ibmr.pd->uobject->context;

	down_read(&ctx->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&ctx->umem_tree, 0, ULLONG_MAX,
				      mr_leaf_free, imr);
	up_read(&ctx->umem_rwsem);

	wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
}
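
/*
 * Resolve a page fault on a single MR: pin and DMA-map the faulting range
 * with ib_umem_odp_map_dma_pages() under an MMU-notifier sequence check,
 * then push the new translations to the device with mlx5_ib_update_xlt().
 * For implicit MRs this walks the affected leaves one by one.
 */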
static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			u64 io_virt, size_t bcnt, u32 *bytes_mapped)
{
	u64 access_mask = ODP_READ_ALLOWED_BIT;
	int npages = 0, page_shift, np;
	u64 start_idx, page_mask;
	struct ib_umem_odp *odp;
	int current_seq;
	size_t size;
	int ret;

	if (!mr->umem->odp_data->page_list) {
		odp = implicit_mr_get_data(mr, io_virt, bcnt);

		if (IS_ERR(odp))
			return PTR_ERR(odp);
		mr = odp->private;

	} else {
		odp = mr->umem->odp_data;
	}

next_mr:
	size = min_t(size_t, bcnt, ib_umem_end(odp->umem) - io_virt);

	page_shift = mr->umem->page_shift;
	page_mask = ~(BIT(page_shift) - 1);
	start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;

	if (mr->umem->writable)
		access_mask |= ODP_WRITE_ALLOWED_BIT;

	current_seq = READ_ONCE(odp->notifiers_seq);
	/*
	 * Ensure the sequence number is valid for some time before we call
	 * gup.
	 */
	smp_rmb();

	ret = ib_umem_odp_map_dma_pages(mr->umem, io_virt, size,
					access_mask, current_seq);

	if (ret < 0)
		goto out;

	np = ret;

	mutex_lock(&odp->umem_mutex);
	if (!ib_umem_mmu_notifier_retry(mr->umem, current_seq)) {
		/*
		 * No need to check whether the MTTs really belong to
		 * this MR, since ib_umem_odp_map_dma_pages already
		 * checks this.
		 */
		ret = mlx5_ib_update_xlt(mr, start_idx, np,
					 page_shift, MLX5_IB_UPD_XLT_ATOMIC);
	} else {
		ret = -EAGAIN;
	}
	mutex_unlock(&odp->umem_mutex);

	if (ret < 0) {
		if (ret != -EAGAIN)
			mlx5_ib_err(dev, "Failed to update mkey page tables\n");
		goto out;
	}

	if (bytes_mapped) {
		u32 new_mappings = (np << page_shift) -
			(io_virt - round_down(io_virt, 1 << page_shift));
		*bytes_mapped += min_t(u32, new_mappings, size);
	}

	npages += np << (page_shift - PAGE_SHIFT);
	bcnt -= size;

	if (unlikely(bcnt)) {
		struct ib_umem_odp *next;

		io_virt += size;
		next = odp_next(odp);
		if (unlikely(!next || next->umem->address != io_virt)) {
			mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
				    io_virt, next);
			return -EAGAIN;
		}
		odp = next;
		mr = odp->private;
		goto next_mr;
	}

	return npages;

out:
	if (ret == -EAGAIN) {
		if (mr->parent || !odp->dying) {
			unsigned long timeout =
				msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);

			if (!wait_for_completion_timeout(
					&odp->notifier_completion,
					timeout)) {
				mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d\n",
					     current_seq, odp->notifiers_seq);
			}
		} else {
			/* The MR is being killed, kill the QP as well. */
			ret = -EFAULT;
		}
	}

	return ret;
}
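
/*
 * Stack frame for resolving faults through indirect mkeys (memory windows):
 * each KLM entry that still needs pages is pushed as a frame and handled
 * iteratively in pagefault_single_data_segment().
 */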
struct pf_frame {
	struct pf_frame *next;
	u32 key;
	u64 io_virt;
	size_t bcnt;
	int depth;
};

/*
 * Handle a single data segment in a page-fault WQE or RDMA region.
 *
 * Returns number of OS pages retrieved on success. The caller may continue to
 * the next data segment.
 * Can return the following error codes:
 * -EAGAIN to designate a temporary error. The caller will abort handling the
 *  page fault and resolve it.
 * -EFAULT when there's an error mapping the requested pages. The caller will
 *  abort the page fault handling.
 */
static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
					 u32 key, u64 io_virt, size_t bcnt,
					 u32 *bytes_committed,
					 u32 *bytes_mapped)
{
	int npages = 0, srcu_key, ret, i, outlen, cur_outlen = 0, depth = 0;
	struct pf_frame *head = NULL, *frame;
	struct mlx5_core_mkey *mmkey;
	struct mlx5_ib_mw *mw;
	struct mlx5_ib_mr *mr;
	struct mlx5_klm *pklm;
	u32 *out = NULL;
	size_t offset;

	srcu_key = srcu_read_lock(&dev->mr_srcu);

	io_virt += *bytes_committed;
	bcnt -= *bytes_committed;

next_mr:
	mmkey = __mlx5_mr_lookup(dev->mdev, mlx5_base_mkey(key));
	if (!mmkey || mmkey->key != key) {
		mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
		ret = -EFAULT;
		goto srcu_unlock;
	}

	switch (mmkey->type) {
	case MLX5_MKEY_MR:
		mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
		if (!mr->live || !mr->ibmr.pd) {
			mlx5_ib_dbg(dev, "got dead MR\n");
			ret = -EFAULT;
			goto srcu_unlock;
		}

		ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped);
		if (ret < 0)
			goto srcu_unlock;

		npages += ret;
		ret = 0;
		break;

	case MLX5_MKEY_MW:
		mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);

		if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
			mlx5_ib_dbg(dev, "indirection level exceeded\n");
			ret = -EFAULT;
			goto srcu_unlock;
		}

		outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
			sizeof(*pklm) * (mw->ndescs - 2);

		if (outlen > cur_outlen) {
			kfree(out);
			out = kzalloc(outlen, GFP_KERNEL);
			if (!out) {
				ret = -ENOMEM;
				goto srcu_unlock;
			}
			cur_outlen = outlen;
		}

		pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
						       bsf0_klm0_pas_mtt0_1);

		ret = mlx5_core_query_mkey(dev->mdev, &mw->mmkey, out, outlen);
		if (ret)
			goto srcu_unlock;

		offset = io_virt - MLX5_GET64(query_mkey_out, out,
					      memory_key_mkey_entry.start_addr);

		for (i = 0; bcnt && i < mw->ndescs; i++, pklm++) {
			if (offset >= be32_to_cpu(pklm->bcount)) {
				offset -= be32_to_cpu(pklm->bcount);
				continue;
			}

			frame = kzalloc(sizeof(*frame), GFP_KERNEL);
			if (!frame) {
				ret = -ENOMEM;
				goto srcu_unlock;
			}

			frame->key = be32_to_cpu(pklm->key);
			frame->io_virt = be64_to_cpu(pklm->va) + offset;
			frame->bcnt = min_t(size_t, bcnt,
					    be32_to_cpu(pklm->bcount) - offset);
			frame->depth = depth + 1;
			frame->next = head;
			head = frame;

			bcnt -= frame->bcnt;
			offset = 0;
		}
		break;

	default:
		mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
		ret = -EFAULT;
		goto srcu_unlock;
	}

	if (head) {
		frame = head;
		head = frame->next;

		key = frame->key;
		io_virt = frame->io_virt;
		bcnt = frame->bcnt;
		depth = frame->depth;
		kfree(frame);

		goto next_mr;
	}

srcu_unlock:
	while (head) {
		frame = head;
		head = frame->next;
		kfree(frame);
	}
	kfree(out);

	srcu_read_unlock(&dev->mr_srcu, srcu_key);
	*bytes_committed = 0;
	return ret ? ret : npages;
}

/**
 * Parse a series of data segments for page fault handling.
 *
 * @qp the QP on which the fault occurred.
 * @pfault contains page fault information.
 * @wqe points at the first data segment in the WQE.
 * @wqe_end points after the end of the WQE.
 * @bytes_mapped receives the number of bytes that the function was able to
 *               map. This allows the caller to decide intelligently whether
 *               enough memory was mapped to resolve the page fault
 *               successfully (e.g. enough for the next MTU, or the entire
 *               packet).
 * @total_wqe_bytes receives the total data size of this WQE in bytes (minus
 *                  the committed bytes).
 *
 * Returns the number of pages loaded if positive, zero for an empty WQE, or a
 * negative error code.
 */
static int pagefault_data_segments(struct mlx5_ib_dev *dev,
				   struct mlx5_pagefault *pfault,
				   struct mlx5_ib_qp *qp, void *wqe,
				   void *wqe_end, u32 *bytes_mapped,
				   u32 *total_wqe_bytes, int receive_queue)
{
	int ret = 0, npages = 0;
	u64 io_virt;
	u32 key;
	u32 byte_count;
	size_t bcnt;
	int inline_segment;

	/* Skip SRQ next-WQE segment. */
	if (receive_queue && qp->ibqp.srq)
		wqe += sizeof(struct mlx5_wqe_srq_next_seg);

	if (bytes_mapped)
		*bytes_mapped = 0;
	if (total_wqe_bytes)
		*total_wqe_bytes = 0;

	while (wqe < wqe_end) {
		struct mlx5_wqe_data_seg *dseg = wqe;

		io_virt = be64_to_cpu(dseg->addr);
		key = be32_to_cpu(dseg->lkey);
		byte_count = be32_to_cpu(dseg->byte_count);
		inline_segment = !!(byte_count & MLX5_INLINE_SEG);
		bcnt = byte_count & ~MLX5_INLINE_SEG;

		if (inline_segment) {
			bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
			wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
				     16);
		} else {
			wqe += sizeof(*dseg);
		}

		/* receive WQE end of sg list. */
		if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
		    io_virt == 0)
			break;

		if (!inline_segment && total_wqe_bytes) {
			*total_wqe_bytes += bcnt - min_t(size_t, bcnt,
					pfault->bytes_committed);
		}

		/* A zero length data segment designates a length of 2GB. */
		if (bcnt == 0)
			bcnt = 1U << 31;

		if (inline_segment || bcnt <= pfault->bytes_committed) {
			pfault->bytes_committed -=
				min_t(size_t, bcnt,
				      pfault->bytes_committed);
			continue;
		}

		ret = pagefault_single_data_segment(dev, key, io_virt, bcnt,
						    &pfault->bytes_committed,
						    bytes_mapped);
		if (ret < 0)
			break;
		npages += ret;
	}

	return ret < 0 ? ret : npages;
}

static const u32 mlx5_ib_odp_opcode_cap[] = {
	[MLX5_OPCODE_SEND]	       = IB_ODP_SUPPORT_SEND,
	[MLX5_OPCODE_SEND_IMM]	       = IB_ODP_SUPPORT_SEND,
	[MLX5_OPCODE_SEND_INVAL]       = IB_ODP_SUPPORT_SEND,
	[MLX5_OPCODE_RDMA_WRITE]       = IB_ODP_SUPPORT_WRITE,
	[MLX5_OPCODE_RDMA_WRITE_IMM]   = IB_ODP_SUPPORT_WRITE,
	[MLX5_OPCODE_RDMA_READ]	       = IB_ODP_SUPPORT_READ,
	[MLX5_OPCODE_ATOMIC_CS]	       = IB_ODP_SUPPORT_ATOMIC,
	[MLX5_OPCODE_ATOMIC_FA]	       = IB_ODP_SUPPORT_ATOMIC,
};

/*
 * Parse initiator WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and set wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_initiator_pfault_handler(
	struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
	struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{
	struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
	u16 wqe_index = pfault->wqe.wqe_index;
	u32 transport_caps;
	struct mlx5_base_av *av;
	unsigned ds, opcode;
#if defined(DEBUG)
	u32 ctrl_wqe_index, ctrl_qpn;
#endif
	u32 qpn = qp->trans_qp.base.mqp.qpn;

	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
	if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
		mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
			    ds, wqe_length);
		return -EFAULT;
	}

	if (ds == 0) {
		mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
			    wqe_index, qpn);
		return -EFAULT;
	}

#if defined(DEBUG)
	ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
			MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
			MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
	if (wqe_index != ctrl_wqe_index) {
		mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
			    wqe_index, qpn,
			    ctrl_wqe_index);
		return -EFAULT;
	}

	ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
		MLX5_WQE_CTRL_QPN_SHIFT;
	if (qpn != ctrl_qpn) {
		mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
			    wqe_index, qpn,
			    ctrl_qpn);
		return -EFAULT;
	}
#endif /* DEBUG */

	*wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
	*wqe += sizeof(*ctrl);

	opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
		 MLX5_WQE_CTRL_OPCODE_MASK;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
		break;
	case IB_QPT_UD:
		transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
		break;
	default:
		mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
			    qp->ibqp.qp_type);
		return -EFAULT;
	}

	if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) ||
		     !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
		mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
			    opcode);
		return -EFAULT;
	}

	if (qp->ibqp.qp_type != IB_QPT_RC) {
		av = *wqe;
		if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
			*wqe += sizeof(struct mlx5_av);
		else
			*wqe += sizeof(struct mlx5_base_av);
	}

	switch (opcode) {
	case MLX5_OPCODE_RDMA_WRITE:
	case MLX5_OPCODE_RDMA_WRITE_IMM:
	case MLX5_OPCODE_RDMA_READ:
		*wqe += sizeof(struct mlx5_wqe_raddr_seg);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
	case MLX5_OPCODE_ATOMIC_FA:
		*wqe += sizeof(struct mlx5_wqe_raddr_seg);
		*wqe += sizeof(struct mlx5_wqe_atomic_seg);
		break;
	}

	return 0;
}

/*
 * Parse responder WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and set wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_responder_pfault_handler(
	struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
	struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{
	struct mlx5_ib_wq *wq = &qp->rq;
	int wqe_size = 1 << wq->wqe_shift;

	if (qp->ibqp.srq) {
		mlx5_ib_err(dev, "ODP fault on SRQ is not supported\n");
		return -EFAULT;
	}

	if (qp->wq_sig) {
		mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
		return -EFAULT;
	}

	if (wqe_size > wqe_length) {
		mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
		return -EFAULT;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
		      IB_ODP_SUPPORT_RECV))
			goto invalid_transport_or_opcode;
		break;
	default:
invalid_transport_or_opcode:
		mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
			    qp->ibqp.qp_type);
		return -EFAULT;
	}

	*wqe_end = *wqe + wqe_size;

	return 0;
}

static struct mlx5_ib_qp *mlx5_ib_odp_find_qp(struct mlx5_ib_dev *dev,
					      u32 wq_num)
{
	struct mlx5_core_qp *mqp = __mlx5_qp_lookup(dev->mdev, wq_num);

	if (!mqp) {
		mlx5_ib_err(dev, "QPN 0x%6x not found\n", wq_num);
		return NULL;
	}

	return to_mibqp(mqp);
}

static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
					  struct mlx5_pagefault *pfault)
{
	int ret;
	void *wqe, *wqe_end;
	u32 bytes_mapped, total_wqe_bytes;
	char *buffer = NULL;
	int resume_with_error = 1;
	u16 wqe_index = pfault->wqe.wqe_index;
	int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
	struct mlx5_ib_qp *qp;

	buffer = (char *)__get_free_page(GFP_KERNEL);
	if (!buffer) {
		mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
		goto resolve_page_fault;
	}

	qp = mlx5_ib_odp_find_qp(dev, pfault->wqe.wq_num);
	if (!qp)
		goto resolve_page_fault;

	ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
				    PAGE_SIZE, &qp->trans_qp.base);
	if (ret < 0) {
		mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n",
			    ret, wqe_index, pfault->token);
		goto resolve_page_fault;
	}

	wqe = buffer;
	if (requestor)
		ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp, &wqe,
							  &wqe_end, ret);
	else
		ret = mlx5_ib_mr_responder_pfault_handler(dev, pfault, qp, &wqe,
							  &wqe_end, ret);
	if (ret < 0)
		goto resolve_page_fault;

	if (wqe >= wqe_end) {
		mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
		goto resolve_page_fault;
	}

	ret = pagefault_data_segments(dev, pfault, qp, wqe, wqe_end,
				      &bytes_mapped, &total_wqe_bytes,
				      !requestor);
	if (ret == -EAGAIN) {
		resume_with_error = 0;
		goto resolve_page_fault;
	} else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
		goto resolve_page_fault;
	}

	resume_with_error = 0;
resolve_page_fault:
	mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
		    pfault->wqe.wq_num, resume_with_error,
		    pfault->type);
	free_page((unsigned long)buffer);
}

static int pages_in_range(u64 address, u32 length)
{
	return (ALIGN(address + length, PAGE_SIZE) -
		(address & PAGE_MASK)) >> PAGE_SHIFT;
}

static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
					   struct mlx5_pagefault *pfault)
{
	u64 address;
	u32 length;
	u32 prefetch_len = pfault->bytes_committed;
	int prefetch_activated = 0;
	u32 rkey = pfault->rdma.r_key;
	int ret;

	/* The RDMA responder handler handles the page fault in two parts.
	 * First it brings the necessary pages for the current packet
	 * (and uses the pfault context), and then (after resuming the QP)
	 * prefetches more pages. The second operation cannot use the pfault
	 * context and therefore uses the dummy_pfault context allocated on
	 * the stack */
	pfault->rdma.rdma_va += pfault->bytes_committed;
	pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
					pfault->rdma.rdma_op_len);
	pfault->bytes_committed = 0;

	address = pfault->rdma.rdma_va;
	length  = pfault->rdma.rdma_op_len;

	/* For some operations, the hardware cannot tell the exact message
	 * length, and in those cases it reports zero. Use prefetch
	 * logic. */
	if (length == 0) {
		prefetch_activated = 1;
		length = pfault->rdma.packet_size;
		prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
	}

	ret = pagefault_single_data_segment(dev, rkey, address, length,
					    &pfault->bytes_committed, NULL);
	if (ret == -EAGAIN) {
		/* We're racing with an invalidation, don't prefetch */
		prefetch_activated = 0;
	} else if (ret < 0 || pages_in_range(address, length) > ret) {
		mlx5_ib_page_fault_resume(dev, pfault, 1);
		if (ret != -ENOENT)
			mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
				    ret, pfault->token, pfault->type);
		return;
	}

	mlx5_ib_page_fault_resume(dev, pfault, 0);
	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
		    pfault->token, pfault->type,
		    prefetch_activated);

	/* At this point, there might be a new pagefault already arriving in
	 * the eq, switch to the dummy pagefault for the rest of the
	 * processing. We're still OK with the objects being alive as the
	 * work-queue is being fenced. */

	if (prefetch_activated) {
		u32 bytes_committed = 0;

		ret = pagefault_single_data_segment(dev, rkey, address,
						    prefetch_len,
						    &bytes_committed, NULL);
		if (ret < 0 && ret != -EAGAIN) {
			mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
				    ret, pfault->token, address, prefetch_len);
		}
	}
}

void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
		    struct mlx5_pagefault *pfault)
{
	struct mlx5_ib_dev *dev = context;
	u8 event_subtype = pfault->event_subtype;

	switch (event_subtype) {
	case MLX5_PFAULT_SUBTYPE_WQE:
		mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
		break;
	case MLX5_PFAULT_SUBTYPE_RDMA:
		mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
		break;
	default:
		mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
			    event_subtype);
		mlx5_ib_page_fault_resume(dev, pfault, 1);
	}
}
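
/*
 * Pre-configure the two MR-cache buckets that back implicit ODP MRs: one for
 * the MTT-based leaves and one for the KSM-based parents.
 */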
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
{
	if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
		return;

	switch (ent->order - 2) {
	case MLX5_IMR_MTT_CACHE_ENTRY:
		ent->page = PAGE_SHIFT;
		ent->xlt = MLX5_IMR_MTT_ENTRIES *
			   sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		ent->limit = 0;
		break;

	case MLX5_IMR_KSM_CACHE_ENTRY:
		ent->page = MLX5_KSM_PAGE_SHIFT;
		ent->xlt = mlx5_imr_ksm_entries *
			   sizeof(struct mlx5_klm) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
		ent->limit = 0;
		break;
	}
}

int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
	int ret;

	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
		ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
		if (ret) {
			mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);
			return ret;
		}
	}

	return 0;
}

int mlx5_ib_odp_init(void)
{
	mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
				       MLX5_IMR_MTT_BITS);

	return 0;
}