/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

#include "mlx5_ib.h"

#define MAX_PREFETCH_LEN (4*1024*1024U)
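
/*
 * Upper bound (4 MiB) on the length of the follow-up prefetch mapping done by
 * the RDMA fault path; see mlx5_ib_mr_rdma_pfault_handler(), which clamps
 * prefetch_len to this value.
 */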

/* Timeout in ms to wait for an active mmu notifier to complete when handling
 * a pagefault. */
#define MMU_NOTIFIER_TIMEOUT 1000

struct workqueue_struct *mlx5_ib_page_fault_wq;
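
/*
 * Page-fault events arrive in interrupt context (see mlx5_ib_pfault_handler()
 * below), are copied into the QP's per-context pagefault slot, and are then
 * deferred to this single-threaded workqueue so the fault is resolved in
 * process context.
 */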

void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
                              unsigned long end)
{
        struct mlx5_ib_mr *mr;
        const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT / sizeof(u64)) - 1;
        u64 idx = 0, blk_start_idx = 0;
        int in_block = 0;
        u64 addr;
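        /*
         * Zapped MTT entries are flushed to the device in UMR-aligned chunks:
         * umr_block_mask covers one chunk, so a flush is issued whenever
         * (idx & umr_block_mask) wraps back to zero.  As an illustration
         * only, with MLX5_UMR_MTT_ALIGNMENT at 0x40 bytes and 8-byte MTT
         * entries that would be every 8 entries.
         */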

        if (!umem || !umem->odp_data) {
                pr_err("invalidation called on NULL umem or non-ODP umem\n");
                return;
        }

        mr = umem->odp_data->private;

        if (!mr || !mr->ibmr.pd)
                return;

        start = max_t(u64, ib_umem_start(umem), start);
        end = min_t(u64, ib_umem_end(umem), end);

        /*
         * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
         * while we are doing the invalidation, no page fault will attempt to
         * overwrite the same MTTs.  Concurrent invalidations might race us,
         * but they will write 0s as well, so no difference in the end result.
         */

        for (addr = start; addr < end; addr += (u64)umem->page_size) {
                idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
                /*
                 * Strive to write the MTTs in chunks, but avoid overwriting
                 * non-existing MTTs. The heuristic here can be improved to
                 * estimate the cost of another UMR vs. the cost of a bigger
                 * UMR.
                 */
                if (umem->odp_data->dma_list[idx] &
                    (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
                        if (!in_block) {
                                blk_start_idx = idx;
                                in_block = 1;
                        }
                } else {
                        u64 umr_offset = idx & umr_block_mask;

                        if (in_block && umr_offset == 0) {
                                mlx5_ib_update_mtt(mr, blk_start_idx,
                                                   idx - blk_start_idx, 1);
                                in_block = 0;
                        }
                }
        }
        if (in_block)
                mlx5_ib_update_mtt(mr, blk_start_idx, idx - blk_start_idx + 1,
                                   1);

        /*
         * We are now sure that the device will not access the
         * memory. We can safely unmap it, and mark it as dirty if
         * needed.
         */

        ib_umem_odp_unmap_dma_pages(umem, start, end);
}

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
        struct ib_odp_caps *caps = &dev->odp_caps;

        memset(caps, 0, sizeof(*caps));

        if (!MLX5_CAP_GEN(dev->mdev, pg))
                return;

        caps->general_caps = IB_ODP_SUPPORT;

        if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
                caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;

        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;

        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;

        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;

        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
}
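
/*
 * Look up the MR that owns a faulting lkey/rkey.  The caller is expected to
 * hold the SRCU read lock on dev->mr_srcu (as pagefault_single_data_segment()
 * does below), so the returned MR cannot be freed while it is being used.
 */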
static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
                                                   u32 key)
{
        u32 base_key = mlx5_base_mkey(key);
        struct mlx5_core_mkey *mmkey = __mlx5_mr_lookup(dev->mdev, base_key);
        struct mlx5_ib_mr *mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);

        if (!mmkey || mmkey->key != key || !mr->live)
                return NULL;

        return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}
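
/*
 * Tell the device that the page fault on this QP has been handled.  When
 * 'error' is set, the resume command presumably makes the hardware complete
 * the faulting operation with an error rather than retry it.
 */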
static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
                                      struct mlx5_ib_pfault *pfault,
                                      int error)
{
        struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
        u32 qpn = qp->trans_qp.base.mqp.qpn;
        int ret = mlx5_core_page_fault_resume(dev->mdev,
                                              qpn,
                                              pfault->mpfault.flags,
                                              error);
        if (ret)
                pr_err("Failed to resolve the page fault on QP 0x%x\n", qpn);
}

/*
 * Handle a single data segment in a page-fault WQE.
 *
 * Returns number of pages retrieved on success. The caller will continue to
 * the next data segment.
 * Can return the following error codes:
 * -EAGAIN to designate a temporary error. The caller will abort handling the
 *  page fault and resolve it.
 * -EFAULT when there's an error mapping the requested pages. The caller will
 *  abort the page fault handling and possibly move the QP to an error state.
 * On other errors the QP should also be closed with an error.
 */
static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
                                         struct mlx5_ib_pfault *pfault,
                                         u32 key, u64 io_virt, size_t bcnt,
                                         u32 *bytes_mapped)
{
        struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device);
        int srcu_key;
        unsigned int current_seq;
        u64 start_idx;
        int npages = 0, ret = 0;
        struct mlx5_ib_mr *mr;
        u64 access_mask = ODP_READ_ALLOWED_BIT;

        srcu_key = srcu_read_lock(&mib_dev->mr_srcu);
        mr = mlx5_ib_odp_find_mr_lkey(mib_dev, key);
        /*
         * If we didn't find the MR, it means the MR was closed while we were
         * handling the ODP event. In this case we return -EFAULT so that the
         * QP will be closed.
         */
        if (!mr || !mr->ibmr.pd) {
                pr_err("Failed to find relevant mr for lkey=0x%06x, probably the MR was destroyed\n",
                       key);
                ret = -EFAULT;
                goto srcu_unlock;
        }

        if (!mr->umem->odp_data) {
                pr_debug("skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
                         key);
                if (bytes_mapped)
                        *bytes_mapped +=
                                (bcnt - pfault->mpfault.bytes_committed);
                goto srcu_unlock;
        }

        if (mr->ibmr.pd != qp->ibqp.pd) {
                pr_err("Page-fault with different PDs for QP and MR.\n");
                ret = -EFAULT;
                goto srcu_unlock;
        }

        current_seq = ACCESS_ONCE(mr->umem->odp_data->notifiers_seq);
        /*
         * Ensure the sequence number is valid for some time before we call
         * gup.
         */
        smp_rmb();

        /*
         * Avoid branches - this code will perform correctly
         * in all iterations (in iteration 2 and above,
         * bytes_committed == 0).
         */
        io_virt += pfault->mpfault.bytes_committed;
        bcnt -= pfault->mpfault.bytes_committed;

        start_idx = (io_virt - (mr->mmkey.iova & PAGE_MASK)) >> PAGE_SHIFT;

        if (mr->umem->writable)
                access_mask |= ODP_WRITE_ALLOWED_BIT;
        npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt,
                                           access_mask, current_seq);
        if (npages < 0) {
                ret = npages;
                goto srcu_unlock;
        }
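        /*
         * current_seq was sampled before the pages were mapped.  Under
         * umem_mutex, ib_umem_mmu_notifier_retry() reports whether an
         * invalidation ran (or is running) since then; in that case the MTTs
         * are not updated with possibly stale pages and the fault is retried
         * via -EAGAIN.
         */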

        if (npages > 0) {
                mutex_lock(&mr->umem->odp_data->umem_mutex);
                if (!ib_umem_mmu_notifier_retry(mr->umem, current_seq)) {
                        /*
                         * No need to check whether the MTTs really belong to
                         * this MR, since ib_umem_odp_map_dma_pages already
                         * checks this.
                         */
                        ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0);
                } else {
                        ret = -EAGAIN;
                }
                mutex_unlock(&mr->umem->odp_data->umem_mutex);
                if (ret < 0) {
                        if (ret != -EAGAIN)
                                pr_err("Failed to update mkey page tables\n");
                        goto srcu_unlock;
                }

                if (bytes_mapped) {
                        u32 new_mappings = npages * PAGE_SIZE -
                                (io_virt - round_down(io_virt, PAGE_SIZE));
                        *bytes_mapped += min_t(u32, new_mappings, bcnt);
                }
        }

srcu_unlock:
        if (ret == -EAGAIN) {
                if (!mr->umem->odp_data->dying) {
                        struct ib_umem_odp *odp_data = mr->umem->odp_data;
                        unsigned long timeout =
                                msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);

                        if (!wait_for_completion_timeout(
                                        &odp_data->notifier_completion,
                                        timeout)) {
                                pr_warn("timeout waiting for mmu notifier completion\n");
                        }
                } else {
                        /* The MR is being killed, kill the QP as well. */
                        ret = -EFAULT;
                }
        }
        srcu_read_unlock(&mib_dev->mr_srcu, srcu_key);
        pfault->mpfault.bytes_committed = 0;
        return ret ? ret : npages;
}

/*
 * Parse a series of data segments for page fault handling.
 *
 * @qp the QP on which the fault occurred.
 * @pfault contains page fault information.
 * @wqe points at the first data segment in the WQE.
 * @wqe_end points after the end of the WQE.
 * @bytes_mapped receives the number of bytes that the function was able to
 *               map. This allows the caller to decide intelligently whether
 *               enough memory was mapped to resolve the page fault
 *               successfully (e.g. enough for the next MTU, or the entire
 *               packet).
 * @total_wqe_bytes receives the total data size of this WQE in bytes (minus
 *                  the committed bytes).
 *
 * Returns the number of pages loaded if positive, zero for an empty WQE, or a
 * negative error code.
 */
static int pagefault_data_segments(struct mlx5_ib_qp *qp,
                                   struct mlx5_ib_pfault *pfault, void *wqe,
                                   void *wqe_end, u32 *bytes_mapped,
                                   u32 *total_wqe_bytes, int receive_queue)
{
        int ret = 0, npages = 0;
        u64 io_virt;
        u32 key;
        u32 byte_count;
        size_t bcnt;
        int inline_segment;

        /* Skip SRQ next-WQE segment. */
        if (receive_queue && qp->ibqp.srq)
                wqe += sizeof(struct mlx5_wqe_srq_next_seg);

        if (total_wqe_bytes)
                *total_wqe_bytes = 0;

        while (wqe < wqe_end) {
                struct mlx5_wqe_data_seg *dseg = wqe;

                io_virt = be64_to_cpu(dseg->addr);
                key = be32_to_cpu(dseg->lkey);
                byte_count = be32_to_cpu(dseg->byte_count);
                inline_segment = !!(byte_count & MLX5_INLINE_SEG);
                bcnt = byte_count & ~MLX5_INLINE_SEG;

                if (inline_segment) {
                        bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
                        wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
                                     16);
                } else {
                        wqe += sizeof(*dseg);
                }

                /* receive WQE end of sg list. */
                if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
                    io_virt == 0)
                        break;

                if (!inline_segment && total_wqe_bytes) {
                        *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
                                        pfault->mpfault.bytes_committed);
                }
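
                /*
                 * bytes_committed is the part of the message the hardware had
                 * already handled before faulting: segments wholly covered by
                 * it are simply consumed below, and for the first uncovered
                 * segment the remaining offset is applied inside
                 * pagefault_single_data_segment().
                 */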
                /* A zero length data segment designates a length of 2GB. */
                if (bcnt == 0)
                        bcnt = 1U << 31;

                if (inline_segment || bcnt <= pfault->mpfault.bytes_committed) {
                        pfault->mpfault.bytes_committed -=
                                min_t(size_t, bcnt,
                                      pfault->mpfault.bytes_committed);
                        continue;
                }

                ret = pagefault_single_data_segment(qp, pfault, key, io_virt,
                                                    bcnt, bytes_mapped);
                if (ret < 0)
                        break;
                npages += ret;
        }

        return ret < 0 ? ret : npages;
}

/*
 * Parse initiator WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_initiator_pfault_handler(
        struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
        void **wqe, void **wqe_end, int wqe_length)
{
        struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
        struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
        u16 wqe_index = pfault->mpfault.wqe.wqe_index;
        unsigned int ds, opcode;
        u32 ctrl_wqe_index, ctrl_qpn;
        u32 qpn = qp->trans_qp.base.mqp.qpn;

        ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
        if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
                mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
                            ds, wqe_length);
                return -EFAULT;
        }

        if (ds == 0) {
                mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
                            wqe_index, qpn);
                return -EFAULT;
        }

        ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
                          MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
                         MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
        if (wqe_index != ctrl_wqe_index) {
                mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
                            wqe_index, qpn, ctrl_wqe_index);
                return -EFAULT;
        }

        ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
                   MLX5_WQE_CTRL_QPN_SHIFT;
        if (qpn != ctrl_qpn) {
                mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
                            wqe_index, qpn, ctrl_qpn);
                return -EFAULT;
        }

        *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
        *wqe += sizeof(*ctrl);
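        /*
         * The DS field counts the WQE size in 16-byte units
         * (MLX5_WQE_DS_UNITS), which is how wqe_end is derived above; *wqe
         * has been advanced past the control segment and, after the
         * opcode-specific segments handled below, will point at the
         * scatter-gather list.
         */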

        opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
                 MLX5_WQE_CTRL_OPCODE_MASK;
        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
                switch (opcode) {
                case MLX5_OPCODE_SEND:
                case MLX5_OPCODE_SEND_IMM:
                case MLX5_OPCODE_SEND_INVAL:
                        if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
                              IB_ODP_SUPPORT_SEND))
                                goto invalid_transport_or_opcode;
                        break;
                case MLX5_OPCODE_RDMA_WRITE:
                case MLX5_OPCODE_RDMA_WRITE_IMM:
                        if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
                              IB_ODP_SUPPORT_WRITE))
                                goto invalid_transport_or_opcode;
                        *wqe += sizeof(struct mlx5_wqe_raddr_seg);
                        break;
                case MLX5_OPCODE_RDMA_READ:
                        if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
                              IB_ODP_SUPPORT_READ))
                                goto invalid_transport_or_opcode;
                        *wqe += sizeof(struct mlx5_wqe_raddr_seg);
                        break;
                default:
                        goto invalid_transport_or_opcode;
                }
                break;
        case IB_QPT_UD:
                switch (opcode) {
                case MLX5_OPCODE_SEND:
                case MLX5_OPCODE_SEND_IMM:
                        if (!(dev->odp_caps.per_transport_caps.ud_odp_caps &
                              IB_ODP_SUPPORT_SEND))
                                goto invalid_transport_or_opcode;
                        *wqe += sizeof(struct mlx5_wqe_datagram_seg);
                        break;
                default:
                        goto invalid_transport_or_opcode;
                }
                break;
        default:
invalid_transport_or_opcode:
                mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode or transport. transport: 0x%x opcode: 0x%x.\n",
                            qp->ibqp.qp_type, opcode);
                return -EFAULT;
        }

        return 0;
}

/*
 * Parse responder WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_responder_pfault_handler(
        struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
        void **wqe, void **wqe_end, int wqe_length)
{
        struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
        struct mlx5_ib_wq *wq = &qp->rq;
        int wqe_size = 1 << wq->wqe_shift;
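        /*
         * Receive WQEs on the regular RQ have a fixed stride of
         * 1 << wqe_shift bytes, so the whole WQE can be validated against
         * wqe_length below and wqe_end derived without parsing a control
         * segment.
         */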

        if (qp->ibqp.srq) {
                mlx5_ib_err(dev, "ODP fault on SRQ is not supported\n");
                return -EFAULT;
        }

        if (qp->wq_sig) {
                mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
                return -EFAULT;
        }

        if (wqe_size > wqe_length) {
                mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
                return -EFAULT;
        }

        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
                if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
                      IB_ODP_SUPPORT_RECV))
                        goto invalid_transport_or_opcode;
                break;
        default:
invalid_transport_or_opcode:
                mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
                            qp->ibqp.qp_type);
                return -EFAULT;
        }

        *wqe_end = *wqe + wqe_size;

        return 0;
}

static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
                                          struct mlx5_ib_pfault *pfault)
{
        struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
        int ret;
        void *wqe, *wqe_end;
        u32 bytes_mapped, total_wqe_bytes;
        char *buffer = NULL;
        int resume_with_error = 0;
        u16 wqe_index = pfault->mpfault.wqe.wqe_index;
        int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR;
        u32 qpn = qp->trans_qp.base.mqp.qpn;

        buffer = (char *)__get_free_page(GFP_KERNEL);
        if (!buffer) {
                mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
                resume_with_error = 1;
                goto resolve_page_fault;
        }

        ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
                                    PAGE_SIZE, &qp->trans_qp.base);
        if (ret < 0) {
                mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%x, wqe_index=%x, qpn=%x\n",
                            -ret, wqe_index, qpn);
                resume_with_error = 1;
                goto resolve_page_fault;
        }
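        /*
         * mlx5_ib_read_user_wqe() copies the faulting WQE from the
         * user-mapped work queue into the page-sized buffer; its non-negative
         * return value is the number of bytes read, which is handed to the
         * parsers below as wqe_length.
         */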
        wqe = buffer;
        if (requestor)
                ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe,
                                                          &wqe_end, ret);
        else
                ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe,
                                                          &wqe_end, ret);
        if (ret < 0) {
                resume_with_error = 1;
                goto resolve_page_fault;
        }

        if (wqe >= wqe_end) {
                mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
                resume_with_error = 1;
                goto resolve_page_fault;
        }

        ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped,
                                      &total_wqe_bytes, !requestor);
        if (ret == -EAGAIN) {
                goto resolve_page_fault;
        } else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
                mlx5_ib_err(dev, "Error getting user pages for page fault. Error: 0x%x\n",
                            -ret);
                resume_with_error = 1;
                goto resolve_page_fault;
        }

resolve_page_fault:
        mlx5_ib_page_fault_resume(qp, pfault, resume_with_error);
        mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, flags: 0x%x\n",
                    qpn, resume_with_error,
                    pfault->mpfault.flags);

        free_page((unsigned long)buffer);
}

static int pages_in_range(u64 address, u32 length)
{
        return (ALIGN(address + length, PAGE_SIZE) -
                (address & PAGE_MASK)) >> PAGE_SHIFT;
}

static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
                                           struct mlx5_ib_pfault *pfault)
{
        struct mlx5_pagefault *mpfault = &pfault->mpfault;
        u64 address;
        u32 length;
        u32 prefetch_len = mpfault->bytes_committed;
        int prefetch_activated = 0;
        u32 rkey = mpfault->rdma.r_key;
        int ret;

        /* The RDMA responder handler handles the page fault in two parts.
         * First it brings the necessary pages for the current packet
         * (and uses the pfault context), and then (after resuming the QP)
         * prefetches more pages. The second operation cannot use the pfault
         * context and therefore uses the dummy_pfault context allocated on
         * the stack. */
        struct mlx5_ib_pfault dummy_pfault = {};

        dummy_pfault.mpfault.bytes_committed = 0;

        mpfault->rdma.rdma_va += mpfault->bytes_committed;
        mpfault->rdma.rdma_op_len -= min(mpfault->bytes_committed,
                                         mpfault->rdma.rdma_op_len);
        mpfault->bytes_committed = 0;

        address = mpfault->rdma.rdma_va;
        length = mpfault->rdma.rdma_op_len;

        /* For some operations, the hardware cannot tell the exact message
         * length, and in those cases it reports zero. Use prefetch
         * logic. */
        if (length == 0) {
                prefetch_activated = 1;
                length = mpfault->rdma.packet_size;
                prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
        }
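        /*
         * When the reported length is zero, only the faulting packet is
         * mapped synchronously below; the (clamped) prefetch_len is then
         * brought in afterwards, once the QP has been resumed.
         */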

        ret = pagefault_single_data_segment(qp, pfault, rkey, address, length,
                                            NULL);
        if (ret == -EAGAIN) {
                /* We're racing with an invalidation, don't prefetch */
                prefetch_activated = 0;
        } else if (ret < 0 || pages_in_range(address, length) > ret) {
                mlx5_ib_page_fault_resume(qp, pfault, 1);
                return;
        }

        mlx5_ib_page_fault_resume(qp, pfault, 0);

        /* At this point, there might be a new pagefault already arriving in
         * the eq, switch to the dummy pagefault for the rest of the
         * processing. We're still OK with the objects being alive as the
         * work-queue is being fenced. */

        if (prefetch_activated) {
                ret = pagefault_single_data_segment(qp, &dummy_pfault, rkey,
                                                    address, prefetch_len,
                                                    NULL);
                if (ret < 0) {
                        pr_warn("Prefetch failed (ret = %d, prefetch_activated = %d) for QPN %d, address: 0x%.16llx, length = 0x%.16x\n",
                                ret, prefetch_activated,
                                qp->ibqp.qp_num, address, prefetch_len);
                }
        }
}

void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
                               struct mlx5_ib_pfault *pfault)
{
        u8 event_subtype = pfault->mpfault.event_subtype;

        switch (event_subtype) {
        case MLX5_PFAULT_SUBTYPE_WQE:
                mlx5_ib_mr_wqe_pfault_handler(qp, pfault);
                break;
        case MLX5_PFAULT_SUBTYPE_RDMA:
                mlx5_ib_mr_rdma_pfault_handler(qp, pfault);
                break;
        default:
                pr_warn("Invalid page fault event subtype: 0x%x\n",
                        event_subtype);
                mlx5_ib_page_fault_resume(qp, pfault, 1);
                break;
        }
}

static void mlx5_ib_qp_pfault_action(struct work_struct *work)
{
        struct mlx5_ib_pfault *pfault = container_of(work,
                                                     struct mlx5_ib_pfault,
                                                     work);
        enum mlx5_ib_pagefault_context context =
                mlx5_ib_get_pagefault_context(&pfault->mpfault);
        struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp,
                                             pagefaults[context]);

        mlx5_ib_mr_pfault_handler(qp, pfault);
}

void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
        qp->disable_page_faults = 1;
        spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);

        /*
         * Note that at this point, we are guaranteed that no more
         * work queue elements will be posted to the work queue with
         * the QP we are closing.
         */
        flush_workqueue(mlx5_ib_page_fault_wq);
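        /*
         * mlx5_ib_pfault_handler() checks disable_page_faults under
         * disable_page_faults_lock before queueing work, so after the flush
         * above no fault handler can still be running for this QP.
         */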
}

void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
        qp->disable_page_faults = 0;
        spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
}

static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp,
                                   struct mlx5_pagefault *pfault)
{
        /*
         * Note that we will only get one fault event per QP per context
         * (responder/initiator, read/write), until we resolve the page fault
         * with the mlx5_ib_page_fault_resume command. Since this function is
         * called from within the work element, there is no risk of missing
         * events.
         */
        struct mlx5_ib_qp *mibqp = to_mibqp(qp);
        enum mlx5_ib_pagefault_context context =
                mlx5_ib_get_pagefault_context(pfault);
        struct mlx5_ib_pfault *qp_pfault = &mibqp->pagefaults[context];

        qp_pfault->mpfault = *pfault;
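        /*
         * The mlx5_pagefault passed in by the core presumably stays valid
         * only for the duration of this callback, which is why it is copied
         * into the per-QP, per-context slot above before the work is queued.
         */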

        /* No need to stop interrupts here since we are in an interrupt */
        spin_lock(&mibqp->disable_page_faults_lock);
        if (!mibqp->disable_page_faults)
                queue_work(mlx5_ib_page_fault_wq, &qp_pfault->work);
        spin_unlock(&mibqp->disable_page_faults_lock);
}

void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)
{
        int i;

        qp->disable_page_faults = 1;
        spin_lock_init(&qp->disable_page_faults_lock);

        qp->trans_qp.base.mqp.pfault_handler = mlx5_ib_pfault_handler;

        for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i)
                INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action);
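        /*
         * Page faults start out disabled on a new QP;
         * mlx5_ib_qp_enable_pagefaults() is expected to be called once the
         * QP is ready to have faults handled, and
         * mlx5_ib_qp_disable_pagefaults() again before it is destroyed.
         */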
}

int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev)
{
        int ret;

        ret = init_srcu_struct(&ibdev->mr_srcu);
        if (ret)
                return ret;

        return 0;
}

void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
{
        cleanup_srcu_struct(&ibdev->mr_srcu);
}

int __init mlx5_ib_odp_init(void)
{
        mlx5_ib_page_fault_wq =
                create_singlethread_workqueue("mlx5_ib_page_faults");
        if (!mlx5_ib_page_fault_wq)
                return -ENOMEM;

        return 0;
}

void mlx5_ib_odp_cleanup(void)
{
        destroy_workqueue(mlx5_ib_page_fault_wq);
}