/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

#include "mlx5_ib.h"
#define MAX_PREFETCH_LEN (4*1024*1024U)

/* Timeout in ms to wait for an active mmu notifier to complete when handling
 * a pagefault. */
#define MMU_NOTIFIER_TIMEOUT 1000

struct workqueue_struct *mlx5_ib_page_fault_wq;
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end)
{
	struct mlx5_ib_mr *mr;
	const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT / sizeof(u64)) - 1;
	u64 idx = 0, blk_start_idx = 0;
	int in_block = 0;
	u64 addr;

	if (!umem || !umem->odp_data) {
		pr_err("invalidation called on NULL umem or non-ODP umem\n");
		return;
	}

	mr = umem->odp_data->private;

	if (!mr || !mr->ibmr.pd)
		return;

	start = max_t(u64, ib_umem_start(umem), start);
	end = min_t(u64, ib_umem_end(umem), end);

	/*
	 * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
	 * while we are doing the invalidation, no page fault will attempt to
	 * overwrite the same MTTs. Concurrent invalidations might race us,
	 * but they will write 0s as well, so no difference in the end result.
	 */

	for (addr = start; addr < end; addr += (u64)umem->page_size) {
		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
		/*
		 * Strive to write the MTTs in chunks, but avoid overwriting
		 * non-existing MTTs. The heuristic here can be improved to
		 * estimate the cost of another UMR vs. the cost of bigger
		 * UMR.
		 */
		if (umem->odp_data->dma_list[idx] &
		    (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
			if (!in_block) {
				blk_start_idx = idx;
				in_block = 1;
			}
		} else {
			u64 umr_offset = idx & umr_block_mask;

			if (in_block && umr_offset == 0) {
				mlx5_ib_update_mtt(mr, blk_start_idx,
						   idx - blk_start_idx, 1);
				in_block = 0;
			}
		}
	}
	if (in_block)
		mlx5_ib_update_mtt(mr, blk_start_idx, idx - blk_start_idx + 1,
				   1);

	/*
	 * We are now sure that the device will not access the
	 * memory. We can safely unmap it, and mark it as dirty if
	 * needed.
	 */

	ib_umem_odp_unmap_dma_pages(umem, start, end);
}
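/*
 * Illustrative numbers for the zapping loop above, assuming
 * MLX5_UMR_MTT_ALIGNMENT is 0x40: umr_block_mask = (0x40 / sizeof(u64)) - 1
 * = 7, so a run of present MTTs is zapped with a single
 * mlx5_ib_update_mtt() call and is only cut short at an 8-entry aligned
 * hole, keeping each UMR aligned and batched instead of issuing one UMR per
 * invalidated page.
 */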
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	struct ib_odp_caps *caps = &dev->odp_caps;

	memset(caps, 0, sizeof(*caps));

	if (!MLX5_CAP_GEN(dev->mdev, pg))
		return;

	caps->general_caps = IB_ODP_SUPPORT;

	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
}
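/*
 * Illustrative note: consumers of these capability bits test them the same
 * way the fault handlers below do, e.g. an RDMA write on an RC QP is only
 * serviced when
 * (dev->odp_caps.per_transport_caps.rc_odp_caps & IB_ODP_SUPPORT_WRITE)
 * is set; see mlx5_ib_mr_initiator_pfault_handler().
 */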
static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
						   u32 key)
{
	u32 base_key = mlx5_base_mkey(key);
	struct mlx5_core_mr *mmr = __mlx5_mr_lookup(dev->mdev, base_key);
	struct mlx5_ib_mr *mr = container_of(mmr, struct mlx5_ib_mr, mmr);

	if (!mmr || mmr->key != key || !mr->live)
		return NULL;

	return container_of(mmr, struct mlx5_ib_mr, mmr);
}
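/*
 * The MR returned above is protected only by the device's mr_srcu: the
 * caller (pagefault_single_data_segment() below) takes
 * srcu_read_lock(&mib_dev->mr_srcu) before the lookup and holds it for as
 * long as the MR is in use.
 */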
static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
				      struct mlx5_ib_pfault *pfault,
				      int error)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
	int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn,
					      pfault->mpfault.flags,
					      error);
	if (ret)
		pr_err("Failed to resolve the page fault on QP 0x%x\n",
		       qp->mqp.qpn);
}
/*
 * Handle a single data segment in a page-fault WQE.
 *
 * Returns number of pages retrieved on success. The caller will continue to
 * the next data segment.
 * Can return the following error codes:
 * -EAGAIN to designate a temporary error. The caller will abort handling the
 *  page fault and resolve it.
 * -EFAULT when there's an error mapping the requested pages. The caller will
 *  abort the page fault handling and possibly move the QP to an error state.
 * On other errors the QP should also be closed with an error.
 */
static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
					 struct mlx5_ib_pfault *pfault,
					 u32 key, u64 io_virt, size_t bcnt,
					 u32 *bytes_mapped)
{
	struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device);
	int srcu_key;
	unsigned int current_seq;
	u64 start_idx;
	int npages = 0, ret = 0;
	struct mlx5_ib_mr *mr;
	u64 access_mask = ODP_READ_ALLOWED_BIT;

	srcu_key = srcu_read_lock(&mib_dev->mr_srcu);
	mr = mlx5_ib_odp_find_mr_lkey(mib_dev, key);
	/*
	 * If we didn't find the MR, it means the MR was closed while we were
	 * handling the ODP event. In this case we return -EFAULT so that the
	 * QP will be closed.
	 */
	if (!mr || !mr->ibmr.pd) {
		pr_err("Failed to find relevant mr for lkey=0x%06x, probably the MR was destroyed\n",
		       key);
		ret = -EFAULT;
		goto srcu_unlock;
	}
	if (!mr->umem->odp_data) {
		pr_debug("skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
			 key);
		if (bytes_mapped)
			*bytes_mapped +=
				(bcnt - pfault->mpfault.bytes_committed);
		goto srcu_unlock;
	}
	if (mr->ibmr.pd != qp->ibqp.pd) {
		pr_err("Page-fault with different PDs for QP and MR.\n");
		ret = -EFAULT;
		goto srcu_unlock;
	}

	current_seq = ACCESS_ONCE(mr->umem->odp_data->notifiers_seq);
	/*
	 * Ensure the sequence number is valid for some time before we call
	 * gup.
	 */
	smp_rmb();

	/*
	 * Avoid branches - this code will perform correctly
	 * in all iterations (in iteration 2 and above,
	 * bytes_committed == 0).
	 */
	io_virt += pfault->mpfault.bytes_committed;
	bcnt -= pfault->mpfault.bytes_committed;

	start_idx = (io_virt - (mr->mmr.iova & PAGE_MASK)) >> PAGE_SHIFT;

	if (mr->umem->writable)
		access_mask |= ODP_WRITE_ALLOWED_BIT;
	npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt,
					   access_mask, current_seq);
	if (npages < 0) {
		ret = npages;
		goto srcu_unlock;
	}

	if (npages > 0) {
		mutex_lock(&mr->umem->odp_data->umem_mutex);
		if (!ib_umem_mmu_notifier_retry(mr->umem, current_seq)) {
			/*
			 * No need to check whether the MTTs really belong to
			 * this MR, since ib_umem_odp_map_dma_pages already
			 * checks this.
			 */
			ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0);
		} else {
			ret = -EAGAIN;
		}
		mutex_unlock(&mr->umem->odp_data->umem_mutex);
		if (ret < 0) {
			if (ret != -EAGAIN)
				pr_err("Failed to update mkey page tables\n");
			goto srcu_unlock;
		}

		if (bytes_mapped) {
			u32 new_mappings = npages * PAGE_SIZE -
				(io_virt - round_down(io_virt, PAGE_SIZE));
			*bytes_mapped += min_t(u32, new_mappings, bcnt);
		}
	}

srcu_unlock:
	if (ret == -EAGAIN) {
		if (!mr->umem->odp_data->dying) {
			struct ib_umem_odp *odp_data = mr->umem->odp_data;
			unsigned long timeout =
				msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);

			if (!wait_for_completion_timeout(
					&odp_data->notifier_completion,
					timeout)) {
				pr_warn("timeout waiting for mmu notifier completion\n");
			}
		} else {
			/* The MR is being killed, kill the QP as well. */
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&mib_dev->mr_srcu, srcu_key);
	pfault->mpfault.bytes_committed = 0;
	return ret ? ret : npages;
}
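/*
 * Worked example for the bytes_mapped accounting above, assuming 4KB pages:
 * with io_virt = 0x1234 and npages = 2, new_mappings = 2 * 4096 -
 * (0x1234 - 0x1000) = 7628, i.e. the part of the newly mapped pages that
 * lies at or after io_virt; min_t() then clips it to the bcnt the segment
 * actually requested.
 */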
/*
 * Parse a series of data segments for page fault handling.
 *
 * @qp the QP on which the fault occurred.
 * @pfault contains page fault information.
 * @wqe points at the first data segment in the WQE.
 * @wqe_end points after the end of the WQE.
 * @bytes_mapped receives the number of bytes that the function was able to
 *               map. This allows the caller to decide intelligently whether
 *               enough memory was mapped to resolve the page fault
 *               successfully (e.g. enough for the next MTU, or the entire
 *               WQE).
 * @total_wqe_bytes receives the total data size of this WQE in bytes (minus
 *                  the committed bytes).
 *
 * Returns the number of pages loaded if positive, zero for an empty WQE, or a
 * negative error code.
 */
static int pagefault_data_segments(struct mlx5_ib_qp *qp,
				   struct mlx5_ib_pfault *pfault, void *wqe,
				   void *wqe_end, u32 *bytes_mapped,
				   u32 *total_wqe_bytes, int receive_queue)
{
	int ret = 0, npages = 0;
	u64 io_virt;
	u32 key;
	u32 byte_count;
	size_t bcnt;
	int inline_segment;

	/* Skip SRQ next-WQE segment. */
	if (receive_queue && qp->ibqp.srq)
		wqe += sizeof(struct mlx5_wqe_srq_next_seg);

	if (bytes_mapped)
		*bytes_mapped = 0;
	if (total_wqe_bytes)
		*total_wqe_bytes = 0;

	while (wqe < wqe_end) {
		struct mlx5_wqe_data_seg *dseg = wqe;

		io_virt = be64_to_cpu(dseg->addr);
		key = be32_to_cpu(dseg->lkey);
		byte_count = be32_to_cpu(dseg->byte_count);
		inline_segment = !!(byte_count & MLX5_INLINE_SEG);
		bcnt = byte_count & ~MLX5_INLINE_SEG;

		if (inline_segment) {
			bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
			wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
				     16);
		} else {
			wqe += sizeof(*dseg);
		}

		/* receive WQE end of sg list. */
		if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
		    io_virt == 0)
			break;

		if (!inline_segment && total_wqe_bytes) {
			*total_wqe_bytes += bcnt - min_t(size_t, bcnt,
					pfault->mpfault.bytes_committed);
		}

		/* A zero length data segment designates a length of 2GB. */
		if (bcnt == 0)
			bcnt = 1U << 31;

		if (inline_segment || bcnt <= pfault->mpfault.bytes_committed) {
			pfault->mpfault.bytes_committed -=
				min_t(size_t, bcnt,
				      pfault->mpfault.bytes_committed);
			continue;
		}

		ret = pagefault_single_data_segment(qp, pfault, key, io_virt,
						    bcnt, bytes_mapped);
		if (ret < 0)
			break;
		npages += ret;
	}

	return ret < 0 ? ret : npages;
}
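/*
 * Example of the byte_count decoding in the loop above (illustrative): an
 * inline segment carrying 48 bytes has MLX5_INLINE_SEG set in
 * dseg->byte_count; clearing the flag and applying
 * MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK leaves bcnt = 48, and wqe advances by
 * ALIGN(sizeof(struct mlx5_wqe_inline_seg) + 48, 16) instead of
 * sizeof(struct mlx5_wqe_data_seg). Inline segments never reference user
 * memory, so they are skipped before pagefault_single_data_segment() is
 * called.
 */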
/*
 * Parse initiator WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_initiator_pfault_handler(
	struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
	void **wqe, void **wqe_end, int wqe_length)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
	struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
	u16 wqe_index = pfault->mpfault.wqe.wqe_index;
	unsigned ds, opcode;
#if defined(DEBUG)
	u32 ctrl_wqe_index, ctrl_qpn;
#endif

	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
	if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
		mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
			    ds, wqe_length);
		return -EFAULT;
	}

	if (ds == 0) {
		mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
			    wqe_index, qp->mqp.qpn);
		return -EFAULT;
	}

#if defined(DEBUG)
	ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
			  MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
			 MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
	if (wqe_index != ctrl_wqe_index) {
		mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
			    wqe_index, qp->mqp.qpn,
			    ctrl_wqe_index);
		return -EFAULT;
	}

	ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
		   MLX5_WQE_CTRL_QPN_SHIFT;
	if (qp->mqp.qpn != ctrl_qpn) {
		mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
			    wqe_index, qp->mqp.qpn,
			    ctrl_qpn);
		return -EFAULT;
	}
#endif /* DEBUG */

	*wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
	*wqe += sizeof(*ctrl);

	opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
		 MLX5_WQE_CTRL_OPCODE_MASK;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		switch (opcode) {
		case MLX5_OPCODE_SEND:
		case MLX5_OPCODE_SEND_IMM:
		case MLX5_OPCODE_SEND_INVAL:
			if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
			      IB_ODP_SUPPORT_SEND))
				goto invalid_transport_or_opcode;
			break;
		case MLX5_OPCODE_RDMA_WRITE:
		case MLX5_OPCODE_RDMA_WRITE_IMM:
			if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
			      IB_ODP_SUPPORT_WRITE))
				goto invalid_transport_or_opcode;
			*wqe += sizeof(struct mlx5_wqe_raddr_seg);
			break;
		case MLX5_OPCODE_RDMA_READ:
			if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
			      IB_ODP_SUPPORT_READ))
				goto invalid_transport_or_opcode;
			*wqe += sizeof(struct mlx5_wqe_raddr_seg);
			break;
		default:
			goto invalid_transport_or_opcode;
		}
		break;
	case IB_QPT_UD:
		switch (opcode) {
		case MLX5_OPCODE_SEND:
		case MLX5_OPCODE_SEND_IMM:
			if (!(dev->odp_caps.per_transport_caps.ud_odp_caps &
			      IB_ODP_SUPPORT_SEND))
				goto invalid_transport_or_opcode;
			*wqe += sizeof(struct mlx5_wqe_datagram_seg);
			break;
		default:
			goto invalid_transport_or_opcode;
		}
		break;
	default:
invalid_transport_or_opcode:
		mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode or transport. transport: 0x%x opcode: 0x%x.\n",
			    qp->ibqp.qp_type, opcode);
		return -EFAULT;
	}

	return 0;
}
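/*
 * Illustrative arithmetic for the sizing check above: ds counts 16-byte
 * units (MLX5_WQE_DS_UNITS), so a ctrl segment reporting ds = 8 describes a
 * 128-byte WQE; *wqe_end is set to *wqe + 128 and parsing continues from
 * just past the ctrl segment up to that end.
 */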
/*
 * Parse responder WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_responder_pfault_handler(
	struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
	void **wqe, void **wqe_end, int wqe_length)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
	struct mlx5_ib_wq *wq = &qp->rq;
	int wqe_size = 1 << wq->wqe_shift;

	if (qp->ibqp.srq) {
		mlx5_ib_err(dev, "ODP fault on SRQ is not supported\n");
		return -EFAULT;
	}

	if (qp->wq_sig) {
		mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
		return -EFAULT;
	}

	if (wqe_size > wqe_length) {
		mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
		return -EFAULT;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
		      IB_ODP_SUPPORT_RECV))
			goto invalid_transport_or_opcode;
		break;
	default:
invalid_transport_or_opcode:
		mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
			    qp->ibqp.qp_type);
		return -EFAULT;
	}

	*wqe_end = *wqe + wqe_size;

	return 0;
}
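/*
 * Illustrative arithmetic for the length check above: a receive queue with
 * wq->wqe_shift = 6 has 64-byte receive WQEs (1 << 6), so wqe_length (the
 * number of bytes actually copied from the user's queue) must cover at
 * least 64 bytes before the scatter list can be parsed safely.
 */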
static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
					  struct mlx5_ib_pfault *pfault)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
	int ret;
	void *wqe, *wqe_end;
	u32 bytes_mapped, total_wqe_bytes;
	char *buffer = NULL;
	int resume_with_error = 0;
	u16 wqe_index = pfault->mpfault.wqe.wqe_index;
	int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR;

	buffer = (char *)__get_free_page(GFP_KERNEL);
	if (!buffer) {
		mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
		resume_with_error = 1;
		goto resolve_page_fault;
	}

	ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
				    PAGE_SIZE);
	if (ret < 0) {
		mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%x, wqe_index=%x, qpn=%x\n",
			    -ret, wqe_index, qp->mqp.qpn);
		resume_with_error = 1;
		goto resolve_page_fault;
	}

	wqe = buffer;
	if (requestor)
		ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe,
							  &wqe_end, ret);
	else
		ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe,
							  &wqe_end, ret);
	if (ret < 0) {
		resume_with_error = 1;
		goto resolve_page_fault;
	}

	if (wqe >= wqe_end) {
		mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
		resume_with_error = 1;
		goto resolve_page_fault;
	}

	ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped,
				      &total_wqe_bytes, !requestor);
	if (ret == -EAGAIN) {
		goto resolve_page_fault;
	} else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
		mlx5_ib_err(dev, "Error getting user pages for page fault. Error: 0x%x\n",
			    -ret);
		resume_with_error = 1;
		goto resolve_page_fault;
	}

resolve_page_fault:
	mlx5_ib_page_fault_resume(qp, pfault, resume_with_error);
	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, flags: 0x%x\n",
		    qp->mqp.qpn, resume_with_error, pfault->mpfault.flags);

	free_page((unsigned long)buffer);
}
static int pages_in_range(u64 address, u32 length)
{
	return (ALIGN(address + length, PAGE_SIZE) -
		(address & PAGE_MASK)) >> PAGE_SHIFT;
}
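/*
 * Worked example, assuming 4KB pages: address = 0x1ff0 with length = 0x20
 * straddles a page boundary, so ALIGN(0x2010, 4096) - (0x1ff0 & PAGE_MASK)
 * = 0x3000 - 0x1000 = 0x2000, i.e. 2 pages.
 */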
static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
					   struct mlx5_ib_pfault *pfault)
{
	struct mlx5_pagefault *mpfault = &pfault->mpfault;
	u64 address;
	u32 length;
	u32 prefetch_len = mpfault->bytes_committed;
	int prefetch_activated = 0;
	u32 rkey = mpfault->rdma.r_key;
	int ret;

	/* The RDMA responder handler handles the page fault in two parts.
	 * First it brings the necessary pages for the current packet
	 * (and uses the pfault context), and then (after resuming the QP)
	 * prefetches more pages. The second operation cannot use the pfault
	 * context and therefore uses the dummy_pfault context allocated on
	 * the stack */
	struct mlx5_ib_pfault dummy_pfault = {};

	dummy_pfault.mpfault.bytes_committed = 0;

	mpfault->rdma.rdma_va += mpfault->bytes_committed;
	mpfault->rdma.rdma_op_len -= min(mpfault->bytes_committed,
					 mpfault->rdma.rdma_op_len);
	mpfault->bytes_committed = 0;

	address = mpfault->rdma.rdma_va;
	length  = mpfault->rdma.rdma_op_len;

	/* For some operations, the hardware cannot tell the exact message
	 * length, and in those cases it reports zero. Use prefetch
	 * logic. */
	if (length == 0) {
		prefetch_activated = 1;
		length = mpfault->rdma.packet_size;
		prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
	}

	ret = pagefault_single_data_segment(qp, pfault, rkey, address, length,
					    NULL);
	if (ret == -EAGAIN) {
		/* We're racing with an invalidation, don't prefetch */
		prefetch_activated = 0;
	} else if (ret < 0 || pages_in_range(address, length) > ret) {
		mlx5_ib_page_fault_resume(qp, pfault, 1);
		return;
	}

	mlx5_ib_page_fault_resume(qp, pfault, 0);

	/* At this point, there might be a new pagefault already arriving in
	 * the eq, switch to the dummy pagefault for the rest of the
	 * processing. We're still OK with the objects being alive as the
	 * work-queue is being fenced. */

	if (prefetch_activated) {
		ret = pagefault_single_data_segment(qp, &dummy_pfault, rkey,
						    address,
						    prefetch_len,
						    NULL);
		if (ret < 0) {
			pr_warn("Prefetch failed (ret = %d, prefetch_activated = %d) for QPN %d, address: 0x%.16llx, length = 0x%.16x\n",
				ret, prefetch_activated,
				qp->ibqp.qp_num, address, prefetch_len);
		}
	}
}
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault)
{
	u8 event_subtype = pfault->mpfault.event_subtype;

	switch (event_subtype) {
	case MLX5_PFAULT_SUBTYPE_WQE:
		mlx5_ib_mr_wqe_pfault_handler(qp, pfault);
		break;
	case MLX5_PFAULT_SUBTYPE_RDMA:
		mlx5_ib_mr_rdma_pfault_handler(qp, pfault);
		break;
	default:
		pr_warn("Invalid page fault event subtype: 0x%x\n",
			event_subtype);
		mlx5_ib_page_fault_resume(qp, pfault, 1);
		break;
	}
}
static void mlx5_ib_qp_pfault_action(struct work_struct *work)
{
	struct mlx5_ib_pfault *pfault = container_of(work,
						     struct mlx5_ib_pfault,
						     work);
	enum mlx5_ib_pagefault_context context =
		mlx5_ib_get_pagefault_context(&pfault->mpfault);
	struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp,
					     pagefaults[context]);
	mlx5_ib_mr_pfault_handler(qp, pfault);
}
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
	qp->disable_page_faults = 1;
	spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);

	/*
	 * Note that at this point, we are guaranteed that no more
	 * work queue elements will be posted to the work queue with
	 * the QP we are closing.
	 */
	flush_workqueue(mlx5_ib_page_fault_wq);
}
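/*
 * Illustrative usage of the disable/enable pair (not taken from this file):
 *
 *	mlx5_ib_qp_disable_pagefaults(qp);
 *	... modify or tear down the QP ...
 *	mlx5_ib_qp_enable_pagefaults(qp);
 *
 * The flush_workqueue() above guarantees that any page-fault work already
 * queued for this QP has completed before the caller proceeds.
 */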
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
	qp->disable_page_faults = 0;
	spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
}
static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp,
				   struct mlx5_pagefault *pfault)
{
	/*
	 * Note that we will only get one fault event per QP per context
	 * (responder/initiator, read/write), until we resolve the page fault
	 * with the mlx5_ib_page_fault_resume command. Since this function is
	 * called from within the work element, there is no risk of missing
	 * events.
	 */
	struct mlx5_ib_qp *mibqp = to_mibqp(qp);
	enum mlx5_ib_pagefault_context context =
		mlx5_ib_get_pagefault_context(pfault);
	struct mlx5_ib_pfault *qp_pfault = &mibqp->pagefaults[context];

	qp_pfault->mpfault = *pfault;

	/* No need to stop interrupts here since we are in an interrupt */
	spin_lock(&mibqp->disable_page_faults_lock);
	if (!mibqp->disable_page_faults)
		queue_work(mlx5_ib_page_fault_wq, &qp_pfault->work);
	spin_unlock(&mibqp->disable_page_faults_lock);
}
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)
{
	int i;

	qp->disable_page_faults = 1;
	spin_lock_init(&qp->disable_page_faults_lock);

	qp->mqp.pfault_handler = mlx5_ib_pfault_handler;

	for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i)
		INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action);
}
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev)
{
	int ret;

	ret = init_srcu_struct(&ibdev->mr_srcu);
	if (ret)
		return ret;

	return 0;
}

void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
{
	cleanup_srcu_struct(&ibdev->mr_srcu);
}

int __init mlx5_ib_odp_init(void)
{
	mlx5_ib_page_fault_wq =
		create_singlethread_workqueue("mlx5_ib_page_faults");
	if (!mlx5_ib_page_fault_wq)
		return -ENOMEM;

	return 0;
}

void mlx5_ib_odp_cleanup(void)
{
	destroy_workqueue(mlx5_ib_page_fault_wq);
}