/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <linux/interval_tree.h>
#include <linux/hmm.h>
#include <linux/pagemap.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
				   const struct mmu_interval_notifier_ops *ops)
{
	int ret;

	umem_odp->umem.is_odp = 1;
	mutex_init(&umem_odp->umem_mutex);

	if (!umem_odp->is_implicit_odp) {
		size_t page_size = 1UL << umem_odp->page_shift;
		unsigned long start;
		unsigned long end;
		size_t ndmas, npfns;

		start = ALIGN_DOWN(umem_odp->umem.address, page_size);
		if (check_add_overflow(umem_odp->umem.address,
				       (unsigned long)umem_odp->umem.length,
				       &end))
			return -EOVERFLOW;
		end = ALIGN(end, page_size);
		if (unlikely(end < page_size))
			return -EOVERFLOW;

		ndmas = (end - start) >> umem_odp->page_shift;
		if (!ndmas)
			return -EINVAL;

		npfns = (end - start) >> PAGE_SHIFT;
		umem_odp->pfn_list = kvcalloc(
			npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
		if (!umem_odp->pfn_list)
			return -ENOMEM;

		umem_odp->dma_list = kvcalloc(
			ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL);
		if (!umem_odp->dma_list) {
			ret = -ENOMEM;
			goto out_pfn_list;
		}

		ret = mmu_interval_notifier_insert(&umem_odp->notifier,
						   umem_odp->umem.owning_mm,
						   start, end - start, ops);
		if (ret)
			goto out_dma_list;
	}

	return 0;

out_dma_list:
	kvfree(umem_odp->dma_list);
out_pfn_list:
	kvfree(umem_odp->pfn_list);
	return ret;
}

/**
 * ib_umem_odp_alloc_implicit - Allocate a parent implicit ODP umem
 *
 * Implicit ODP umems do not have a VA range and do not have any page lists.
 * They exist only to hold the per_mm reference to help the driver create
 * children umems.
 *
 * @device: IB device to create UMEM
 * @access: ib_reg_mr access flags
 */
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
					       int access)
{
	struct ib_umem *umem;
	struct ib_umem_odp *umem_odp;
	int ret;

	if (access & IB_ACCESS_HUGETLB)
		return ERR_PTR(-EINVAL);

	umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
	if (!umem_odp)
		return ERR_PTR(-ENOMEM);
	umem = &umem_odp->umem;
	umem->ibdev = device;
	umem->writable = ib_access_writable(access);
	umem->owning_mm = current->mm;
	umem_odp->is_implicit_odp = 1;
	umem_odp->page_shift = PAGE_SHIFT;

	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	ret = ib_init_umem_odp(umem_odp, NULL);
	if (ret) {
		put_pid(umem_odp->tgid);
		kfree(umem_odp);
		return ERR_PTR(ret);
	}
	return umem_odp;
}
EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
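
/*
 * Example (illustrative sketch only, not part of this file): a driver
 * supporting implicit ODP might allocate the parent umem at MR registration
 * time roughly as follows; "my_mr" and its fields are hypothetical.
 *
 *	umem_odp = ib_umem_odp_alloc_implicit(ibdev, access_flags);
 *	if (IS_ERR(umem_odp))
 *		return ERR_CAST(umem_odp);
 *	my_mr->umem = &umem_odp->umem;
 *
 * Children covering actual VA ranges are then created on demand with
 * ib_umem_odp_alloc_child() from the page-fault path.
 */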

/**
 * ib_umem_odp_alloc_child - Allocate a child ODP umem under an implicit
 *                           parent ODP umem
 *
 * @root: The parent umem enclosing the child. This must be allocated using
 *        ib_umem_odp_alloc_implicit()
 * @addr: The starting userspace VA
 * @size: The length of the userspace VA
 * @ops: MMU interval ops, currently only @invalidate
 */
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
			size_t size,
			const struct mmu_interval_notifier_ops *ops)
{
	/*
	 * Caller must ensure that root cannot be freed during the call to
	 * this function.
	 */
	struct ib_umem_odp *odp_data;
	struct ib_umem *umem;
	int ret;

	if (WARN_ON(!root->is_implicit_odp))
		return ERR_PTR(-EINVAL);

	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
	if (!odp_data)
		return ERR_PTR(-ENOMEM);
	umem = &odp_data->umem;
	umem->ibdev = root->umem.ibdev;
	umem->length = size;
	umem->address = addr;
	umem->writable = root->umem.writable;
	umem->owning_mm = root->umem.owning_mm;
	odp_data->page_shift = PAGE_SHIFT;
	odp_data->notifier.ops = ops;

	/*
	 * A mmget must be held when registering a notifier, the owning_mm only
	 * has a mm_grab at this point.
	 */
	if (!mmget_not_zero(umem->owning_mm)) {
		ret = -EFAULT;
		goto out_free;
	}

	odp_data->tgid = get_pid(root->tgid);
	ret = ib_init_umem_odp(odp_data, ops);
	if (ret)
		goto out_tgid;
	mmput(umem->owning_mm);
	return odp_data;

out_tgid:
	put_pid(odp_data->tgid);
	mmput(umem->owning_mm);
out_free:
	kfree(odp_data);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_umem_odp_alloc_child);
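
/*
 * Example (illustrative sketch only): from a page-fault handler on an
 * implicit MR, a driver could create a child umem covering the faulting
 * range. "root_odp", "start", "length" and "my_ops" are hypothetical names
 * supplied by the driver.
 *
 *	child = ib_umem_odp_alloc_child(root_odp, start, length, &my_ops);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 */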

/**
 * ib_umem_odp_get - Create a umem_odp for a userspace va
 *
 * @device: IB device struct to get UMEM
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @ops: MMU interval ops, currently only @invalidate
 *
 * The driver should use this when the access flags indicate ODP memory. It
 * avoids pinning; instead, it stores the mm for future page fault handling
 * in conjunction with MMU notifiers.
 */
struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
				    unsigned long addr, size_t size, int access,
				    const struct mmu_interval_notifier_ops *ops)
{
	struct ib_umem_odp *umem_odp;
	struct mm_struct *mm;
	int ret;

	if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
		return ERR_PTR(-EINVAL);

	umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
	if (!umem_odp)
		return ERR_PTR(-ENOMEM);

	umem_odp->umem.ibdev = device;
	umem_odp->umem.length = size;
	umem_odp->umem.address = addr;
	umem_odp->umem.writable = ib_access_writable(access);
	umem_odp->umem.owning_mm = mm = current->mm;
	umem_odp->notifier.ops = ops;

	umem_odp->page_shift = PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	if (access & IB_ACCESS_HUGETLB)
		umem_odp->page_shift = HPAGE_SHIFT;
#endif

	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	ret = ib_init_umem_odp(umem_odp, ops);
	if (ret)
		goto err_put_pid;
	return umem_odp;

err_put_pid:
	put_pid(umem_odp->tgid);
	kfree(umem_odp);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_umem_odp_get);
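
/*
 * Example (illustrative sketch only): a driver's reg_user_mr handler that
 * detects IB_ACCESS_ON_DEMAND would use ib_umem_odp_get() instead of
 * ib_umem_get(). "my_ops" is a hypothetical mmu_interval_notifier_ops with
 * an invalidate callback; "mr" is a hypothetical driver MR structure.
 *
 *	if (access_flags & IB_ACCESS_ON_DEMAND) {
 *		umem_odp = ib_umem_odp_get(ibdev, start, length,
 *					   access_flags, &my_ops);
 *		if (IS_ERR(umem_odp))
 *			return ERR_CAST(umem_odp);
 *		mr->umem = &umem_odp->umem;
 *	}
 */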

void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	if (!umem_odp->is_implicit_odp) {
		mutex_lock(&umem_odp->umem_mutex);
		ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
					    ib_umem_end(umem_odp));
		mutex_unlock(&umem_odp->umem_mutex);
		mmu_interval_notifier_remove(&umem_odp->notifier);
		kvfree(umem_odp->dma_list);
		kvfree(umem_odp->pfn_list);
	}
	put_pid(umem_odp->tgid);
	kfree(umem_odp);
}
EXPORT_SYMBOL(ib_umem_odp_release);
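
/*
 * Example teardown order (illustrative sketch only): the driver must fence
 * all device access to the MR before releasing the umem, e.g. in a
 * hypothetical dereg_mr handler:
 *
 *	my_destroy_mkey(mr);		// no further HW access after this
 *	ib_umem_odp_release(to_ib_umem_odp(mr->umem));
 */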

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem_odp: the umem to insert the page to.
 * @dma_index: index in the umem to add the dma to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 *
 * The function returns -EFAULT if the DMA mapping operation fails.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem_odp *umem_odp,
		unsigned int dma_index,
		struct page *page,
		u64 access_mask)
{
	struct ib_device *dev = umem_odp->umem.ibdev;
	dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];

	if (*dma_addr) {
		/*
		 * If the page is already dma mapped it means it went through
		 * a non-invalidating transition, like read-only to writable.
		 * We only need to update the access permissions.
		 */
		*dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask;
		return 0;
	}

	*dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
				    DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(dev, *dma_addr)) {
		*dma_addr = 0;
		return -EFAULT;
	}
	umem_odp->npages++;
	*dma_addr |= access_mask;
	return 0;
}

/**
 * ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it.
 *
 * Maps the range passed in the argument to DMA addresses.
 * The DMA addresses of the mapped pages are updated in umem_odp->dma_list.
 * Upon success the ODP MR will be locked to let the caller complete its device
 * page table update.
 *
 * Returns the number of pages mapped on success, or a negative error code
 * on failure.
 * @umem_odp: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @fault: is faulting required for the given range
 */
int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
				 u64 bcnt, u64 access_mask, bool fault)
			__acquires(&umem_odp->umem_mutex)
{
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
	int pfn_index, dma_index, ret = 0, start_idx;
	unsigned int page_shift, hmm_order, pfn_start_idx;
	unsigned long num_pfns, current_seq;
	struct hmm_range range = {};
	unsigned long timeout;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem_odp) ||
	    user_virt + bcnt > ib_umem_end(umem_odp))
		return -EFAULT;

	page_shift = umem_odp->page_shift;

	/*
	 * owning_process is allowed to be NULL; this means the mm is somehow
	 * outliving the originating process. Presumably mmget_not_zero()
	 * will fail in this case.
	 */
	owning_process = get_pid_task(umem_odp->tgid, PIDTYPE_PID);
	if (!owning_process || !mmget_not_zero(owning_mm)) {
		ret = -EINVAL;
		goto out_put_task;
	}

	range.notifier = &umem_odp->notifier;
	range.start = ALIGN_DOWN(user_virt, 1UL << page_shift);
	range.end = ALIGN(user_virt + bcnt, 1UL << page_shift);
	pfn_start_idx = (range.start - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
	num_pfns = (range.end - range.start) >> PAGE_SHIFT;
	if (fault) {
		range.default_flags = HMM_PFN_REQ_FAULT;

		if (access_mask & ODP_WRITE_ALLOWED_BIT)
			range.default_flags |= HMM_PFN_REQ_WRITE;
	}

	range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]);
	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);

retry:
	current_seq = range.notifier_seq =
		mmu_interval_read_begin(&umem_odp->notifier);

	mmap_read_lock(owning_mm);
	ret = hmm_range_fault(&range);
	mmap_read_unlock(owning_mm);
	if (unlikely(ret)) {
		if (ret == -EBUSY && !time_after(jiffies, timeout))
			goto retry;
		goto out_put_mm;
	}

	start_idx = (range.start - ib_umem_start(umem_odp)) >> page_shift;
	dma_index = start_idx;

	mutex_lock(&umem_odp->umem_mutex);
	if (mmu_interval_read_retry(&umem_odp->notifier, current_seq)) {
		mutex_unlock(&umem_odp->umem_mutex);
		goto retry;
	}

	for (pfn_index = 0; pfn_index < num_pfns;
		pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {

		if (fault) {
			/*
			 * Since we asked for hmm_range_fault() to populate
			 * pages it shouldn't return an error entry on success.
			 */
			WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
			WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
		} else {
			if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
				WARN_ON(umem_odp->dma_list[dma_index]);
				continue;
			}
			access_mask = ODP_READ_ALLOWED_BIT;
			if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
				access_mask |= ODP_WRITE_ALLOWED_BIT;
		}

		hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
		/* If a hugepage was detected and ODP wasn't set for it, the
		 * umem page_shift will be used; the opposite case is an error.
		 */
		if (hmm_order + PAGE_SHIFT < page_shift) {
			ret = -EINVAL;
			ibdev_dbg(umem_odp->umem.ibdev,
				  "%s: unexpected hmm_order %d, page_shift %d\n",
				  __func__, hmm_order, page_shift);
			break;
		}

		ret = ib_umem_odp_map_dma_single_page(
				umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
				access_mask);
		if (ret < 0) {
			ibdev_dbg(umem_odp->umem.ibdev,
				  "ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
			break;
		}
	}
	/* upon success the lock stays held for the caller */
	if (!ret)
		ret = dma_index - start_idx;
	else
		mutex_unlock(&umem_odp->umem_mutex);

out_put_mm:
	mmput(owning_mm);
out_put_task:
	if (owning_process)
		put_task_struct(owning_process);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
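
/*
 * Example fault path (illustrative sketch only): map and lock the range,
 * mirror the updated dma_list into the device page table, then drop
 * umem_mutex. "my_update_device_pagetable" is a hypothetical driver helper.
 *
 *	np = ib_umem_odp_map_dma_and_lock(odp, io_virt, bcnt, access_mask, true);
 *	if (np < 0)
 *		return np;
 *	ret = my_update_device_pagetable(mr, odp);	// consumes odp->dma_list
 *	mutex_unlock(&odp->umem_mutex);
 */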

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
				 u64 bound)
{
	dma_addr_t dma_addr;
	dma_addr_t dma;
	int idx;
	u64 addr;
	struct ib_device *dev = umem_odp->umem.ibdev;

	lockdep_assert_held(&umem_odp->umem_mutex);

	virt = max_t(u64, virt, ib_umem_start(umem_odp));
	bound = min_t(u64, bound, ib_umem_end(umem_odp));
	for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
		dma = umem_odp->dma_list[idx];

		/* A zero dma_list entry means the page was never DMA mapped */
		if (dma) {
			unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
			struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);

			dma_addr = dma & ODP_DMA_ADDR_MASK;
			ib_dma_unmap_page(dev, dma_addr,
					  BIT(umem_odp->page_shift),
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			umem_odp->dma_list[idx] = 0;
			umem_odp->npages--;
		}
	}
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
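
/*
 * Example invalidate callback (illustrative sketch only): a driver's
 * mmu_interval_notifier_ops->invalidate typically serializes on umem_mutex,
 * tears down the device mapping for the range, then calls
 * ib_umem_odp_unmap_dma_pages() under the same mutex. Names other than the
 * core APIs ("my_invalidate", "my_zap_device_ptes") are hypothetical.
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *mni,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		struct ib_umem_odp *odp =
 *			container_of(mni, struct ib_umem_odp, notifier);
 *
 *		if (!mmu_notifier_range_blockable(range))
 *			return false;
 *		mutex_lock(&odp->umem_mutex);
 *		mmu_interval_set_seq(mni, cur_seq);
 *		my_zap_device_ptes(odp, range->start, range->end);
 *		ib_umem_odp_unmap_dma_pages(odp, range->start, range->end);
 *		mutex_unlock(&odp->umem_mutex);
 *		return true;
 *	}
 */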