/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <linux/interval_tree.h>
#include <linux/pagemap.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
				   const struct mmu_interval_notifier_ops *ops)
{
	int ret;

	umem_odp->umem.is_odp = 1;
	mutex_init(&umem_odp->umem_mutex);

	if (!umem_odp->is_implicit_odp) {
		size_t page_size = 1UL << umem_odp->page_shift;
		unsigned long start;
		unsigned long end;
		size_t pages;

		start = ALIGN_DOWN(umem_odp->umem.address, page_size);
		if (check_add_overflow(umem_odp->umem.address,
				       (unsigned long)umem_odp->umem.length,
				       &end))
			return -EOVERFLOW;
		end = ALIGN(end, page_size);
		if (unlikely(end < page_size))
			return -EOVERFLOW;

		pages = (end - start) >> umem_odp->page_shift;
		if (!pages)
			return -EINVAL;

		umem_odp->page_list = kvcalloc(
			pages, sizeof(*umem_odp->page_list), GFP_KERNEL);
		if (!umem_odp->page_list)
			return -ENOMEM;

		umem_odp->dma_list = kvcalloc(
			pages, sizeof(*umem_odp->dma_list), GFP_KERNEL);
		if (!umem_odp->dma_list) {
			ret = -ENOMEM;
			goto out_page_list;
		}

		ret = mmu_interval_notifier_insert(&umem_odp->notifier,
						   umem_odp->umem.owning_mm,
						   start, end - start, ops);
		if (ret)
			goto out_dma_list;
	}

	return 0;

out_dma_list:
	kvfree(umem_odp->dma_list);
out_page_list:
	kvfree(umem_odp->page_list);
	return ret;
}

/**
 * ib_umem_odp_alloc_implicit - Allocate a parent implicit ODP umem
 *
 * Implicit ODP umems do not have a VA range and do not have any page lists.
 * They exist only to hold the per_mm reference to help the driver create
 * child umems.
 *
 * @device: IB device to create UMEM
 * @access: ib_reg_mr access flags
 */
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
					       int access)
{
	struct ib_umem *umem;
	struct ib_umem_odp *umem_odp;
	int ret;

	if (access & IB_ACCESS_HUGETLB)
		return ERR_PTR(-EINVAL);

	umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
	if (!umem_odp)
		return ERR_PTR(-ENOMEM);
	umem = &umem_odp->umem;
	umem->ibdev = device;
	umem->writable = ib_access_writable(access);
	umem->owning_mm = current->mm;
	umem_odp->is_implicit_odp = 1;
	umem_odp->page_shift = PAGE_SHIFT;

	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	ret = ib_init_umem_odp(umem_odp, NULL);
	if (ret) {
		put_pid(umem_odp->tgid);
		kfree(umem_odp);
		return ERR_PTR(ret);
	}
	return umem_odp;
}
EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);

/**
 * ib_umem_odp_alloc_child - Allocate a child ODP umem under an implicit
 *                           parent ODP umem
 *
 * @root: The parent umem enclosing the child. This must be allocated using
 *        ib_umem_odp_alloc_implicit()
 * @addr: The starting userspace VA
 * @size: The length of the userspace VA
 */
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
			size_t size,
			const struct mmu_interval_notifier_ops *ops)
{
	/*
	 * Caller must ensure that root cannot be freed during the call to
	 * ib_umem_odp_alloc_child.
	 */
	struct ib_umem_odp *odp_data;
	struct ib_umem *umem;
	int ret;

	if (WARN_ON(!root->is_implicit_odp))
		return ERR_PTR(-EINVAL);

	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
	if (!odp_data)
		return ERR_PTR(-ENOMEM);
	umem = &odp_data->umem;
	umem->ibdev = root->umem.ibdev;
	umem->length = size;
	umem->address = addr;
	umem->writable = root->umem.writable;
	umem->owning_mm = root->umem.owning_mm;
	odp_data->page_shift = PAGE_SHIFT;
	odp_data->notifier.ops = ops;

	odp_data->tgid = get_pid(root->tgid);
	ret = ib_init_umem_odp(odp_data, ops);
	if (ret) {
		put_pid(odp_data->tgid);
		kfree(odp_data);
		return ERR_PTR(ret);
	}
	return odp_data;
}
EXPORT_SYMBOL(ib_umem_odp_alloc_child);

/**
 * ib_umem_odp_get - Create a umem_odp for a userspace va
 *
 * @device: IB device struct to get UMEM
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 *
 * The driver should use this function when the access flags indicate ODP
 * memory. It avoids pinning; instead, it stores the mm for future page fault
 * handling in conjunction with MMU notifiers.
 */
struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
				    unsigned long addr, size_t size, int access,
				    const struct mmu_interval_notifier_ops *ops)
{
	struct ib_umem_odp *umem_odp;
	struct mm_struct *mm;
	int ret;

	if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
		return ERR_PTR(-EINVAL);

	umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
	if (!umem_odp)
		return ERR_PTR(-ENOMEM);

	umem_odp->umem.ibdev = device;
	umem_odp->umem.length = size;
	umem_odp->umem.address = addr;
	umem_odp->umem.writable = ib_access_writable(access);
	umem_odp->umem.owning_mm = mm = current->mm;
	umem_odp->notifier.ops = ops;

	umem_odp->page_shift = PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	if (access & IB_ACCESS_HUGETLB)
		umem_odp->page_shift = HPAGE_SHIFT;
#endif

	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	ret = ib_init_umem_odp(umem_odp, ops);
	if (ret)
		goto err_put_pid;
	return umem_odp;

err_put_pid:
	put_pid(umem_odp->tgid);
	kfree(umem_odp);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_umem_odp_get);

void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	if (!umem_odp->is_implicit_odp) {
		mutex_lock(&umem_odp->umem_mutex);
		ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
					    ib_umem_end(umem_odp));
		mutex_unlock(&umem_odp->umem_mutex);
		mmu_interval_notifier_remove(&umem_odp->notifier);
		kvfree(umem_odp->dma_list);
		kvfree(umem_odp->page_list);
		put_pid(umem_odp->tgid);
	}
	kfree(umem_odp);
}
EXPORT_SYMBOL(ib_umem_odp_release);

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem_odp: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem_odp->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation fails. For on-demand
 * pinning, the page is released whenever it isn't stored in the umem.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem_odp *umem_odp,
		unsigned int page_index,
		struct page *page,
		u64 access_mask,
		unsigned long current_seq)
{
	struct ib_device *dev = umem_odp->umem.ibdev;
	dma_addr_t dma_addr;
	int ret = 0;

	if (mmu_interval_check_retry(&umem_odp->notifier, current_seq)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!(umem_odp->dma_list[page_index])) {
		dma_addr =
			ib_dma_map_page(dev, page, 0, BIT(umem_odp->page_shift),
					DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr)) {
			ret = -EFAULT;
			goto out;
		}
		umem_odp->dma_list[page_index] = dma_addr | access_mask;
		umem_odp->page_list[page_index] = page;
		umem_odp->npages++;
	} else if (umem_odp->page_list[page_index] == page) {
		umem_odp->dma_list[page_index] |= access_mask;
	} else {
		/*
		 * There is a race here where we could have done:
		 *
		 *         CPU0                             CPU1
		 *   get_user_pages()
		 *                                       invalidate()
		 *                                       page_fault()
		 *   mutex_lock(umem_mutex)
		 *    page from GUP != page in ODP
		 *
		 * It should be prevented by the retry test above as reading
		 * the seq number should be reliable under the
		 * umem_mutex. Thus something is really not working right if
		 * things get here.
		 */
		WARN(true,
		     "Got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
		     umem_odp->page_list[page_index], page);
		ret = -EAGAIN;
	}

out:
	put_page(page);
	return ret;
}

/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem_odp->dma_list.
 *
 * Returns the number of pages mapped on success, or a negative error code
 * for failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 * An -ENOENT error code indicates that the userspace process is being
 * terminated and mm was already destroyed.
 * @umem_odp: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. the sequence number is read from
 *               umem_odp->notifiers_seq before calling this function
 */
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq)
{
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
	struct page **local_page_list = NULL;
	u64 page_mask, off;
	int j, k, ret = 0, start_idx, npages = 0;
	unsigned int flags = 0, page_shift;
	phys_addr_t p = 0;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem_odp) ||
	    user_virt + bcnt > ib_umem_end(umem_odp))
		return -EFAULT;

	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	page_shift = umem_odp->page_shift;
	page_mask = ~(BIT(page_shift) - 1);
	off = user_virt & (~page_mask);
	user_virt = user_virt & page_mask;
	bcnt += off; /* Charge for the first page offset as well. */

	/*
	 * owning_process is allowed to be NULL; this means the mm somehow
	 * outlived the originating process. Presumably mmget_not_zero will
	 * fail in this case.
	 */
	owning_process = get_pid_task(umem_odp->tgid, PIDTYPE_PID);
	if (!owning_process || !mmget_not_zero(owning_mm)) {
		ret = -EINVAL;
		goto out_put_task;
	}

	if (access_mask & ODP_WRITE_ALLOWED_BIT)
		flags |= FOLL_WRITE;

	start_idx = (user_virt - ib_umem_start(umem_odp)) >> page_shift;
	k = start_idx;

	while (bcnt > 0) {
		const size_t gup_num_pages = min_t(size_t,
				ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
				PAGE_SIZE / sizeof(struct page *));

		down_read(&owning_mm->mmap_sem);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking dma_list for 0 before calling
		 * get_user_pages. However, this makes the code much more
		 * complex (and doesn't gain us much performance in most use
		 * cases).
		 */
		npages = get_user_pages_remote(owning_process, owning_mm,
				user_virt, gup_num_pages,
				flags, local_page_list, NULL, NULL);
		up_read(&owning_mm->mmap_sem);

		if (npages < 0) {
			if (npages != -EAGAIN)
				pr_warn("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
			else
				pr_debug("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
			break;
		}

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		mutex_lock(&umem_odp->umem_mutex);
		for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
			if (user_virt & ~page_mask) {
				p += PAGE_SIZE;
				if (page_to_phys(local_page_list[j]) != p) {
					ret = -EFAULT;
					break;
				}
				put_page(local_page_list[j]);
				continue;
			}

			ret = ib_umem_odp_map_dma_single_page(
					umem_odp, k, local_page_list[j],
					access_mask, current_seq);
			if (ret < 0) {
				if (ret != -EAGAIN)
					pr_warn("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
				else
					pr_debug("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
				break;
			}

			p = page_to_phys(local_page_list[j]);
			k++;
		}
		mutex_unlock(&umem_odp->umem_mutex);

		if (ret < 0) {
			/*
			 * Release pages, remembering that the first page
			 * to hit an error was already released by
			 * ib_umem_odp_map_dma_single_page().
			 */
			if (npages - (j + 1) > 0)
				release_pages(&local_page_list[j + 1],
					      npages - (j + 1));
			break;
		}
	}

	if (ret >= 0) {
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	if (owning_process)
		put_task_struct(owning_process);
	free_page((unsigned long)local_page_list);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
				 u64 bound)
{
	u64 addr;
	u64 idx;
	struct ib_device *dev = umem_odp->umem.ibdev;

	lockdep_assert_held(&umem_odp->umem_mutex);

	virt = max_t(u64, virt, ib_umem_start(umem_odp));
	bound = min_t(u64, bound, ib_umem_end(umem_odp));
	/* Note that during the run of this function, the
	 * notifiers_count of the MR is > 0, preventing any racing
	 * faults from completing. We might be racing with other
	 * invalidations, so we must make sure we free each page only
	 * once. */
	for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
		if (umem_odp->page_list[idx]) {
			struct page *page = umem_odp->page_list[idx];
			dma_addr_t dma = umem_odp->dma_list[idx];
			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

			WARN_ON(!dma_addr);

			ib_dma_unmap_page(dev, dma_addr,
					  BIT(umem_odp->page_shift),
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			umem_odp->page_list[idx] = NULL;
			umem_odp->dma_list[idx] = 0;
			umem_odp->npages--;
		}
	}
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);