drivers/infiniband/core/umem_odp.c

/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <linux/interval_tree_generic.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

/*
 * The per-context interval tree keeps track of the memory regions for
 * which the HW device requested to receive notifications when the
 * related memory mapping is changed.
 *
 * The context's umem_rwsem protects the tree (see the down_read/
 * down_write calls throughout this file).
 */

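/*
 * node_start() and node_last() supply the per-umem interval bounds that
 * the INTERVAL_TREE_DEFINE() instance below uses to key umems in the
 * per-context tree.
 */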
static u64 node_start(struct umem_odp_node *n)
{
        struct ib_umem_odp *umem_odp =
                        container_of(n, struct ib_umem_odp, interval_tree);

        return ib_umem_start(umem_odp->umem);
}

/* Note that the representation of the intervals in the interval tree
 * considers the ending point as contained in the interval, while the
 * function ib_umem_end returns the first address which is not contained
 * in the umem.
 */
static u64 node_last(struct umem_odp_node *n)
{
        struct ib_umem_odp *umem_odp =
                        container_of(n, struct ib_umem_odp, interval_tree);

        return ib_umem_end(umem_odp->umem) - 1;
}

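/*
 * For example (illustrative values): a umem spanning the half-open range
 * [0x1000, 0x3000) has ib_umem_start() == 0x1000 and ib_umem_end() ==
 * 0x3000, and is therefore keyed in the tree as the closed interval
 * [0x1000, 0x2fff].
 */
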
INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
                     node_start, node_last, static, rbt_ib_umem)

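/*
 * The INTERVAL_TREE_DEFINE() instance above generates the static helpers
 * used throughout this file: rbt_ib_umem_insert(), rbt_ib_umem_remove(),
 * rbt_ib_umem_iter_first() and rbt_ib_umem_iter_next().
 *
 * The two accounting helpers below bracket a running mmu notifier for a
 * single umem: notifiers_count tells the page-fault path that an
 * invalidation is in flight, and notifiers_seq lets it detect one that
 * has completed in the meantime (see ib_umem_mmu_notifier_retry()).
 */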
static void ib_umem_notifier_start_account(struct ib_umem *item)
{
        mutex_lock(&item->odp_data->umem_mutex);

        /* Only update private counters for this umem if it has them.
         * Otherwise skip it. All page faults will be delayed for this umem. */
        if (item->odp_data->mn_counters_active) {
                int notifiers_count = item->odp_data->notifiers_count++;

                if (notifiers_count == 0)
                        /* Initialize the completion object for waiting on
                         * notifiers. Since notifier_count is zero, no one
                         * should be waiting right now. */
                        reinit_completion(&item->odp_data->notifier_completion);
        }
        mutex_unlock(&item->odp_data->umem_mutex);
}

static void ib_umem_notifier_end_account(struct ib_umem *item)
{
        mutex_lock(&item->odp_data->umem_mutex);

        /* Only update private counters for this umem if it has them.
         * Otherwise skip it. All page faults will be delayed for this umem. */
        if (item->odp_data->mn_counters_active) {
                /*
                 * This sequence increase will notify the QP page fault that
                 * the page that is going to be mapped in the spte could have
                 * been freed.
                 */
                ++item->odp_data->notifiers_seq;
                if (--item->odp_data->notifiers_count == 0)
                        complete_all(&item->odp_data->notifier_completion);
        }
        mutex_unlock(&item->odp_data->umem_mutex);
}

/* Account for a new mmu notifier in an ib_ucontext. */
static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
{
        atomic_inc(&context->notifier_count);
}

/* Account for a terminating mmu notifier in an ib_ucontext.
 *
 * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
 * the function takes the semaphore itself. */
static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
{
        int zero_notifiers = atomic_dec_and_test(&context->notifier_count);

        if (zero_notifiers &&
            !list_empty(&context->no_private_counters)) {
                /* No currently running mmu notifiers. Now is the chance to
                 * add private accounting to all previously added umems. */
                struct ib_umem_odp *odp_data, *next;

                /* Prevent concurrent mmu notifiers from working on the
                 * no_private_counters list. */
                down_write(&context->umem_rwsem);

                /* Read the notifier_count again, with the umem_rwsem
                 * semaphore taken for write. */
                if (!atomic_read(&context->notifier_count)) {
                        list_for_each_entry_safe(odp_data, next,
                                                 &context->no_private_counters,
                                                 no_private_counters) {
                                mutex_lock(&odp_data->umem_mutex);
                                odp_data->mn_counters_active = true;
                                list_del(&odp_data->no_private_counters);
                                complete_all(&odp_data->notifier_completion);
                                mutex_unlock(&odp_data->umem_mutex);
                        }
                }

                up_write(&context->umem_rwsem);
        }
}

static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
                                               u64 end, void *cookie)
{
        /*
         * Increase the number of notifiers running, to
         * prevent any further fault handling on this MR.
         */
        ib_umem_notifier_start_account(item);
        item->odp_data->dying = 1;
        /* Make sure that the fact the umem is dying is out before we release
         * all pending page faults. */
        smp_wmb();
        complete_all(&item->odp_data->notifier_completion);
        item->context->invalidate_range(item, ib_umem_start(item),
                                        ib_umem_end(item));
        return 0;
}

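/*
 * mmu_notifier .release hook: runs when the owning process's address
 * space is torn down (e.g. at exit). Every umem in the tree is marked
 * dying and invalidated so that no further hardware access can be
 * established.
 */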
static void ib_umem_notifier_release(struct mmu_notifier *mn,
                                     struct mm_struct *mm)
{
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

        if (!context->invalidate_range)
                return;

        ib_ucontext_notifier_start_account(context);
        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
                                      ULLONG_MAX,
                                      ib_umem_notifier_release_trampoline,
                                      NULL);
        up_read(&context->umem_rwsem);
}

static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
                                      u64 end, void *cookie)
{
        ib_umem_notifier_start_account(item);
        item->context->invalidate_range(item, start, start + PAGE_SIZE);
        ib_umem_notifier_end_account(item);
        return 0;
}

static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
                                             u64 end, void *cookie)
{
        ib_umem_notifier_start_account(item);
        item->context->invalidate_range(item, start, end);
        return 0;
}

static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long end)
{
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

        if (!context->invalidate_range)
                return;

        ib_ucontext_notifier_start_account(context);
        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
                                      end,
                                      invalidate_range_start_trampoline, NULL);
        up_read(&context->umem_rwsem);
}

static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
                                           u64 end, void *cookie)
{
        ib_umem_notifier_end_account(item);
        return 0;
}

static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end)
{
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

        if (!context->invalidate_range)
                return;

        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
                                      end,
                                      invalidate_range_end_trampoline, NULL);
        up_read(&context->umem_rwsem);
        ib_ucontext_notifier_end_account(context);
}

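/*
 * Note the asymmetry between the two range trampolines above:
 * invalidate_range_start bumps the per-umem notifiers_count (delaying new
 * page faults) and calls the driver's invalidate_range, while
 * invalidate_range_end only drops the count again, with notifiers_seq
 * bumped so that a fault racing with the invalidation will retry.
 */
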
static const struct mmu_notifier_ops ib_umem_notifiers = {
        .release                    = ib_umem_notifier_release,
        .invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
        .invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
};

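/*
 * Allocate an ODP umem covering [addr, addr + size) without pinning a
 * userspace mapping up front, and hook it into the context's interval
 * tree and notifier accounting. (Callers of this export live outside
 * this file; one assumed use is building child umems for implicit ODP.)
 */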
struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
                                  unsigned long addr,
                                  size_t size)
{
        struct ib_umem *umem;
        struct ib_umem_odp *odp_data;
        int pages = size >> PAGE_SHIFT;
        int ret;

        umem = kzalloc(sizeof(*umem), GFP_KERNEL);
        if (!umem)
                return ERR_PTR(-ENOMEM);

        umem->context    = context;
        umem->length     = size;
        umem->address    = addr;
        umem->page_shift = PAGE_SHIFT;
        umem->writable   = 1;

        odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
        if (!odp_data) {
                ret = -ENOMEM;
                goto out_umem;
        }
        odp_data->umem = umem;

        mutex_init(&odp_data->umem_mutex);
        init_completion(&odp_data->notifier_completion);

        odp_data->page_list =
                vzalloc(array_size(pages, sizeof(*odp_data->page_list)));
        if (!odp_data->page_list) {
                ret = -ENOMEM;
                goto out_odp_data;
        }

        odp_data->dma_list =
                vzalloc(array_size(pages, sizeof(*odp_data->dma_list)));
        if (!odp_data->dma_list) {
                ret = -ENOMEM;
                goto out_page_list;
        }

        down_write(&context->umem_rwsem);
        context->odp_mrs_count++;
        rbt_ib_umem_insert(&odp_data->interval_tree, &context->umem_tree);
        if (likely(!atomic_read(&context->notifier_count)))
                odp_data->mn_counters_active = true;
        else
                list_add(&odp_data->no_private_counters,
                         &context->no_private_counters);
        up_write(&context->umem_rwsem);

        umem->odp_data = odp_data;

        return umem;

out_page_list:
        vfree(odp_data->page_list);
out_odp_data:
        kfree(odp_data);
out_umem:
        kfree(umem);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_odp_umem);

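/*
 * Attach ODP state to a umem describing a live userspace mapping:
 * allocate odp_data with its page/dma shadow arrays, insert the umem
 * into the context's interval tree, and register the context's mmu
 * notifier when this is the first ODP MR of the context.
 */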
int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
                    int access)
{
        int ret_val;
        struct pid *our_pid;
        struct mm_struct *mm = get_task_mm(current);

        if (!mm)
                return -EINVAL;

        if (access & IB_ACCESS_HUGETLB) {
                struct vm_area_struct *vma;
                struct hstate *h;

                down_read(&mm->mmap_sem);
                vma = find_vma(mm, ib_umem_start(umem));
                if (!vma || !is_vm_hugetlb_page(vma)) {
                        up_read(&mm->mmap_sem);
                        return -EINVAL;
                }
                h = hstate_vma(vma);
                umem->page_shift = huge_page_shift(h);
                up_read(&mm->mmap_sem);
                umem->hugetlb = 1;
        } else {
                umem->hugetlb = 0;
        }

        /* Prevent creating ODP MRs in child processes */
        rcu_read_lock();
        our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
        rcu_read_unlock();
        put_pid(our_pid);
        if (context->tgid != our_pid) {
                ret_val = -EINVAL;
                goto out_mm;
        }

        umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
        if (!umem->odp_data) {
                ret_val = -ENOMEM;
                goto out_mm;
        }
        umem->odp_data->umem = umem;

        mutex_init(&umem->odp_data->umem_mutex);

        init_completion(&umem->odp_data->notifier_completion);

        if (ib_umem_num_pages(umem)) {
                umem->odp_data->page_list =
                        vzalloc(array_size(sizeof(*umem->odp_data->page_list),
                                           ib_umem_num_pages(umem)));
                if (!umem->odp_data->page_list) {
                        ret_val = -ENOMEM;
                        goto out_odp_data;
                }

                umem->odp_data->dma_list =
                        vzalloc(array_size(sizeof(*umem->odp_data->dma_list),
                                           ib_umem_num_pages(umem)));
                if (!umem->odp_data->dma_list) {
                        ret_val = -ENOMEM;
                        goto out_page_list;
                }
        }

        /*
         * When using MMU notifiers, we will get a
         * notification before the "current" task (and MM) is
         * destroyed. We use the umem_rwsem semaphore to synchronize.
         */
        down_write(&context->umem_rwsem);
        context->odp_mrs_count++;
        if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
                rbt_ib_umem_insert(&umem->odp_data->interval_tree,
                                   &context->umem_tree);
        if (likely(!atomic_read(&context->notifier_count)) ||
            context->odp_mrs_count == 1)
                umem->odp_data->mn_counters_active = true;
        else
                list_add(&umem->odp_data->no_private_counters,
                         &context->no_private_counters);
        downgrade_write(&context->umem_rwsem);

        if (context->odp_mrs_count == 1) {
                /*
                 * Note that at this point, no MMU notifier is running
                 * for this context!
                 */
                atomic_set(&context->notifier_count, 0);
                INIT_HLIST_NODE(&context->mn.hlist);
                context->mn.ops = &ib_umem_notifiers;
                /*
                 * Lock-dep detects a false positive for mmap_sem vs.
                 * umem_rwsem, due to not grasping downgrade_write correctly.
                 */
                lockdep_off();
                ret_val = mmu_notifier_register(&context->mn, mm);
                lockdep_on();
                if (ret_val) {
                        pr_err("Failed to register mmu_notifier %d\n", ret_val);
                        ret_val = -EBUSY;
                        goto out_mutex;
                }
        }

        up_read(&context->umem_rwsem);

        /*
         * Note that doing an mmput can cause a notifier for the relevant mm.
         * If the notifier is called while we hold the umem_rwsem, this will
         * cause a deadlock. Therefore, we release the reference only after we
         * released the semaphore.
         */
        mmput(mm);
        return 0;

out_mutex:
        up_read(&context->umem_rwsem);
        vfree(umem->odp_data->dma_list);
out_page_list:
        vfree(umem->odp_data->page_list);
out_odp_data:
        kfree(umem->odp_data);
out_mm:
        mmput(mm);
        return ret_val;
}

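/*
 * Tear down an ODP umem: unmap any remaining DMA-mapped pages, remove
 * the umem from the interval tree and, when the last ODP MR of the
 * context goes away, unregister the mmu notifier.
 */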
void ib_umem_odp_release(struct ib_umem *umem)
{
        struct ib_ucontext *context = umem->context;

        /*
         * Ensure that no more pages are mapped in the umem.
         *
         * It is the driver's responsibility to ensure, before calling us,
         * that the hardware will not attempt to access the MR any more.
         */
        ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
                                    ib_umem_end(umem));

        down_write(&context->umem_rwsem);
        if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
                rbt_ib_umem_remove(&umem->odp_data->interval_tree,
                                   &context->umem_tree);
        context->odp_mrs_count--;
        if (!umem->odp_data->mn_counters_active) {
                list_del(&umem->odp_data->no_private_counters);
                complete_all(&umem->odp_data->notifier_completion);
        }

        /*
         * Downgrade the lock to a read lock. This ensures that the notifiers
         * (who lock the mutex for reading) will be able to finish, and we
         * will be able to eventually obtain the mmu notifiers SRCU. Note
         * that since we are doing it atomically, no other user could register
         * and unregister while we do the check.
         */
        downgrade_write(&context->umem_rwsem);
        if (!context->odp_mrs_count) {
                struct task_struct *owning_process = NULL;
                struct mm_struct *owning_mm        = NULL;

                owning_process = get_pid_task(context->tgid,
                                              PIDTYPE_PID);
                if (owning_process == NULL)
                        /*
                         * The process is already dead, notifiers were
                         * removed already.
                         */
                        goto out;

                owning_mm = get_task_mm(owning_process);
                if (owning_mm == NULL)
                        /*
                         * The process' mm is already dead, notifiers were
                         * removed already.
                         */
                        goto out_put_task;
                mmu_notifier_unregister(&context->mn, owning_mm);

                mmput(owning_mm);

out_put_task:
                put_task_struct(owning_process);
        }
out:
        up_read(&context->umem_rwsem);

        vfree(umem->odp_data->dma_list);
        vfree(umem->odp_data->page_list);
        kfree(umem->odp_data);
        kfree(umem);
}

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem->odp_data->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
                struct ib_umem *umem,
                int page_index,
                struct page *page,
                u64 access_mask,
                unsigned long current_seq)
{
        struct ib_device *dev = umem->context->device;
        dma_addr_t dma_addr;
        int stored_page = 0;
        int remove_existing_mapping = 0;
        int ret = 0;

        /*
         * Note: we avoid writing if seq is different from the initial seq, to
         * handle case of a racing notifier. This check also allows us to bail
         * early if we have a notifier running in parallel with us.
         */
        if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
                ret = -EAGAIN;
                goto out;
        }
        if (!(umem->odp_data->dma_list[page_index])) {
                dma_addr = ib_dma_map_page(dev,
                                           page,
                                           0, BIT(umem->page_shift),
                                           DMA_BIDIRECTIONAL);
                if (ib_dma_mapping_error(dev, dma_addr)) {
                        ret = -EFAULT;
                        goto out;
                }
                umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
                umem->odp_data->page_list[page_index] = page;
                umem->npages++;
                stored_page = 1;
        } else if (umem->odp_data->page_list[page_index] == page) {
                umem->odp_data->dma_list[page_index] |= access_mask;
        } else {
                pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
                       umem->odp_data->page_list[page_index], page);
                /* Better remove the mapping now, to prevent any further
                 * damage. */
                remove_existing_mapping = 1;
        }

out:
        /* On Demand Paging - avoid pinning the page */
        if (umem->context->invalidate_range || !stored_page)
                put_page(page);

        if (remove_existing_mapping && umem->context->invalidate_range) {
                invalidate_page_trampoline(
                        umem,
                        ib_umem_start(umem) + (page_index << umem->page_shift),
                        ib_umem_start(umem) + ((page_index + 1) <<
                                               umem->page_shift),
                        NULL);
                ret = -EAGAIN;
        }

        return ret;
}

/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem->odp_data->dma_list.
 *
 * Returns the number of pages mapped on success, or a negative error code
 * on failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 * An -ENOENT error code indicates that the userspace process is being
 * terminated and its mm was already destroyed.
 * @umem: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem->odp_data->notifiers_seq before calling this function.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
                              u64 access_mask, unsigned long current_seq)
{
        struct task_struct *owning_process  = NULL;
        struct mm_struct   *owning_mm       = NULL;
        struct page       **local_page_list = NULL;
        u64 page_mask, off;
        int j, k, ret = 0, start_idx, npages = 0, page_shift;
        unsigned int flags = 0;
        phys_addr_t p = 0;

        if (access_mask == 0)
                return -EINVAL;

        if (user_virt < ib_umem_start(umem) ||
            user_virt + bcnt > ib_umem_end(umem))
                return -EFAULT;

        local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
        if (!local_page_list)
                return -ENOMEM;

        page_shift = umem->page_shift;
        page_mask = ~(BIT(page_shift) - 1);
        off = user_virt & (~page_mask);
        user_virt = user_virt & page_mask;
        bcnt += off; /* Charge for the first page offset as well. */

        owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
        if (owning_process == NULL) {
                ret = -EINVAL;
                goto out_no_task;
        }

        owning_mm = get_task_mm(owning_process);
        if (owning_mm == NULL) {
                ret = -ENOENT;
                goto out_put_task;
        }

        if (access_mask & ODP_WRITE_ALLOWED_BIT)
                flags |= FOLL_WRITE;

        start_idx = (user_virt - ib_umem_start(umem)) >> page_shift;
        k = start_idx;

        while (bcnt > 0) {
                const size_t gup_num_pages = min_t(size_t,
                                (bcnt + BIT(page_shift) - 1) >> page_shift,
                                PAGE_SIZE / sizeof(struct page *));

                down_read(&owning_mm->mmap_sem);
                /*
                 * Note: this might result in redundant page getting. We can
                 * avoid this by checking dma_list to be 0 before calling
                 * get_user_pages. However, this makes the code much more
                 * complex (and doesn't gain us much performance in most use
                 * cases).
                 */
                npages = get_user_pages_remote(owning_process, owning_mm,
                                user_virt, gup_num_pages,
                                flags, local_page_list, NULL, NULL);
                up_read(&owning_mm->mmap_sem);

                if (npages < 0)
                        break;

                bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
                mutex_lock(&umem->odp_data->umem_mutex);
                for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
                        if (user_virt & ~page_mask) {
                                p += PAGE_SIZE;
                                if (page_to_phys(local_page_list[j]) != p) {
                                        ret = -EFAULT;
                                        break;
                                }
                                put_page(local_page_list[j]);
                                continue;
                        }

                        ret = ib_umem_odp_map_dma_single_page(
                                        umem, k, local_page_list[j],
                                        access_mask, current_seq);
                        if (ret < 0)
                                break;

                        p = page_to_phys(local_page_list[j]);
                        k++;
                }
                mutex_unlock(&umem->odp_data->umem_mutex);

                if (ret < 0) {
                        /* Release left over pages when handling errors. */
                        for (++j; j < npages; ++j)
                                put_page(local_page_list[j]);
                        break;
                }
        }

        if (ret >= 0) {
                if (npages < 0 && k == start_idx)
                        ret = npages;
                else
                        ret = k - start_idx;
        }

        mmput(owning_mm);
out_put_task:
        put_task_struct(owning_process);
out_no_task:
        free_page((unsigned long)local_page_list);
        return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);

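/*
 * A minimal sketch (illustrative pseudo-code, not part of this file) of
 * the retry loop a driver page-fault handler is expected to build around
 * ib_umem_odp_map_dma_pages(), per the @current_seq contract above. The
 * variable names here are assumptions for illustration:
 *
 *	unsigned long seq;
 *	int npages;
 *
 *	do {
 *		seq = READ_ONCE(umem->odp_data->notifiers_seq);
 *		smp_rmb();
 *		npages = ib_umem_odp_map_dma_pages(umem, io_virt, bcnt,
 *						   access_mask, seq);
 *	} while (npages == -EAGAIN);
 */
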
void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
                                 u64 bound)
{
        int idx;
        u64 addr;
        struct ib_device *dev = umem->context->device;

        virt  = max_t(u64, virt,  ib_umem_start(umem));
        bound = min_t(u64, bound, ib_umem_end(umem));
        /* Note that during the run of this function, the
         * notifiers_count of the MR is > 0, preventing any racing
         * faults from completing. We might be racing with other
         * invalidations, so we must make sure we free each page only
         * once. */
        mutex_lock(&umem->odp_data->umem_mutex);
        for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
                idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
                if (umem->odp_data->page_list[idx]) {
                        struct page *page = umem->odp_data->page_list[idx];
                        dma_addr_t dma = umem->odp_data->dma_list[idx];
                        dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

                        WARN_ON(!dma_addr);

                        ib_dma_unmap_page(dev, dma_addr, BIT(umem->page_shift),
                                          DMA_BIDIRECTIONAL);
                        if (dma & ODP_WRITE_ALLOWED_BIT) {
                                struct page *head_page = compound_head(page);
                                /*
                                 * set_page_dirty prefers being called with
                                 * the page lock. However, MMU notifiers are
                                 * called sometimes with and sometimes without
                                 * the lock. We rely on the umem_mutex instead
                                 * to prevent other mmu notifiers from
                                 * continuing and allowing the page mapping to
                                 * be removed.
                                 */
                                set_page_dirty(head_page);
                        }
                        /* on demand pinning support */
                        if (!umem->context->invalidate_range)
                                put_page(page);
                        umem->odp_data->page_list[idx] = NULL;
                        umem->odp_data->dma_list[idx] = 0;
                        umem->npages--;
                }
        }
        mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);

/* @last is not a part of the interval. See comment for function
 * node_last.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
                                  u64 start, u64 last,
                                  umem_call_back cb,
                                  void *cookie)
{
        int ret_val = 0;
        struct umem_odp_node *node, *next;
        struct ib_umem_odp *umem;

        if (unlikely(start == last))
                return ret_val;

        for (node = rbt_ib_umem_iter_first(root, start, last - 1);
                        node; node = next) {
                next = rbt_ib_umem_iter_next(node, start, last - 1);
                umem = container_of(node, struct ib_umem_odp, interval_tree);
                ret_val = cb(umem->umem, start, last, cookie) || ret_val;
        }

        return ret_val;
}
EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);

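/*
 * Return the first umem in the tree overlapping [addr, addr + length),
 * or NULL if no umem covers any part of the range.
 */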
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
                                       u64 addr, u64 length)
{
        struct umem_odp_node *node;

        node = rbt_ib_umem_iter_first(root, addr, addr + length - 1);
        if (node)
                return container_of(node, struct ib_umem_odp, interval_tree);
        return NULL;
}
EXPORT_SYMBOL(rbt_ib_umem_lookup);