// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the range. The API can be used in two
	 * ways. In the first, the HMM user coalesces multiple page faults
	 * into one request and sets flags per pfn for those faults. In the
	 * second, the HMM user wants to pre-fault a range with specific
	 * flags. For the latter it would be a waste to have the user
	 * pre-fill the pfn array with a default flags value.
	 */
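	/*
	 * Illustrative sketch, not part of the original flow, of the two
	 * configurations described above (all fields and flags below already
	 * exist in struct hmm_range / include/linux/hmm.h):
	 *
	 * Pre-fault a whole range for write, ignoring per-pfn input flags:
	 *	range->default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
	 *	range->pfn_flags_mask = 0;
	 *
	 * Fault only selected pages, honouring per-pfn input flags:
	 *	range->default_flags = 0;
	 *	range->pfn_flags_mask = ~0UL;
	 *	range->hmm_pfns[i] = HMM_PFN_REQ_FAULT;  (for each page i)
	 */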
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Need to write fault? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}

static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = ptep_get(ptep);
	uint64_t pfn_req_flags = *hmm_pfn;

	if (pte_none_mostly(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Don't fault in device private pages owned by the caller,
		 * just report the PFN.
		 */
		if (is_device_private_entry(entry) &&
		    pfn_swap_entry_to_page(entry)->pgmap->owner ==
		    range->dev_private_owner) {
			cpu_flags = HMM_PFN_VALID;
			if (is_writable_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = swp_offset_pfn(entry) | cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_device_private_entry(entry))
			goto fault;

		if (is_device_exclusive_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Bypass devmap pte such as DAX page when all pfn requested
	 * flags (pfn_req_flags) are fulfilled.
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (!vm_normal_page(walk->vma, addr, pte) &&
	    !pte_devmap(pte) &&
	    !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = pmdp_get_lockless(pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read pmd value and check again it's a transparent
		 * huge or device mapping one and compute corresponding pfn
		 * values.
		 */
		pmd = pmdp_get_lockless(pmdp);
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	if (!ptep)
		goto again;
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (!pud_present(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_leaf(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(walk->mm, addr, pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		int ret;

		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
		/*
		 * Avoid deadlock: drop the vma lock before calling
		 * hmm_vma_fault(), which will itself potentially take and
		 * drop the vma lock. This is also correct from a
		 * protection point of view, because there is no further
		 * use here of either pte or ptl after dropping the vma
		 * lock.
		 */
		ret = hmm_vma_fault(addr, end, required_fault, walk);
		hugetlb_vma_lock_read(vma);
		return ret;
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
	.walk_lock	= PGWALK_RDLOCK,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
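
/*
 * A minimal caller-side sketch, not part of this file, assuming a driver that
 * has already registered an mmu_interval_notifier ("notifier") covering the
 * virtual range it mirrors; "start" and "npages" are the caller's own values.
 * The retry loop reflects the -EBUSY contract documented above: the snapshot
 * must be taken under a notifier sequence that is still valid.
 *
 *	unsigned long *pfns = kcalloc(npages, sizeof(*pfns), GFP_KERNEL);
 *	struct hmm_range range = {
 *		.notifier = notifier,
 *		.start = start,
 *		.end = start + (npages << PAGE_SHIFT),
 *		.hmm_pfns = pfns,
 *		.default_flags = HMM_PFN_REQ_FAULT,
 *		.pfn_flags_mask = 0,
 *	};
 *	int ret;
 *
 *	do {
 *		range.notifier_seq = mmu_interval_read_begin(notifier);
 *		mmap_read_lock(notifier->mm);
 *		ret = hmm_range_fault(&range);
 *		mmap_read_unlock(notifier->mm);
 *	} while (ret == -EBUSY);
 *
 * On success the driver takes its own page-table lock and calls
 * mmu_interval_read_retry(notifier, range.notifier_seq); if that returns true
 * the snapshot raced with an invalidation and the whole sequence must be
 * repeated, otherwise range.hmm_pfns[] can be consumed.
 */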