// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>
struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};
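
/*
 * Internal flags returned by hmm_pte_need_fault()/hmm_range_need_fault():
 * they record that hmm_vma_fault() must be called for an entry, and whether
 * that fault must request write access.
 */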
enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}
static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * So we not only consider the individual per-page request, we also
	 * consider the default flags requested for the range. The API can
	 * be used 2 ways. The first one where the HMM user coalesces
	 * multiple page faults into one request and sets flags per pfn for
	 * those faults. The second one where the HMM user wants to pre-
	 * fault a range with specific flags. For the latter one it is a
	 * waste to have the user pre-fill the pfn arrays with a default
	 * flags value. (See the illustrative sketch after this function.)
	 */
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Need to write fault? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}
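
/*
 * Illustrative sketch (not part of the API) of the two request styles the
 * comment above describes, for a caller that owns a struct hmm_range *range:
 *
 *	// 1) Per-pfn faults: the mask lets individual input entries request
 *	//    a (write) fault, so only the marked pages are faulted in.
 *	range->default_flags = 0;
 *	range->pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
 *	range->hmm_pfns[i] = HMM_PFN_REQ_FAULT;
 *
 *	// 2) Pre-fault the whole range: the default flags apply to every
 *	//    entry, so the input array does not need to be pre-filled.
 *	range->default_flags = HMM_PFN_REQ_FAULT;
 *	range->pfn_flags_mask = 0;
 */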

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}
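
/*
 * pte_hole handler: called for any range not covered by page table entries.
 * Depending on whether the caller asked for the pages to be faulted in, this
 * either triggers hmm_vma_fault() or reports the hole as empty (or as
 * HMM_PFN_ERROR when no vma backs the range).
 */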
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool hmm_is_device_private_entry(struct hmm_range *range,
		swp_entry_t entry)
{
	return is_device_private_entry(entry) &&
		device_private_entry_to_page(entry)->pgmap->owner ==
		range->dev_private_owner;
}

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = *ptep;
	uint64_t pfn_req_flags = *hmm_pfn;

	if (pte_none(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Never fault in device private pages, but just report
		 * the PFN even if not present.
		 */
		if (hmm_is_device_private_entry(range, entry)) {
			cpu_flags = HMM_PFN_VALID;
			if (is_write_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = device_private_entry_to_pfn(entry) |
					cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it's a
		 * transparent huge or device mapping one and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, required_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}
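
/*
 * Callback table for walk_page_range(): the handlers above fill
 * range->hmm_pfns at each level of the page table walk, while
 * hmm_vma_walk_test() filters out vmas that hmm_range_fault() cannot handle.
 */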
static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
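
/*
 * A minimal sketch of the expected calling pattern, assuming a driver that
 * has already registered an mmu_interval_notifier covering the range. The
 * driver-side names (driver_populate_range(), dlock) are placeholders.
 *
 *	int driver_populate_range(struct mmu_interval_notifier *ni,
 *				  struct hmm_range *range)
 *	{
 *		int ret;
 *
 *		range->notifier = ni;
 *	again:
 *		range->notifier_seq = mmu_interval_read_begin(ni);
 *		mmap_read_lock(ni->mm);
 *		ret = hmm_range_fault(range);
 *		mmap_read_unlock(ni->mm);
 *		if (ret) {
 *			if (ret == -EBUSY)
 *				goto again;
 *			return ret;
 *		}
 *
 *		mutex_lock(&dlock);
 *		if (mmu_interval_read_retry(ni, range->notifier_seq)) {
 *			mutex_unlock(&dlock);
 *			goto again;
 *		}
 *		// ... program the device page tables from range->hmm_pfns ...
 *		mutex_unlock(&dlock);
 *		return 0;
 *	}
 */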