// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>
struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}
/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}
static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * We not only consider the individual per-page request, we also
	 * consider the default flags requested for the range. The API can
	 * be used in two ways. In the first, the HMM user coalesces
	 * multiple page faults into one request and sets flags per pfn for
	 * those faults. In the second, the HMM user wants to pre-fault a
	 * range with specific flags. For the latter it is a waste to have
	 * the user pre-fill the pfn arrays with a default flags value.
	 */
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Need to write fault? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If the CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}
static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}
static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}
static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		       unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline bool hmm_is_device_private_entry(struct hmm_range *range,
					       swp_entry_t entry)
{
	return is_device_private_entry(entry) &&
		device_private_entry_to_page(entry)->pgmap->owner ==
		range->dev_private_owner;
}
static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = *ptep;
	uint64_t pfn_req_flags = *hmm_pfn;

	if (pte_none(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Never fault in device private pages, but just report
		 * the PFN even if not present.
		 */
		if (hmm_is_device_private_entry(range, entry)) {
			cpu_flags = HMM_PFN_VALID;
			if (is_write_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = device_private_entry_to_pfn(entry) |
					cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd lock here: even if some other thread
		 * is splitting the huge pmd, we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point the pmd is
	 * either a valid entry pointing to a pte directory or a bad pmd
	 * that will not recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}
#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif
#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, required_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */
static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or that map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}
static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};
/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);