// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
/*
 * We want to know the real level where an entry is located, ignoring any
 * folding of levels which may be happening. For example, if p4d is folded then
 * a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
 */
static int real_depth(int depth)
{
	if (depth == 3 && PTRS_PER_PMD == 1)
		depth = 2;
	if (depth == 2 && PTRS_PER_PUD == 1)
		depth = 1;
	if (depth == 1 && PTRS_PER_P4D == 1)
		depth = 0;
	return depth;
}
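
/*
 * Illustrative note (not part of the original file): pte_hole() depths are
 * numbered 0:PGD, 1:P4D, 2:PUD, 3:PMD, with -1 meaning "not known" (used for
 * the hugetlb and PFNMAP holes below). For example, on a configuration with
 * p4d and pud folded (PTRS_PER_P4D == 1 and PTRS_PER_PUD == 1) but a real pmd
 * level, real_depth(3) stays 3, while real_depth(2) and real_depth(1) both
 * collapse to 0.
 */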
static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	for (;;) {
		err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		if (addr >= end - PAGE_SIZE)
			break;
		addr += PAGE_SIZE;
		pte++;
	}
	return err;
}
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;
	spinlock_t *ptl;

	if (walk->no_vma) {
		pte = pte_offset_map(pmd, addr);
		err = walk_pte_range_inner(pte, addr, end, walk);
		pte_unmap(pte);
	} else {
		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		err = walk_pte_range_inner(pte, addr, end, walk);
		pte_unmap_unlock(pte, ptl);
	}

	return err;
}
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(3);

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (ops->pmd_entry)
			err = ops->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pte_entry))
			continue;

		if (walk->vma) {
			split_huge_pmd(walk->vma, pmd, addr);
			if (pmd_trans_unstable(pmd))
				goto again;
		}

		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
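
/*
 * Illustrative sketch (not from this file): because ->pmd_entry() runs before
 * any THP splitting, a handler has to cope with huge pmds itself, e.g. by
 * handling the whole mapping at pmd level and skipping the pte descent. The
 * names below are hypothetical.
 *
 *	static int my_pmd_entry(pmd_t *pmd, unsigned long addr,
 *				unsigned long next, struct mm_walk *walk)
 *	{
 *		if (pmd_trans_huge(*pmd)) {
 *			// ... handle the huge mapping here ...
 *			walk->action = ACTION_CONTINUE;	// don't descend to ptes
 *		}
 *		return 0;
 *	}
 */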
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(2);

	pud = pud_offset(p4d, addr);
	do {
again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		if (ops->pud_entry)
			err = ops->pud_entry(pud, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pmd_entry || ops->pte_entry))
			continue;

		if (walk->vma)
			split_huge_pud(walk->vma, pud, addr);
		if (pud_none(*pud))
			goto again;

		err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}
static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(1);

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}
		if (ops->p4d_entry) {
			err = ops->p4d_entry(p4d, addr, next, walk);
			if (err)
				break;
		}
		if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}
static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	if (walk->pgd)
		pgd = walk->pgd + pgd_index(addr);
	else
		pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, 0, walk);
			if (err)
				break;
			continue;
		}
		if (ops->pgd_entry) {
			err = ops->pgd_entry(pgd, addr, next, walk);
			if (err)
				break;
		}
		if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
		    ops->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);

	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	pte_t *pte;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask, sz);

		if (pte)
			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (ops->pte_hole)
			err = ops->pte_hole(addr, next, -1, walk);

		if (err)
			break;
	} while (addr = next, addr != end);

	return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. Negative values mean
 * error, where we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (ops->test_walk)
		return ops->test_walk(start, end, walk);

	/*
	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind the
	 * VM_PFNMAP range, so we don't walk over it as we do for normal vmas.
	 * However, some callers are interested in handling hole ranges and
	 * don't want to just ignore any single address range. Such users
	 * certainly define their ->pte_hole() callbacks, so let's delegate
	 * them to handle vma(VM_PFNMAP).
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;

		if (ops->pte_hole)
			err = ops->pte_hole(start, end, -1, walk);
		return err ? err : 1;
	}
	return 0;
}
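
/*
 * Illustrative sketch (not from this file): a caller that only cares about
 * anonymous mappings could filter vmas with ->test_walk instead of relying
 * on the default VM_PFNMAP handling above. The callback name is hypothetical.
 *
 *	static int my_test_walk(unsigned long start, unsigned long end,
 *				struct mm_walk *walk)
 *	{
 *		if (!vma_is_anonymous(walk->vma))
 *			return 1;	// skip this vma, keep walking
 *		return 0;		// walk this vma
 *	}
 */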
static int __walk_page_range(unsigned long start, unsigned long end,
			struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (vma && ops->pre_vma) {
		err = ops->pre_vma(start, end, walk);
		if (err)
			return err;
	}

	if (vma && is_vm_hugetlb_page(vma)) {
		if (ops->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	if (vma && ops->post_vma)
		ops->post_vma(walk);

	return err;
}
/**
 * walk_page_range - walk page table with caller specific callbacks
 * @mm:		mm_struct representing the target process of page table walk
 * @start:	start address of the virtual address range
 * @end:	end address of the virtual address range
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * Recursively walk the page table tree of the process represented by @mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined like below:
 *
 *  - 0  : succeeded to handle the current entry, and if you don't reach the
 *         end address yet, continue to walk.
 *  - >0 : succeeded to handle the current entry, and return to the caller
 *         with caller specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
 * purpose.
 *
 * If operations need to be staged before and committed after a vma is walked,
 * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
 * since it is intended to handle commit-type operations, can't return any
 * errors.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock,
 *   because these functions traverse the vma list and/or access vma data.
 */
int walk_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.private	= private,
	};

	if (start >= end)
		return -EINVAL;

	if (!walk.mm)
		return -EINVAL;

	mmap_assert_locked(walk.mm);

	vma = find_vma(walk.mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk.vma = NULL;
			next = end;
		} else if (start < vma->vm_start) { /* outside vma */
			walk.vma = NULL;
			next = min(end, vma->vm_start);
		} else { /* inside vma */
			walk.vma = vma;
			next = min(end, vma->vm_end);
			vma = vma->vm_next;

			err = walk_page_test(start, next, &walk);
			if (err > 0) {
				/*
				 * positive return values are purely for
				 * controlling the pagewalk, so should never
				 * be passed to the callers.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
		}
		if (walk.vma || walk.ops->pte_hole)
			err = __walk_page_range(start, next, &walk);
		if (err)
			break;
	} while (start = next, start < end);
	return err;
}
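
/*
 * Illustrative usage sketch (not part of this file): count present ptes in a
 * range of a process. The callback and counter names are hypothetical; the
 * mmap_lock must be held as documented above.
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *nr = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*nr)++;
 *		return 0;
 *	}
 *
 *	static const struct mm_walk_ops count_ops = {
 *		.pte_entry	= count_pte,
 *	};
 *
 *	unsigned long nr = 0;
 *	mmap_read_lock(mm);
 *	walk_page_range(mm, start, end, &count_ops, &nr);
 *	mmap_read_unlock(mm);
 */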
/*
 * Similar to walk_page_range() but can walk any page tables even if they are
 * not backed by VMAs. Because 'unusual' entries may be walked, this function
 * will also not lock the PTEs for the pte_entry() callback. This is useful for
 * walking the kernel page tables or page tables for firmware.
 */
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
			  unsigned long end, const struct mm_walk_ops *ops,
			  pgd_t *pgd,
			  void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.pgd		= pgd,
		.private	= private,
		.no_vma		= true
	};

	if (start >= end || !walk.mm)
		return -EINVAL;

	mmap_assert_locked(walk.mm);

	return __walk_page_range(start, end, &walk);
}
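
/*
 * Illustrative sketch (not from this file): a caller inspecting a kernel
 * virtual range might drive the novma walk like this, passing init_mm and no
 * explicit pgd so the kernel page tables are used. The ops name is
 * hypothetical, and the mmap_lock of init_mm is held as asserted above.
 *
 *	mmap_read_lock(&init_mm);
 *	walk_page_range_novma(&init_mm, VMALLOC_START, VMALLOC_END,
 *			      &my_dump_ops, NULL, NULL);
 *	mmap_read_unlock(&init_mm);
 */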
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= vma->vm_mm,
		.vma		= vma,
		.private	= private,
	};
	int err;

	if (!walk.mm)
		return -EINVAL;

	mmap_assert_locked(walk.mm);

	err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
	if (err > 0)
		return 0;
	if (err < 0)
		return err;
	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}
/**
 * walk_page_mapping - walk all memory areas mapped into a struct address_space.
 * @mapping: Pointer to the struct address_space
 * @first_index: First page offset in the address_space
 * @nr: Number of incremental page offsets to cover
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * This function walks all memory areas mapped into a struct address_space.
 * The walk is limited to only the given page-size index range, but if
 * the index boundaries cross a huge page-table entry, that entry will be
 * included.
 *
 * Also see walk_page_range() for additional information.
 *
 * Locking:
 *   This function can't require that the struct mm_struct::mmap_lock is held,
 *   since @mapping may be mapped by multiple processes. Instead
 *   @mapping->i_mmap_rwsem must be held. This might have implications in the
 *   callbacks, and it's up to the caller to ensure that the
 *   struct mm_struct::mmap_lock is not needed.
 *
 *   Also this means that a caller can't rely on the struct
 *   vm_area_struct::vm_flags to be constant across a call,
 *   except for immutable flags. Callers requiring this shouldn't use
 *   this function.
 *
 * Return: 0 on success, negative error code on failure, positive number on
 * caller defined premature termination.
 */
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
		      pgoff_t nr, const struct mm_walk_ops *ops,
		      void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.private	= private,
	};
	struct vm_area_struct *vma;
	pgoff_t vba, vea, cba, cea;
	unsigned long start_addr, end_addr;
	int err = 0;

	lockdep_assert_held(&mapping->i_mmap_rwsem);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
				  first_index + nr - 1) {
		/* Clip to the vma */
		vba = vma->vm_pgoff;
		vea = vba + vma_pages(vma);
		cba = first_index;
		cba = max(cba, vba);
		cea = first_index + nr;
		cea = min(cea, vea);

		start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
		end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
		if (start_addr >= end_addr)
			continue;

		walk.vma = vma;
		walk.mm = vma->vm_mm;

		err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
		if (err > 0) {
			err = 0;
			break;
		} else if (err < 0)
			break;

		err = __walk_page_range(start_addr, end_addr, &walk);
		if (err)
			break;
	}

	return err;
}
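
/*
 * Illustrative worked example (not part of this file) of the clipping above:
 * for a vma with vm_pgoff = 10 covering 8 pages, a call with first_index = 12
 * and nr = 20 clips to cba = 12 and cea = 18, so the walked range is
 * vma->vm_start + 2 * PAGE_SIZE .. vma->vm_start + 8 * PAGE_SIZE.
 */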