// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		/* each callback invocation covers exactly one pte: [addr, addr + PAGE_SIZE) */
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || !walk->vma) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if (!walk->pte_entry)
			continue;

		split_huge_pmd(walk->vma, pmd, addr);
		if (pmd_trans_unstable(pmd))
			goto again;
		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
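
/*
 * Illustrative sketch, not part of this file: a minimal ->pmd_entry()
 * handler honouring the rule above that handlers must cope with
 * pmd_trans_huge() pmds themselves. The name and the counting purpose
 * are hypothetical.
 */
static int __maybe_unused count_thp_pmd_entry(pmd_t *pmd, unsigned long addr,
					      unsigned long next,
					      struct mm_walk *walk)
{
	unsigned long *nr_thp = walk->private;
	spinlock_t *ptl;

	/* returns the held ptl only if *pmd is a stable trans-huge pmd */
	ptl = pmd_trans_huge_lock(pmd, walk->vma);
	if (ptl) {
		(*nr_thp)++;
		spin_unlock(ptl);
	}
	/* 0 means handled; walk_pmd_range() may still descend if ->pte_entry is set */
	return 0;
}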
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(p4d, addr);
	do {
 again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || !walk->vma) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}

		if (walk->pud_entry) {
			spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);

			if (ptl) {
				err = walk->pud_entry(pud, addr, next, walk);
				spin_unlock(ptl);
				if (err)
					break;
				continue;
			}
		}

		split_huge_pud(walk->vma, pud, addr);
		if (pud_none(*pud))
			goto again;

		if (walk->pmd_entry || walk->pte_entry)
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}
static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	int err = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry || walk->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}
static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry || walk->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	/* end of the huge page containing @addr, clamped to @end */
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);

	return boundary < end ? boundary : end;
}
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	pte_t *pte;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask, sz);

		if (pte)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (walk->pte_hole)
			err = walk->pte_hole(addr, next, walk);

		if (err)
			break;
	} while (addr = next, addr != end);

	return err;
}
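
/*
 * Illustrative sketch, hypothetical name: a ->hugetlb_entry() callback.
 * @hmask masks addresses down to the huge page boundary, and [addr, next)
 * spans at most one huge page, as computed by hugetlb_entry_end() above.
 */
static int __maybe_unused count_huge_entry(pte_t *pte, unsigned long hmask,
					   unsigned long addr,
					   unsigned long next,
					   struct mm_walk *walk)
{
	unsigned long *nr_huge = walk->private;
	pte_t entry = huge_ptep_get(pte);

	if (pte_present(entry))
		(*nr_huge)++;
	return 0;
}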
#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative value means
 * an error, in which case we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;

	if (walk->test_walk)
		return walk->test_walk(start, end, walk);

	/*
	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
	 * range, so we don't walk over it as we do for normal vmas. However,
	 * some callers are interested in handling hole ranges and they don't
	 * want to just ignore any single address range. Such users certainly
	 * define their ->pte_hole() callbacks, so let's delegate them to handle
	 * vma(VM_PFNMAP).
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;

		if (walk->pte_hole)
			err = walk->pte_hole(start, end, walk);
		return err ? err : 1;
	}
	return 0;
}
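
/*
 * Illustrative sketch, hypothetical name: a ->test_walk() callback
 * following the convention documented above: return 0 to walk the
 * current vma, 1 to skip it, and a negative value to abort the walk.
 */
static int __maybe_unused skip_mlocked_test_walk(unsigned long start,
						 unsigned long end,
						 struct mm_walk *walk)
{
	if (walk->vma->vm_flags & VM_LOCKED)
		return 1;	/* skip mlock()ed vmas; the walk continues */
	return 0;		/* walk over this vma normally */
}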
static int __walk_page_range(unsigned long start, unsigned long end,
			struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;

	if (vma && is_vm_hugetlb_page(vma)) {
		if (walk->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	return err;
}
/**
 * walk_page_range - walk page table with caller specific callbacks
 *
 * Recursively walk the page table tree of the process represented by @walk->mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined as below:
 *
 *  - 0  : succeeded to handle the current entry; if the end address is not
 *         reached yet, continue to walk.
 *  - >0 : succeeded to handle the current entry, and return to the caller
 *         with a caller-specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with the error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
 * purpose.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for access from the callbacks. If you want to pass some
 * caller-specific data to the callbacks, @walk->private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold
 *   @walk->mm->mmap_sem, because these functions traverse the vma list and/or
 *   access the vma's data.
 */
int walk_page_range(unsigned long start, unsigned long end,
		    struct mm_walk *walk)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;

	if (start >= end)
		return -EINVAL;

	if (!walk->mm)
		return -EINVAL;

	VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

	vma = find_vma(walk->mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk->vma = NULL;
			next = end;
		} else if (start < vma->vm_start) { /* outside vma */
			walk->vma = NULL;
			next = min(end, vma->vm_start);
		} else { /* inside vma */
			walk->vma = vma;
			next = min(end, vma->vm_end);
			vma = vma->vm_next;

			err = walk_page_test(start, next, walk);
			if (err > 0) {
				/*
				 * positive return values are purely for
				 * controlling the pagewalk, so should never
				 * be passed to the callers.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
		}
		if (walk->vma || walk->pte_hole)
			err = __walk_page_range(start, next, walk);
		if (err)
			break;
	} while (start = next, start < end);

	return err;
}
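
/*
 * Usage sketch (all names are hypothetical, not kernel code as-is):
 * count the present ptes in [start, end) of @mm with a ->pte_entry()
 * callback, passing the counter through @walk->private and taking
 * mmap_sem as the locking rule above requires.
 */
static int __maybe_unused count_pte_entry(pte_t *pte, unsigned long addr,
					  unsigned long next,
					  struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static unsigned long __maybe_unused count_present_ptes(struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry	= count_pte_entry,
		.mm		= mm,
		.private	= &count,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);
	return count;
}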
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
{
	int err;

	if (!walk->mm)
		return -EINVAL;

	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
	VM_BUG_ON(!vma);

	walk->vma = vma;
	err = walk_page_test(vma->vm_start, vma->vm_end, walk);
	if (err > 0)
		return 0;
	if (err < 0)
		return err;
	return __walk_page_range(vma->vm_start, vma->vm_end, walk);
}
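
/*
 * Usage sketch, hypothetical name: driving the counter callback from the
 * sketch above over a single vma. The caller must hold mmap_sem, exactly
 * as for walk_page_range().
 */
static unsigned long __maybe_unused count_present_ptes_vma(struct vm_area_struct *vma)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry	= count_pte_entry,
		.mm		= vma->vm_mm,
		.private	= &count,
	};

	walk_page_vma(vma, &walk);
	return count;
}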