mm/pagewalk.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || !walk->vma) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if (!walk->pte_entry)
			continue;

		split_huge_pmd(walk->vma, pmd, addr);
		if (pmd_trans_unstable(pmd))
			goto again;
		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
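
/*
 * Illustrative sketch: since walk_pmd_range() calls ->pmd_entry() before any
 * splitting, a handler must cope with pmd_trans_huge() pmds itself. The
 * handler below is hypothetical; it just counts huge pmds via walk->private.
 */
static int count_thp_pmd_entry(pmd_t *pmd, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	unsigned long *nr_thp = walk->private;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, walk->vma);
	if (ptl) {
		/* the whole [addr, next) range is mapped by one huge pmd */
		(*nr_thp)++;
		spin_unlock(ptl);
	}

	return 0;	/* keep walking; the walk descends only if ->pte_entry is set */
}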
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(p4d, addr);
	do {
again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || !walk->vma) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}

		if (walk->pud_entry) {
			spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);

			if (ptl) {
				err = walk->pud_entry(pud, addr, next, walk);
				spin_unlock(ptl);
				if (err)
					break;
				continue;
			}
		}

		split_huge_pud(walk->vma, pud, addr);
		if (pud_none(*pud))
			goto again;

		if (walk->pmd_entry || walk->pte_entry)
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}
static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	int err = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry || walk->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}
static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry || walk->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	pte_t *pte;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask, sz);

		if (pte)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (walk->pte_hole)
			err = walk->pte_hole(addr, next, walk);

		if (err)
			break;
	} while (addr = next, addr != end);

	return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. Negative values mean
 * an error, where we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;

	if (walk->test_walk)
		return walk->test_walk(start, end, walk);

	/*
	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
	 * range, so we don't walk over it as we do for normal vmas. However,
	 * some callers are interested in handling hole ranges and they don't
	 * want to just ignore any single address range. Such users certainly
	 * define their ->pte_hole() callbacks, so let's delegate them to handle
	 * vma(VM_PFNMAP).
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;

		if (walk->pte_hole)
			err = walk->pte_hole(start, end, walk);
		return err ? err : 1;
	}
	return 0;
}
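
/*
 * Illustrative sketch of a hypothetical ->test_walk() callback following the
 * convention above: return 1 to skip the vma, 0 to walk it, or a negative
 * error to abort the whole walk.
 */
static int skip_mlocked_test_walk(unsigned long start, unsigned long end,
				  struct mm_walk *walk)
{
	if (walk->vma->vm_flags & VM_LOCKED)
		return 1;	/* skip mlock()ed vmas, continue with the next one */
	return 0;		/* walk this vma normally */
}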
static int __walk_page_range(unsigned long start, unsigned long end,
			struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;

	if (vma && is_vm_hugetlb_page(vma)) {
		if (walk->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	return err;
}
/**
 * walk_page_range - walk page table with caller specific callbacks
 * @start: start address of the virtual address range
 * @end: end address of the virtual address range
 * @walk: mm_walk structure defining the callbacks and the target address space
 *
 * Recursively walk the page table tree of the process represented by @walk->mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up one of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined like below:
 *
 *  - 0  : succeeded to handle the current entry, and if you don't reach the
 *         end address yet, continue to walk.
 *  - >0 : succeeded to handle the current entry, and return to the caller
 *         with caller specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
 * purpose.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for the access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @walk->private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold
 *   @walk->mm->mmap_sem, because these functions traverse the vma list and/or
 *   access vma data.
 */
int walk_page_range(unsigned long start, unsigned long end,
		    struct mm_walk *walk)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;

	if (start >= end)
		return -EINVAL;

	if (!walk->mm)
		return -EINVAL;

	VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

	vma = find_vma(walk->mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk->vma = NULL;
			next = end;
		} else if (start < vma->vm_start) { /* outside vma */
			walk->vma = NULL;
			next = min(end, vma->vm_start);
		} else { /* inside vma */
			walk->vma = vma;
			next = min(end, vma->vm_end);
			vma = vma->vm_next;

			err = walk_page_test(start, next, walk);
			if (err > 0) {
				/*
				 * positive return values are purely for
				 * controlling the pagewalk, so should never
				 * be passed to the callers.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
		}
		if (walk->vma || walk->pte_hole)
			err = __walk_page_range(start, next, walk);
		if (err)
			break;
	} while (start = next, start < end);
	return err;
}
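
/*
 * Illustrative sketch of a hypothetical caller: count present ptes in a range
 * with a ->pte_entry() callback, holding mmap_sem as required above. The
 * function names and the use of ->private are assumptions for this example.
 */
static int count_present_pte_entry(pte_t *pte, unsigned long addr,
				   unsigned long next, struct mm_walk *walk)
{
	unsigned long *nr_present = walk->private;

	if (pte_present(*pte))
		(*nr_present)++;
	return 0;	/* 0 continues the walk to the next pte */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long nr_present = 0;
	struct mm_walk walk = {
		.pte_entry	= count_present_pte_entry,
		.mm		= mm,
		.private	= &nr_present,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);

	return nr_present;
}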
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
{
	int err;

	if (!walk->mm)
		return -EINVAL;

	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
	VM_BUG_ON(!vma);

	walk->vma = vma;
	err = walk_page_test(vma->vm_start, vma->vm_end, walk);
	if (err > 0)
		return 0;
	if (err < 0)
		return err;
	return __walk_page_range(vma->vm_start, vma->vm_end, walk);
}