#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
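
/*
 * Walk the PTEs covered by one pmd entry, calling ->pte_entry for each
 * PAGE_SIZE slot in [addr, end). The PTE page is mapped and unmapped
 * via pte_offset_map()/pte_unmap(), so this also works when page
 * tables live in highmem.
 */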
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pte_t *pte;
        int err = 0;

        pte = pte_offset_map(pmd, addr);
        for (;;) {
                err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                        break;
                addr += PAGE_SIZE;
                if (addr == end)
                        break;
                pte++;
        }

        pte_unmap(pte);
        return err;
}
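
/*
 * Walk the pmd entries under one pud entry. Holes (none or bad pmds)
 * are reported via ->pte_hole; otherwise ->pmd_entry is called and, if
 * a ->pte_entry callback is set, the walk descends into
 * walk_pte_range().
 */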
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pmd_t *pmd;
        unsigned long next;
        int err = 0;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry)
                        err = walk->pmd_entry(pmd, addr, next, walk);
                if (!err && walk->pte_entry)
                        err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}
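
/*
 * Walk the pud entries under one pgd entry, reporting holes to
 * ->pte_hole and descending into walk_pmd_range() whenever a
 * lower-level callback is present.
 */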
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pud_t *pud;
        unsigned long next;
        int err = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pud_entry)
                        err = walk->pud_entry(pud, addr, next, walk);
                if (!err && (walk->pmd_entry || walk->pte_entry))
                        err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}

#ifdef CONFIG_HUGETLB_PAGE
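/*
 * Return the end of the current hugepage-sized step: the start of
 * addr's huge page plus one huge page size, clamped to the end of the
 * walked range.
 */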
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
                                       unsigned long end)
{
        unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);

        return boundary < end ? boundary : end;
}
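
/*
 * Walk a hugetlb VMA one huge page at a time, calling ->hugetlb_entry
 * for each entry found by huge_pte_offset(). The step size comes from
 * the VMA's hstate, so any supported huge page size is handled.
 */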
static int walk_hugetlb_range(struct vm_area_struct *vma,
                              unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        struct hstate *h = hstate_vma(vma);
        unsigned long next;
        unsigned long hmask = huge_page_mask(h);
        pte_t *pte;
        int err = 0;

        do {
                next = hugetlb_entry_end(h, addr, end);
                pte = huge_pte_offset(walk->mm, addr & hmask);
                if (pte && walk->hugetlb_entry)
                        err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
                if (err)
                        return err;
        } while (addr = next, addr != end);

        return 0;
}
#endif

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer and the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.
 *
 * No locks are taken, but the bottom level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is
 * returned.
 */
int walk_page_range(unsigned long addr, unsigned long end,
                    struct mm_walk *walk)
{
        pgd_t *pgd;
        unsigned long next;
        int err = 0;

        if (addr >= end)
                return err;

        if (!walk->mm)
                return -EINVAL;

        pgd = pgd_offset(walk->mm, addr);
        do {
                struct vm_area_struct *uninitialized_var(vma);

                next = pgd_addr_end(addr, end);

#ifdef CONFIG_HUGETLB_PAGE
                /*
                 * handle hugetlb vma individually because pagetable
                 * walk for the hugetlb page is dependent on the
                 * architecture and we can't handle it in the same
                 * manner as non-huge pages.
                 */
                vma = find_vma(walk->mm, addr);
                if (vma && is_vm_hugetlb_page(vma)) {
                        if (vma->vm_end < next)
                                next = vma->vm_end;
                        /*
                         * Hugepage is very tightly coupled with vma,
                         * so walk through hugetlb entries within a
                         * given vma.
                         */
                        err = walk_hugetlb_range(vma, addr, next, walk);
                        if (err)
                                break;
                        pgd = pgd_offset(walk->mm, next);
                        continue;
                }
#endif
                if (pgd_none_or_clear_bad(pgd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        pgd++;
                        continue;
                }
                if (walk->pgd_entry)
                        err = walk->pgd_entry(pgd, addr, next, walk);
                if (!err &&
                    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
                        err = walk_pud_range(pgd, addr, next, walk);
                if (err)
                        break;
                pgd++;
        } while (addr = next, addr != end);

        return err;
}
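
/*
 * Example usage (an illustrative sketch, not from this file): a
 * hypothetical caller that counts present PTEs in a range by supplying
 * only a ->pte_entry callback. The names count_pte and
 * count_present_ptes are assumptions made for illustration.
 */
static int count_pte(pte_t *pte, unsigned long addr, unsigned long end,
                     struct mm_walk *walk)
{
        unsigned long *count = walk->private;   /* caller-owned counter */

        if (pte_present(*pte))
                (*count)++;
        return 0;       /* returning non-zero would abort the walk */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
                                        unsigned long start,
                                        unsigned long end)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry = count_pte,
                .mm        = mm,        /* required: walk_page_range checks it */
                .private   = &count,
        };

        walk_page_range(start, end, &walk);
        return count;
}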