mm/page_vma_mapped.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
        page_vma_mapped_walk_done(pvmw);
        return false;
}
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
        pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
        if (!(pvmw->flags & PVMW_SYNC)) {
                if (pvmw->flags & PVMW_MIGRATION) {
                        if (!is_swap_pte(*pvmw->pte))
                                return false;
                } else {
                        /*
                         * We get here when we are trying to unmap a private
                         * device page from the process address space. Such
                         * a page is not CPU accessible and thus is mapped as
                         * a special swap entry; nonetheless it still counts
                         * as a valid regular mapping for the page (and is
                         * accounted as such in the page maps count).
                         *
                         * So handle this special case as if it were a normal
                         * page mapping, i.e. lock the CPU page table and
                         * return true.
                         *
                         * For more details on device private memory see HMM
                         * (include/linux/hmm.h or mm/hmm.c).
                         */
                        if (is_swap_pte(*pvmw->pte)) {
                                swp_entry_t entry;

                                /* Handle un-addressable ZONE_DEVICE memory */
                                entry = pte_to_swp_entry(*pvmw->pte);
                                if (!is_device_private_entry(entry))
                                        return false;
                        } else if (!pte_present(*pvmw->pte))
                                return false;
                }
        }
        pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
        spin_lock(pvmw->ptl);
        return true;
}
static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
{
        unsigned long hpage_pfn = page_to_pfn(hpage);

        /* THP can be referenced by any subpage */
        return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
}
/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page or to any subpage in case of THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * @pvmw->page or to any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
        unsigned long pfn;

        if (pvmw->flags & PVMW_MIGRATION) {
                swp_entry_t entry;

                if (!is_swap_pte(*pvmw->pte))
                        return false;
                entry = pte_to_swp_entry(*pvmw->pte);

                if (!is_migration_entry(entry))
                        return false;

                pfn = migration_entry_to_pfn(entry);
        } else if (is_swap_pte(*pvmw->pte)) {
                swp_entry_t entry;

                /* Handle un-addressable ZONE_DEVICE memory */
                entry = pte_to_swp_entry(*pvmw->pte);
                if (!is_device_private_entry(entry))
                        return false;

                pfn = device_private_entry_to_pfn(entry);
        } else {
                if (!pte_present(*pvmw->pte))
                        return false;

                pfn = pte_pfn(*pvmw->pte);
        }

        return pfn_in_hpage(pvmw->page, pfn);
}
/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually a THP). For a PTE-mapped THP, you should run page_vma_mapped_walk()
 * in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 * (An illustrative caller sketch follows the function body below.)
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
        struct mm_struct *mm = pvmw->vma->vm_mm;
        struct page *page = pvmw->page;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t pmde;

        /* The only possible pmd mapping has been handled on last iteration */
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);

        if (pvmw->pte)
                goto next_pte;

        if (unlikely(PageHuge(pvmw->page))) {
                /* when pud is not present, pte will be NULL */
                pvmw->pte = huge_pte_offset(mm, pvmw->address,
                                            PAGE_SIZE << compound_order(page));
                if (!pvmw->pte)
                        return false;

                pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
                spin_lock(pvmw->ptl);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
                return true;
        }
restart:
        pgd = pgd_offset(mm, pvmw->address);
        if (!pgd_present(*pgd))
                return false;
        p4d = p4d_offset(pgd, pvmw->address);
        if (!p4d_present(*p4d))
                return false;
        pud = pud_offset(p4d, pvmw->address);
        if (!pud_present(*pud))
                return false;
        pvmw->pmd = pmd_offset(pud, pvmw->address);
        /*
         * Make sure the pmd value isn't cached in a register by the
         * compiler and used as a stale value after we've observed a
         * subsequent update.
         */
        pmde = READ_ONCE(*pvmw->pmd);
        if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
                pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                if (likely(pmd_trans_huge(*pvmw->pmd))) {
                        if (pvmw->flags & PVMW_MIGRATION)
                                return not_found(pvmw);
                        if (pmd_page(*pvmw->pmd) != page)
                                return not_found(pvmw);
                        return true;
                } else if (!pmd_present(*pvmw->pmd)) {
                        if (thp_migration_supported()) {
                                if (!(pvmw->flags & PVMW_MIGRATION))
                                        return not_found(pvmw);
                                if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
                                        swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

                                        if (migration_entry_to_page(entry) != page)
                                                return not_found(pvmw);
                                        return true;
                                }
                        }
                        return not_found(pvmw);
                } else {
                        /* THP pmd was split under us: handle on pte level */
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
                }
        } else if (!pmd_present(pmde)) {
                return false;
        }
        if (!map_pte(pvmw))
                goto next_pte;
        while (1) {
                if (check_pte(pvmw))
                        return true;
next_pte:
                /* Seek to next pte only makes sense for THP */
                if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
                        return not_found(pvmw);
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= pvmw->vma->vm_end ||
                            pvmw->address >=
                                        __vma_address(pvmw->page, pvmw->vma) +
                                        hpage_nr_pages(pvmw->page) * PAGE_SIZE)
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
                        if (pvmw->address % PMD_SIZE == 0) {
                                pte_unmap(pvmw->pte);
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
                                goto restart;
                        } else {
                                pvmw->pte++;
                        }
                } while (pte_none(*pvmw->pte));

                if (!pvmw->ptl) {
                        pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                        spin_lock(pvmw->ptl);
                }
        }
}
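
/*
 * Illustrative sketch, not part of the original file: the kernel-doc above
 * says a PTE-mapped THP is found one PTE at a time, so callers drive
 * page_vma_mapped_walk() in a loop until it returns false.  The helper name
 * example_count_mapped_ptes and its locals are invented for this example;
 * the struct fields and functions it uses are the ones defined in this file
 * and declared in <linux/rmap.h>.
 */
static int __maybe_unused example_count_mapped_ptes(struct page *page,
                                                    struct vm_area_struct *vma,
                                                    unsigned long address)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .address = address,
                .flags = 0,
        };
        int mapped = 0;

        /*
         * Each true return leaves pvmw.ptl held and pvmw.pte (or pvmw.pmd
         * alone, for a PMD-mapped THP) pointing at one mapping of @page.
         */
        while (page_vma_mapped_walk(&pvmw))
                mapped++;

        /* A false return has already unlocked and unmapped everything. */
        return mapped;
}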
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .flags = PVMW_SYNC,
        };
        unsigned long start, end;

        start = __vma_address(page, vma);
        end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

        if (unlikely(end < vma->vm_start || start >= vma->vm_end))
                return 0;
        pvmw.address = max(start, vma->vm_start);
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
        return 1;
}
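
/*
 * Illustrative sketch, not part of the original file: page_mapped_in_vma()
 * already packages the walk into a simple yes/no query, so a caller that
 * only needs to know whether @page is reachable through one particular VMA
 * can use it directly rather than driving the walk itself.  The helper name
 * example_page_is_mapped_here is invented for this example.
 */
static bool __maybe_unused example_page_is_mapped_here(struct page *page,
                                                       struct vm_area_struct *vma)
{
        /*
         * PVMW_SYNC (set internally by page_mapped_in_vma()) makes map_pte()
         * take the PTE lock instead of bailing out early on a non-present
         * entry, so the check is fully synchronous.
         */
        return page_mapped_in_vma(page, vma) == 1;
}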