// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	if (pvmw->flags & PVMW_MIGRATION) {
#ifdef CONFIG_MIGRATION
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;
		if (migration_entry_to_page(entry) - pvmw->page >=
				hpage_nr_pages(pvmw->page)) {
			return false;
		}
		if (migration_entry_to_page(entry) < pvmw->page)
			return false;
#else
		WARN_ON_ONCE(1);
#endif
	} else {
		if (is_swap_pte(*pvmw->pte)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(*pvmw->pte);
			if (is_device_private_entry(entry) &&
			    device_private_entry_to_page(entry) == pvmw->page)
				return true;
		}

		if (!pte_present(*pvmw->pte))
			return false;

		/* THP can be referenced by any subpage */
		if (pte_page(*pvmw->pte) - pvmw->page >=
				hpage_nr_pages(pvmw->page)) {
			return false;
		}
		if (pte_page(*pvmw->pte) < pvmw->page)
			return false;
	}

	return true;
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually a THP). For a PTE-mapped THP, you should run page_vma_mapped_walk()
 * in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address,
					    PAGE_SIZE << compound_order(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			}
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else if (!pmd_present(pmde)) {
		return false;
	}

	if (!map_pte(pvmw))
		goto next_pte;
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}
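
/*
 * Illustrative caller sketch (comment only, not compiled): as the kernel-doc
 * above notes, a PTE-mapped THP can be mapped by several PTEs, so callers
 * drive page_vma_mapped_walk() in a loop, in the style of the rmap walkers.
 * handle_one_mapping() and the stop_early condition are hypothetical
 * placeholders, not functions defined in this file. Inside the loop body,
 * pvmw.pte (or pvmw.pmd for a PMD mapping) points to the entry and pvmw.ptl
 * is held.
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		handle_one_mapping(&pvmw);
 *		if (stop_early) {
 *			page_vma_mapped_walk_done(&pvmw);
 *			break;
 *		}
 *	}
 */
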
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
		return 0;
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
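
/*
 * Minimal usage sketch (comment only, not compiled): callers that are already
 * iterating candidate VMAs for a page can use page_mapped_in_vma() as a plain
 * yes/no test; the surrounding loop and its locking are assumed here and not
 * shown.
 *
 *	if (!page_mapped_in_vma(page, vma))
 *		continue;
 */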