// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such
			 * a page is not CPU accessible and thus is mapped as
			 * a special swap entry; nonetheless it still does
			 * count as a valid regular mapping for the page (and
			 * is accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}
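/*
 * Editor's note (not in the original file): map_pte()'s filter, summarised.
 * Which PTEs are worth locking depends on pvmw->flags:
 *
 *	PVMW_SYNC:	take the lock unconditionally; the caller wants a
 *			stable view of whatever is there
 *	PVMW_MIGRATION:	only swap PTEs can hold migration entries, so
 *			anything else is rejected early
 *	(neither):	present PTEs, plus device-private swap PTEs, which
 *			count as regular mappings (see the comment above)
 *
 * check_pte() then decides whether the locked entry really maps pvmw->page.
 */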
static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
	unsigned long page_pfn = page_to_pfn(page);

	/* normal page and hugetlbfs page */
	if (!PageTransCompound(page) || PageHuge(page))
		return page_pfn == pfn;

	/* THP can be referenced by any subpage */
	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
}
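/*
 * Editor's note, a worked example (assuming 4KB base pages): for a 2MB THP
 * whose head page sits at pfn 0x1000, thp_nr_pages() is 512 (0x200), so any
 * pfn in [0x1000, 0x1200) matches. A non-compound page matches only its
 * exact pfn.
 */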
/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair pte and page for checking
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page or any subpage in case of THP.
 *
 * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * @pvmw->page or any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_is_match(pvmw->page, pfn);
}
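/*
 * Editor's note (not in the original file): the three pfn sources in
 * check_pte(), side by side:
 *
 *	PVMW_MIGRATION walk:	 pfn = migration_entry_to_pfn(entry)
 *	device-private swap PTE: pfn = device_private_entry_to_pfn(entry)
 *	present PTE:		 pfn = pte_pfn(*pvmw->pte)
 *
 * All three funnel into pfn_is_match(), so THP subpages are handled the
 * same way regardless of how the page is referenced.
 */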
/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP (see
 * the illustrative example after this function).
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			}
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else if (!pmd_present(pmde)) {
		return false;
	}

	if (!map_pte(pvmw))
		goto next_pte;
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					thp_size(pvmw->page))
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}
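/*
 * Illustrative example (editor's addition, not part of the kernel): the
 * canonical caller-side loop, in the style of rmap walkers such as
 * page_referenced_one() in mm/rmap.c. The function name below is
 * hypothetical.
 */
static inline unsigned int example_count_mappings(struct page *page,
		struct vm_area_struct *vma, unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	unsigned int mapped = 0;

	/*
	 * Each true return leaves pvmw.pte (or pvmw.pmd, for a PMD-mapped
	 * THP) pointing at one mapping of the page, with pvmw.ptl held;
	 * the final false return has already dropped the lock and unmapped
	 * the pte, so no cleanup is needed here.
	 */
	while (page_vma_mapped_walk(&pvmw))
		mapped++;

	return mapped;
}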
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + thp_size(page) - PAGE_SIZE;

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
		return 0;
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}