// SPDX-License-Identifier: GPL-2.0
/*
 * Generic pgtable methods declared in linux/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mm_inline.h>
#include <asm/pgalloc.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting the entry to p?d_none. Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stubbed out just as for p4d/pud
 * above: pmd folding is special, and pmd_* macros typically refer to the
 * upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache(). This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(ptep_get(ptep), entry);

	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address, ptep);
	}
	return changed;
}
#endif

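/*
 * Illustrative only (a sketch, not part of the generic implementation): the
 * typical caller pattern for ptep_set_access_flags() above.  A fault handler
 * relaxing the access flags of an existing pte uses the return value to
 * decide whether an update_mmu_cache() call is worthwhile.  The function
 * name below is hypothetical and only demonstrates the calling convention.
 */
static void __maybe_unused example_relax_access_flags(struct vm_area_struct *vma,
						      unsigned long address,
						      pte_t *ptep, pte_t entry,
						      int dirty)
{
	/* nonzero only if the pte actually changed */
	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
		update_mmu_cache(vma, address, ptep);
}
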
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = (vma)->vm_mm;
	pte_t pte;

	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));

	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
			 pmd_t *pmdp)
{
	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	return pmdp_invalidate(vma, address, pmdp);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * pmd and hugepage pte formats are the same, so we can
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes, not the pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif

/* an arch defines pte_free_defer() in asm/pgalloc.h for its own implementation */
#ifndef pte_free_defer
static void pte_free_now(struct rcu_head *head)
{
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	pte_free(NULL /* mm not passed and not used */, (pgtable_t)page);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct page *page;

	page = pgtable;
	call_rcu(&page->rcu_head, pte_free_now);
}
#endif /* pte_free_defer */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#if defined(CONFIG_GUP_GET_PXX_LOW_HIGH) && \
	(defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RCU))
/*
 * See the comment above ptep_get_lockless() in include/linux/pgtable.h:
 * the barriers in pmdp_get_lockless() cannot guarantee that the value in
 * pmd_high actually belongs with the value in pmd_low; but holding interrupts
 * off blocks the TLB flush between present updates, which guarantees that a
 * successful __pte_offset_map() points to a page from matched halves.
 */
static unsigned long pmdp_get_lockless_start(void)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	return irqflags;
}
static void pmdp_get_lockless_end(unsigned long irqflags)
{
	local_irq_restore(irqflags);
}
#else
static unsigned long pmdp_get_lockless_start(void) { return 0; }
static void pmdp_get_lockless_end(unsigned long irqflags) { }
#endif

pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
{
	unsigned long irqflags;
	pmd_t pmdval;

	rcu_read_lock();
	irqflags = pmdp_get_lockless_start();
	pmdval = pmdp_get_lockless(pmd);
	pmdp_get_lockless_end(irqflags);

	if (pmdvalp)
		*pmdvalp = pmdval;
	if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval)))
		goto nomap;
	if (unlikely(pmd_trans_huge(pmdval) || pmd_devmap(pmdval)))
		goto nomap;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		goto nomap;
	}
	return __pte_map(&pmdval, addr);
nomap:
	rcu_read_unlock();
	return NULL;
}

pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr, spinlock_t **ptlp)
{
	pmd_t pmdval;
	pte_t *pte;

	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (likely(pte))
		*ptlp = pte_lockptr(mm, &pmdval);
	return pte;
}

pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr, pmd_t *pmdvalp,
				spinlock_t **ptlp)
{
	pte_t *pte;

	VM_WARN_ON_ONCE(!pmdvalp);
	pte = __pte_offset_map(pmd, addr, pmdvalp);
	if (likely(pte))
		*ptlp = pte_lockptr(mm, pmdvalp);
	return pte;
}

/*
 * pte_offset_map_lock(mm, pmd, addr, ptlp), and its internal implementation
 * __pte_offset_map_lock() below, is usually called with the pmd pointer for
 * addr, reached by walking down the mm's pgd, p4d, pud for addr: either while
 * holding mmap_lock or vma lock for read or for write; or in truncate or rmap
 * context, while holding file's i_mmap_lock or anon_vma lock for read (or for
 * write). In a few cases, it may be used with pmd pointing to a pmd_t already
 * copied to or constructed on the stack.
 *
 * When successful, it returns the pte pointer for addr, with its page table
 * kmapped if necessary (when CONFIG_HIGHPTE), and locked against concurrent
 * modification by software, with a pointer to that spinlock in ptlp (in some
 * configs mm->page_table_lock, in SPLIT_PTLOCK configs a spinlock in the
 * table's struct page). Use pte_unmap_unlock(pte, ptl) to unlock and unmap
 * afterwards.
 *
 * But it is unsuccessful, returning NULL with *ptlp unchanged, if there is no
 * page table at *pmd: if, for example, the page table has just been removed,
 * or replaced by the huge pmd of a THP. (When successful, *pmd is rechecked
 * after acquiring the ptlock, and retried internally if it changed: so that a
 * page table can be safely removed or replaced by THP while holding its lock.)
 *
 * pte_offset_map(pmd, addr), and its internal helper __pte_offset_map() above,
 * just returns the pte pointer for addr, its page table kmapped if necessary;
 * or NULL if there is no page table at *pmd. It does not attempt to lock the
 * page table, so cannot normally be used when the page table is to be updated,
 * or when entries read must be stable. But it does take rcu_read_lock(): so
 * that even when the page table is racily removed, it remains a valid though
 * empty and disconnected table, until pte_unmap(pte) unmaps it and
 * rcu_read_unlock()s afterwards.
 *
 * pte_offset_map_ro_nolock(mm, pmd, addr, ptlp), above, is like pte_offset_map();
 * but when successful, it also outputs a pointer to the spinlock in ptlp - as
 * pte_offset_map_lock() does, but in this case without locking it. This helps
 * the caller to avoid a later pte_lockptr(mm, *pmd), which might by that time
 * act on a changed *pmd: pte_offset_map_ro_nolock() provides the correct spinlock
 * pointer for the page table that it returns. Even after grabbing the spinlock,
 * we might be looking either at a page table that is still mapped or one that
 * was unmapped and is about to get freed. But for R/O access this is sufficient.
 * So it is only applicable for read-only cases, where no modification of the
 * page table is allowed even if the corresponding spinlock is held afterwards.
 *
 * pte_offset_map_rw_nolock(mm, pmd, addr, pmdvalp, ptlp), above, is like
 * pte_offset_map_ro_nolock(); but when successful, it also outputs the pmdval.
 * It is applicable for may-write cases, where modifications to the page table
 * may happen after the corresponding spinlock is taken. But users should make
 * sure the page table is stable, e.g. by checking pte_same() or by checking
 * pmd_same() against the output pmdval, before performing the write
 * operations.
 *
 * Note: "RO" / "RW" expresses the intended semantics, not that the *kmap* will
 * be read-only/read-write protected.
 *
 * Note that free_pgtables(), used after unmapping detached vmas, or when
 * exiting the whole mm, does not take page table lock before freeing a page
 * table, and may not use RCU at all: "outsiders" like khugepaged should avoid
 * pte_offset_map() and co once the vma is detached from mm or mm_users is zero.
 */
pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, spinlock_t **ptlp)
{
	spinlock_t *ptl;
	pmd_t pmdval;
	pte_t *pte;
again:
	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (unlikely(!pte))
		return pte;
	ptl = pte_lockptr(mm, &pmdval);
	spin_lock(ptl);
	if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	goto again;
}
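
/*
 * Illustrative sketches only (not used elsewhere in this file; the function
 * names are hypothetical).  The first shows the common locked lookup with
 * pte_offset_map_lock(): map and lock the page table, read the pte, then
 * unlock and unmap.  A NULL return simply means there is no page table at
 * *pmd (e.g. it was removed or replaced by a huge pmd), and the caller backs
 * out or retries at a higher level.
 */
static bool __maybe_unused example_pte_is_young(struct mm_struct *mm,
						pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;
	bool young;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return false;	/* no page table at *pmd */
	young = pte_young(ptep_get(pte));
	pte_unmap_unlock(pte, ptl);
	return young;
}

/*
 * The second sketch shows the may-write variant: the caller takes the pte
 * lock itself and revalidates the pmd against the pmdval output by
 * pte_offset_map_rw_nolock() before modifying anything, as described in the
 * comment above.
 */
static bool __maybe_unused example_modify_under_lock(struct mm_struct *mm,
						     pmd_t *pmd,
						     unsigned long addr)
{
	spinlock_t *ptl;
	pmd_t pmdval;
	pte_t *pte;

	pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval, &ptl);
	if (!pte)
		return false;
	spin_lock(ptl);
	if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
		/* page table was removed or replaced: back out */
		pte_unmap_unlock(pte, ptl);
		return false;
	}
	/* the page table is stable here: ptes may be read and modified */
	pte_unmap_unlock(pte, ptl);
	return true;
}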