/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}
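
/*
 * Usage sketch (illustrative, not part of the original file): generic
 * page-table walkers call the p?d_none_or_clear_bad() macros, which in
 * turn call the helpers above when an entry is bad.  A typical pmd-level
 * loop looks roughly like:
 *
 *	pmd = pmd_offset(pud, addr);
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		... walk the pte level for [addr, next) ...
 *	} while (pmd++, addr = next, addr != end);
 */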

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission.  Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this.  We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache().  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif
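
/*
 * Caller sketch (illustrative): the generic fault path uses the return
 * value to decide whether to update the MMU cache, roughly:
 *
 *	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
 *		update_mmu_cache(vma, address, ptep);
 */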

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif
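
/*
 * Caller sketch (illustrative): reclaim's rmap walk uses this to age
 * pages; page_referenced_one() does, in essence (the real caller goes
 * through the mmu-notifier wrapper ptep_clear_flush_young_notify()):
 *
 *	if (ptep_clear_flush_young(vma, address, pte))
 *		referenced++;
 */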

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif
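
/*
 * Caller sketch (illustrative): unmap paths that must not leave a stale
 * translation behind use this helper; try_to_unmap_one() does, in essence:
 *
 *	pteval = ptep_clear_flush(vma, address, pte);
 *
 * The pte_accessible() test above lets architectures skip the flush for
 * entries the hardware can never have cached in the TLB.
 */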

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/*
 * ARCHes with special requirements for evicting THP backing TLB entries can
 * implement this.  Otherwise it can also help optimize the normal TLB flush
 * in the THP regime: the stock flush_tlb_range() typically has an
 * optimization to nuke the entire TLB if the flush span exceeds a
 * threshold, which will likely be true for a single huge page.  A single
 * THP flush would thus invalidate the entire TLB, which is not desirable.
 * e.g. see arch/arc: flush_pmd_tlb_range
 */
#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif
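
/*
 * Override sketch (assumption, modelled on the arch/arc reference above):
 * an architecture wanting a precise huge-page eviction defines
 * __HAVE_ARCH_FLUSH_PMD_TLB_RANGE in its headers and supplies its own
 *
 *	void flush_pmd_tlb_range(struct vm_area_struct *vma,
 *				 unsigned long start, unsigned long end);
 *
 * that evicts only the single PMD-sized TLB entry instead of nuking the
 * whole TLB.
 */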

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif
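
/*
 * Caller sketch (illustrative): the THP fault path mirrors the pte case,
 * roughly:
 *
 *	if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
 *		update_mmu_cache_pmd(vma, address, pmd);
 */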

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif
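
/*
 * Caller sketch (illustrative): the rmap aging walk handles huge pages
 * with this helper, in essence (the real caller goes through the
 * mmu-notifier wrapper pmdp_clear_flush_young_notify()):
 *
 *	if (pmdp_clear_flush_young(vma, address, pmd))
 *		referenced++;
 */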

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(!pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
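
/*
 * Caller sketch (illustrative): paths that atomically tear down a huge
 * mapping before installing a replacement, e.g. the THP copy-on-write
 * path, do roughly (via the mmu-notifier wrapper in the real caller):
 *
 *	pmdp_huge_clear_flush(vma, haddr, pmd);
 *	set_pmd_at(mm, haddr, pmd, entry);
 */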

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	/* tlb flush only to serialize against gup-fast */
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif
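
/*
 * Note (explanatory, an assumption about the serialization above):
 * gup-fast walks page tables with interrupts disabled and takes no locks.
 * On architectures where the TLB flush is delivered by IPI, the flush
 * cannot complete while a gup-fast walker is in progress, so once the
 * flush returns, any later walker is guaranteed to observe the splitting
 * bit set above.
 */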

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	if (list_empty(&pgtable->lru))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = list_entry(pgtable->lru.next,
						    struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}
#endif
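
/*
 * Lifecycle sketch (illustrative): a pte page table is pre-allocated at
 * THP fault time and deposited, then withdrawn and freed (or reused for
 * a split) when the huge mapping goes away, roughly:
 *
 *	pgtable = pte_alloc_one(mm, haddr);
 *	...
 *	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 *	...
 *	pte_free(mm, pgtable_trans_huge_withdraw(mm, pmd));
 */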

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif
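
/*
 * Caller sketch (illustrative): the THP split code marks the pmd
 * not-present while the page-table lock is held, roughly to keep the
 * hardware from racily updating dirty/accessed bits in the huge entry
 * while the individual ptes are being established:
 *
 *	pmdp_invalidate(vma, haddr, pmd);
 */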

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and hugepage pte formats are the same, so we can
	 * use the same function here.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down the ptes, not the pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
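
/*
 * Caller sketch (illustrative): khugepaged's collapse path removes the
 * pmd covering the small pages being collapsed, roughly:
 *
 *	_pmd = pmdp_collapse_flush(vma, address, pmd);
 */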
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */