/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in asm-generic/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none. Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */
void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache. This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
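/*
 * Huge-pmd counterpart of ptep_set_access_flags(): update the access
 * bits / write permission of a pmd that maps a transparent hugepage,
 * flush the TLB range it covers if the entry changed, and report
 * whether anything changed.
 */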
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
	BUG();
	return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
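/*
 * Clear the young/accessed bit of a pte and, if it was set, flush the
 * TLB entry for that page so a later access sets it again.
 */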
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
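/*
 * Huge-pmd variant of ptep_clear_flush_young(): clear the accessed bit
 * on a pmd-mapped hugepage and flush its TLB range if it was set.
 * Only meaningful with CONFIG_TRANSPARENT_HUGEPAGE.
 */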
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#else
	BUG();
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
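/*
 * Clear a pte and return the old value, flushing the TLB for that page
 * only if the old entry was actually accessible to the hardware.
 */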
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = (vma)->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
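/*
 * Clear a huge pmd, flush the TLB range it mapped, and return the old
 * pmd value to the caller.
 */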
pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
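/*
 * Mark a huge pmd as splitting and flush the TLB range it maps, so that
 * concurrent gup-fast walkers are guaranteed to see the splitting bit
 * before the split goes any further.
 */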
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	/* tlb flush only to serialize against gup-fast */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
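/*
 * Deposit a preallocated pte page table so it can be withdrawn later,
 * e.g. when the huge pmd is split.  Deposited tables are kept on a list
 * reachable through pmd_huge_pte(); the caller must hold the pmd lock.
 */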
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
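/*
 * Withdraw a pte page table previously stashed away with
 * pgtable_trans_huge_deposit(); the caller must hold the pmd lock.
 */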
/* no "address" argument so destroys page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	if (list_empty(&pgtable->lru))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = list_entry(pgtable->lru.next,
						    struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
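/*
 * Transiently mark a huge pmd not-present and flush the TLB range it
 * maps, so that neither the hardware nor gup-fast can use the old entry
 * while the caller is changing it.
 */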
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;
	if (pmd_numa(entry))
		entry = pmd_mknonnuma(entry);
	/* use the numa-cleared entry, not *pmdp, so the NUMA bit really goes away */
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif