// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/bitops.h>
#include <linux/mmu_notifier.h>
#include <linux/mm_inline.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/**
 * struct wp_walk - Private struct for pagetable walk callbacks
 * @range: Range for mmu notifiers
 * @tlbflush_start: Address of first modified pte
 * @tlbflush_end: Address of last modified pte + 1
 * @total: Total number of modified ptes
 */
struct wp_walk {
	struct mmu_notifier_range range;
	unsigned long tlbflush_start;
	unsigned long tlbflush_end;
	unsigned long total;
};

/**
 * wp_pte - Write-protect a pte
 * @pte: Pointer to the pte
 * @addr: The start of the virtual address range to protect
 * @end: The end of the virtual address range to protect
 * @walk: pagetable walk callback argument
 *
 * The function write-protects a pte and records the range in
 * virtual address space of touched ptes for efficient range TLB flushes.
 */
static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end,
		  struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;
	pte_t ptent = ptep_get(pte);

	if (pte_write(ptent)) {
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_wrprotect(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
		wpwalk->total++;
		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
					   addr + PAGE_SIZE);
	}

	return 0;
}

/**
 * struct clean_walk - Private struct for the clean_record_pte function.
 * @base: struct wp_walk we derive from
 * @bitmap_pgoff: Address_space page offset of the first bit in @bitmap
 * @bitmap: Bitmap with one bit for each page offset in the address_space range
 * covered.
 * @start: Address_space page offset of first modified pte relative
 * to @bitmap_pgoff
 * @end: Address_space page offset of last modified pte relative
 * to @bitmap_pgoff
 */
struct clean_walk {
	struct wp_walk base;
	pgoff_t bitmap_pgoff;
	unsigned long *bitmap;
	pgoff_t start;
	pgoff_t end;
};

#define to_clean_walk(_wpwalk) container_of(_wpwalk, struct clean_walk, base)

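/*
 * A minimal sketch of the derivation pattern (the variable names below are
 * illustrative only): the pagewalk callbacks receive the embedded &cwalk.base
 * through walk->private, and to_clean_walk() walks back to the containing
 * struct via container_of():
 *
 *	struct clean_walk cwalk = { .bitmap_pgoff = 0 };
 *	struct wp_walk *wpwalk = &cwalk.base;
 *
 *	to_clean_walk(wpwalk);	// == &cwalk
 */
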
/**
 * clean_record_pte - Clean a pte and record its address space offset in a
 * bitmap
 * @pte: Pointer to the pte
 * @addr: The start of the virtual address range to clean
 * @end: The end of the virtual address range to clean
 * @walk: pagetable walk callback argument
 *
 * The function cleans a pte and records the range in
 * virtual address space of touched ptes for efficient TLB flushes.
 * It also records dirty ptes in a bitmap representing page offsets
 * in the address_space, as well as the first and last of the bits
 * touched.
 */
static int clean_record_pte(pte_t *pte, unsigned long addr,
			    unsigned long end, struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;
	struct clean_walk *cwalk = to_clean_walk(wpwalk);
	pte_t ptent = ptep_get(pte);

	if (pte_dirty(ptent)) {
		pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) +
			walk->vma->vm_pgoff - cwalk->bitmap_pgoff;
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_mkclean(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);

		wpwalk->total++;
		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
					   addr + PAGE_SIZE);

		__set_bit(pgoff, cwalk->bitmap);
		cwalk->start = min(cwalk->start, pgoff);
		cwalk->end = max(cwalk->end, pgoff + 1);
	}

	return 0;
}

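/*
 * Worked example with assumed values: for a vma with vm_pgoff == 0x100 and
 * addr two pages past vm_start, the address_space page offset is 2 + 0x100.
 * With cwalk->bitmap_pgoff == 0x100, clean_record_pte() above sets bit 2 of
 * cwalk->bitmap and widens cwalk->start/end to cover it.
 */
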
/*
 * wp_clean_pmd_entry - The pagewalk pmd callback.
 *
 * Dirty-tracking should take place on the PTE level, so
 * WARN() if encountering a dirty huge pmd.
 * Furthermore, never split huge pmds, since that currently
 * causes dirty info loss. The pagefault handler should do
 * that if needed.
 */
static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	pmd_t pmdval = pmdp_get_lockless(pmd);

	/* Do not split a huge pmd, present or migrated */
	if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval)) {
		WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval));
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

/*
 * wp_clean_pud_entry - The pagewalk pud callback.
 *
 * Dirty-tracking should take place on the PTE level, so
 * WARN() if encountering a dirty huge pud.
 * Furthermore, never split huge puds, since that currently
 * causes dirty info loss. The pagefault handler should do
 * that if needed.
 */
static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	pud_t pudval = READ_ONCE(*pud);

	/* Do not split a huge pud */
	if (pud_trans_huge(pudval) || pud_devmap(pudval)) {
		WARN_ON(pud_write(pudval) || pud_dirty(pudval));
		walk->action = ACTION_CONTINUE;
	}
#endif

	return 0;
}

/*
 * wp_clean_pre_vma - The pagewalk pre_vma callback.
 *
 * The pre_vma callback performs the cache flush, stages the tlb flush
 * and calls the necessary mmu notifiers.
 */
static int wp_clean_pre_vma(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

	wpwalk->tlbflush_start = end;
	wpwalk->tlbflush_end = start;

	mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0,
				walk->mm, start, end);
	mmu_notifier_invalidate_range_start(&wpwalk->range);
	flush_cache_range(walk->vma, start, end);

	/*
	 * We're not using tlb_gather_mmu() since typically
	 * only a small subrange of PTEs are affected, whereas
	 * tlb_gather_mmu() records the full range.
	 */
	inc_tlb_flush_pending(walk->mm);

	return 0;
}

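/*
 * Note that wp_clean_pre_vma() stages an empty flush range by inverting it
 * (tlbflush_start = end, tlbflush_end = start); each modified pte then widens
 * it through min()/max(). If no pte is touched, tlbflush_end stays <=
 * tlbflush_start and wp_clean_post_vma() below skips the TLB flush.
 */
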
/*
 * wp_clean_post_vma - The pagewalk post_vma callback.
 *
 * The post_vma callback performs the tlb flush and calls necessary mmu
 * notifiers.
 */
static void wp_clean_post_vma(struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

	/*
	 * If another TLB flush is pending on this mm, flush the entire
	 * notifier range; otherwise the recorded subrange suffices.
	 */
	if (mm_tlb_flush_nested(walk->mm))
		flush_tlb_range(walk->vma, wpwalk->range.start,
				wpwalk->range.end);
	else if (wpwalk->tlbflush_end > wpwalk->tlbflush_start)
		flush_tlb_range(walk->vma, wpwalk->tlbflush_start,
				wpwalk->tlbflush_end);

	mmu_notifier_invalidate_range_end(&wpwalk->range);
	dec_tlb_flush_pending(walk->mm);
}

/*
 * wp_clean_test_walk - The pagewalk test_walk callback.
 *
 * Won't perform dirty-tracking on COW, read-only or HUGETLB vmas.
 */
static int wp_clean_test_walk(unsigned long start, unsigned long end,
			      struct mm_walk *walk)
{
	unsigned long vm_flags = READ_ONCE(walk->vma->vm_flags);

	/* Skip non-applicable VMAs */
	if ((vm_flags & (VM_SHARED | VM_MAYWRITE | VM_HUGETLB)) !=
	    (VM_SHARED | VM_MAYWRITE))
		return 1;

	return 0;
}

static const struct mm_walk_ops clean_walk_ops = {
	.pte_entry = clean_record_pte,
	.pmd_entry = wp_clean_pmd_entry,
	.pud_entry = wp_clean_pud_entry,
	.test_walk = wp_clean_test_walk,
	.pre_vma = wp_clean_pre_vma,
	.post_vma = wp_clean_post_vma
};

static const struct mm_walk_ops wp_walk_ops = {
	.pte_entry = wp_pte,
	.pmd_entry = wp_clean_pmd_entry,
	.pud_entry = wp_clean_pud_entry,
	.test_walk = wp_clean_test_walk,
	.pre_vma = wp_clean_pre_vma,
	.post_vma = wp_clean_post_vma
};

/**
 * wp_shared_mapping_range - Write-protect all ptes in an address space range
 * @mapping: The address_space we want to write protect
 * @first_index: The first page offset in the range
 * @nr: Number of incremental page offsets to cover
 *
 * Note: This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge write-enabled entries, though, and can easily be
 * extended to handle them as well.
 *
 * Return: The number of ptes actually write-protected. Note that
 * already write-protected ptes are not counted.
 */
unsigned long wp_shared_mapping_range(struct address_space *mapping,
				      pgoff_t first_index, pgoff_t nr)
{
	struct wp_walk wpwalk = { .total = 0 };

	i_mmap_lock_read(mapping);
	WARN_ON(walk_page_mapping(mapping, first_index, nr, &wp_walk_ops,
				  &wpwalk));
	i_mmap_unlock_read(mapping);

	return wpwalk.total;
}
EXPORT_SYMBOL_GPL(wp_shared_mapping_range);

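/*
 * Minimal usage sketch, assuming a caller that dirty-tracks a shared mapping
 * ("mapping", "first" and "npages" are placeholders, not names from this
 * file):
 *
 *	unsigned long num;
 *
 *	num = wp_shared_mapping_range(mapping, first, npages);
 *	// Subsequent writes fault into page_mkwrite()/pfn_mkwrite() before
 *	// the ptes become writable again; num counts newly protected ptes.
 */
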
/**
 * clean_record_shared_mapping_range - Clean and record all ptes in an
 * address space range
 * @mapping: The address_space we want to clean
 * @first_index: The first page offset in the range
 * @nr: Number of incremental page offsets to cover
 * @bitmap_pgoff: The page offset of the first bit in @bitmap
 * @bitmap: Pointer to a bitmap of at least @nr bits. The bitmap needs to
 * cover the whole range @first_index..@first_index + @nr.
 * @start: Pointer to the number of the first set bit in @bitmap, or @nr if
 * none set. The value is modified as new bits are set by the function.
 * @end: Pointer to one past the number of the last set bit in @bitmap, or
 * zero if none set. The value is modified as new bits are set by the function.
 *
 * When this function returns there is no guarantee that a CPU has
 * not already dirtied new ptes. However it will not clean any ptes not
 * reported in the bitmap. The guarantees are as follows:
 *
 * * All ptes dirty when the function starts executing will end up recorded
 *   in the bitmap.
 * * All ptes dirtied after that will either remain dirty, be recorded in the
 *   bitmap or both.
 *
 * If a caller needs to make sure all dirty ptes are picked up and none
 * additional are added, it first needs to write-protect the address-space
 * range and make sure new writers are blocked in page_mkwrite() or
 * pfn_mkwrite(). And then after a TLB flush following the write-protection
 * pick up all dirty bits.
 *
 * This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge dirty entries, though, and can easily be extended
 * to handle them as well.
 *
 * Return: The number of dirty ptes actually cleaned.
 */
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
						pgoff_t first_index, pgoff_t nr,
						pgoff_t bitmap_pgoff,
						unsigned long *bitmap,
						pgoff_t *start,
						pgoff_t *end)
{
	bool none_set = (*start >= *end);
	struct clean_walk cwalk = {
		.base = { .total = 0 },
		.bitmap_pgoff = bitmap_pgoff,
		.bitmap = bitmap,
		.start = none_set ? nr : *start,
		.end = none_set ? 0 : *end,
	};

	i_mmap_lock_read(mapping);
	WARN_ON(walk_page_mapping(mapping, first_index, nr, &clean_walk_ops,
				  &cwalk.base));
	i_mmap_unlock_read(mapping);

	*start = cwalk.start;
	*end = cwalk.end;

	return cwalk.base.total;
}
EXPORT_SYMBOL_GPL(clean_record_shared_mapping_range);

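/*
 * Minimal sketch of the protocol described above, assuming a caller-owned,
 * pre-zeroed bitmap of at least "npages" bits ("mapping", "first", "npages"
 * and "bitmap" are placeholders):
 *
 *	pgoff_t start = 0, end = 0;	// start >= end: no bits set yet
 *	unsigned long num;
 *
 *	wp_shared_mapping_range(mapping, first, npages);
 *	// The caller's page_mkwrite()/pfn_mkwrite() must hold off new
 *	// writers until the snapshot below is taken.
 *	num = clean_record_shared_mapping_range(mapping, first, npages,
 *						first, bitmap, &start, &end);
 *	// Bits [start, end) of bitmap (relative to "first") now record every
 *	// pte dirtied before the write-protect; num is the number cleaned.
 */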