#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>
#include <linux/rmap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_MMU_GATHER_NO_GATHER
static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	/* Limit batching if we have delayed rmaps pending */
	if (tlb->delayed_rmap && tlb->active != &tlb->local)
		return false;

	/* Reuse the next batch in the chain if one was already allocated */
	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr   = 0;
	batch->max  = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}
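/*
 * For illustration (layout as implied by the code above and by the generic
 * definitions in asm-generic/tlb.h): the gather is a small singly linked
 * chain of batches rooted in the on-stack mmu_gather.
 *
 *	tlb->local            - embedded batch backed by tlb->__pages
 *	tlb->local.next, ...  - up to MAX_GATHER_BATCH_COUNT page-sized
 *	                        batches of MAX_GATHER_BATCH entries each,
 *	                        allocated above with __get_free_page()
 *	tlb->active           - the batch currently being filled
 */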
static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_struct *vma)
{
	struct encoded_page **pages = batch->encoded_pages;

	for (int i = 0; i < batch->nr; i++) {
		struct encoded_page *enc = pages[i];

		if (encoded_page_flags(enc) & ENCODED_PAGE_BIT_DELAY_RMAP) {
			struct page *page = encoded_page_ptr(enc);
			unsigned int nr_pages = 1;

			if (unlikely(encoded_page_flags(enc) &
				     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
				nr_pages = encoded_nr_pages(pages[++i]);

			folio_remove_rmap_ptes(page_folio(page), page, nr_pages,
					       vma);
		}
	}
}
/**
 * tlb_flush_rmaps - do pending rmap removals after we have flushed the TLB
 * @tlb: the current mmu_gather
 * @vma: The memory area from which the pages are being removed.
 *
 * Note that because of how tlb_next_batch() above works, we will
 * never start multiple new batches with pending delayed rmaps, so
 * we only need to walk through the current active batch and the
 * original local one.
 */
void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->delayed_rmap)
		return;

	tlb_flush_rmap_batch(&tlb->local, vma);
	if (tlb->active != &tlb->local)
		tlb_flush_rmap_batch(tlb->active, vma);
	tlb->delayed_rmap = 0;
}
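/*
 * Hedged usage sketch: how a zap-style caller (e.g. the munmap() path in
 * mm/memory.c) is expected to combine the delayed-rmap machinery above;
 * the helper below and its exact flow are illustrative, not a copy of
 * that code.
 *
 *	static void example_zap_page(struct mmu_gather *tlb,
 *				     struct vm_area_struct *vma,
 *				     struct page *page)
 *	{
 *		// Queue the page with delay_rmap == true: the rmap is
 *		// torn down later, after the TLB entries are gone.
 *		if (__tlb_remove_page_size(tlb, page, true, PAGE_SIZE)) {
 *			// Batch full: flush the TLB first ...
 *			tlb_flush_mmu_tlbonly(tlb);
 *			// ... then do the pending rmap removals ...
 *			tlb_flush_rmaps(tlb, vma);
 *			// ... and only then free the gathered pages.
 *			tlb_flush_mmu(tlb);
 *		}
 *	}
 */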
/*
 * We might end up freeing a lot of pages. Reschedule on a regular
 * basis to avoid soft lockups in configurations without full
 * preemption enabled. The magic number of 512 folios seems to work.
 */
#define MAX_NR_FOLIOS_PER_FREE		512
static void __tlb_batch_free_encoded_pages(struct mmu_gather_batch *batch)
{
	struct encoded_page **pages = batch->encoded_pages;
	unsigned int nr, nr_pages;

	while (batch->nr) {
		if (!page_poisoning_enabled_static() && !want_init_on_free()) {
			nr = min(MAX_NR_FOLIOS_PER_FREE, batch->nr);

			/*
			 * Make sure we cover page + nr_pages, and don't leave
			 * nr_pages behind when capping the number of entries.
			 */
			if (unlikely(encoded_page_flags(pages[nr - 1]) &
				     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
				nr++;
		} else {
			/*
			 * With page poisoning and init_on_free, the time it
			 * takes to free memory grows proportionally with the
			 * actual memory size. Therefore, limit based on the
			 * actual memory size and not the number of involved
			 * folios.
			 */
			for (nr = 0, nr_pages = 0;
			     nr < batch->nr && nr_pages < MAX_NR_FOLIOS_PER_FREE;
			     nr++) {
				if (unlikely(encoded_page_flags(pages[nr]) &
					     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
					nr_pages += encoded_nr_pages(pages[++nr]);
				else
					nr_pages++;
			}
		}

		free_pages_and_swap_cache(pages, nr);
		pages += nr;
		batch->nr -= nr;

		cond_resched();
	}
}
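/*
 * Worked example with illustrative numbers: without poisoning or
 * init_on_free, a batch holding 1200 entries is released in chunks of
 * 512, 512 and 176 entries, rescheduling after each chunk; with poisoning
 * or init_on_free enabled, each chunk instead stops once it covers roughly
 * MAX_NR_FOLIOS_PER_FREE pages of memory, however many entries that takes.
 */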
static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next)
		__tlb_batch_free_encoded_pages(batch);
	tlb->active = &tlb->local;
}
static void tlb_batch_list_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch, *next;

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}
static bool __tlb_remove_folio_pages_size(struct mmu_gather *tlb,
		struct page *page, unsigned int nr_pages, bool delay_rmap,
		int page_size)
{
	int flags = delay_rmap ? ENCODED_PAGE_BIT_DELAY_RMAP : 0;
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	VM_WARN_ON(tlb->page_size != page_size);
	VM_WARN_ON_ONCE(nr_pages != 1 && page_size != PAGE_SIZE);
	VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));
#endif

	batch = tlb->active;
	/*
	 * Add the page and check if we are full. If so
	 * force a flush.
	 */
	if (likely(nr_pages == 1)) {
		batch->encoded_pages[batch->nr++] = encode_page(page, flags);
	} else {
		flags |= ENCODED_PAGE_BIT_NR_PAGES_NEXT;
		batch->encoded_pages[batch->nr++] = encode_page(page, flags);
		batch->encoded_pages[batch->nr++] = encode_nr_pages(nr_pages);
	}
	/*
	 * Make sure that we can always add another "page" + "nr_pages",
	 * requiring two entries instead of only a single one.
	 */
	if (batch->nr >= batch->max - 1) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max - 1, page);

	return false;
}
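/*
 * For illustration: when nr_pages > 1, the code above consumes two
 * consecutive slots, e.g. removing a 16-page chunk of a large folio is
 * recorded as
 *
 *	encoded_pages[n]     = encode_page(page, ENCODED_PAGE_BIT_NR_PAGES_NEXT)
 *	encoded_pages[n + 1] = encode_nr_pages(16)
 *
 * which is why the fullness check keeps two slots of headroom rather
 * than one.
 */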
bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
		unsigned int nr_pages, bool delay_rmap)
{
	return __tlb_remove_folio_pages_size(tlb, page, nr_pages, delay_rmap,
					     PAGE_SIZE);
}
bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
		bool delay_rmap, int page_size)
{
	return __tlb_remove_folio_pages_size(tlb, page, 1, delay_rmap, page_size);
}
#endif /* MMU_GATHER_NO_GATHER */

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

static void __tlb_remove_table_free(struct mmu_table_batch *batch)
{
	int i;

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE

/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means, this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage, this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 */
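/*
 * For illustration (a deliberately simplified sketch, not the real
 * gup_fast() code): the software walkers referred to above need nothing
 * more than IRQs being disabled across the walk, which is what the
 * IPI / RCU-sched scheme synchronizes against.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	// walk pgd/p4d/pud/pmd/pte here; the page-table pages seen during
 *	// the walk cannot be freed until IRQs are enabled again
 *	local_irq_restore(flags);
 */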
static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void tlb_remove_table_sync_one(void)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}
static void tlb_remove_table_rcu(struct rcu_head *head)
{
	__tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
}

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	call_rcu(&batch->rcu, tlb_remove_table_rcu);
}

#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	__tlb_remove_table_free(batch);
}

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
	if (tlb_needs_table_invalidate()) {
		/*
		 * Invalidate page-table caches used by hardware walkers. Then
		 * we still need to RCU-sched wait while freeing the pages
		 * because software walkers can still be in-flight.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

static void tlb_remove_table_one(void *table)
{
	tlb_remove_table_sync_one();
	__tlb_remove_table(table);
}
static void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		tlb_table_invalidate(tlb);
		tlb_remove_table_free(*batch);
		*batch = NULL;
	}
}
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_table_invalidate(tlb);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}
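/*
 * Hedged sketch of the caller side: an architecture's page-table freeing
 * hook (the function name here is hypothetical) hands the just-unlinked
 * table page to tlb_remove_table() instead of freeing it directly, so the
 * actual free happens only after the TLB flush and, where configured, an
 * RCU grace period.
 *
 *	static inline void example_pte_free_tlb(struct mmu_gather *tlb,
 *						void *table)
 *	{
 *		// "table" is already unlinked from the parent level
 *		tlb_remove_table(tlb, table);
 *	}
 */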
static inline void tlb_table_init(struct mmu_gather *tlb)
{
	tlb->batch = NULL;
}
#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

static inline void tlb_table_flush(struct mmu_gather *tlb) { }
static inline void tlb_table_init(struct mmu_gather *tlb) { }

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_pages_flush(tlb);
#endif
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}
static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			     bool fullmm)
{
	tlb->mm = mm;
	tlb->fullmm = fullmm;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr   = 0;
	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
	tlb->active     = &tlb->local;
	tlb->batch_count = 0;
#endif
	tlb->delayed_rmap = 0;

	tlb_table_init(tlb);
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	tlb->page_size = 0;
#endif

	__tlb_reset_range(tlb);
	inc_tlb_flush_pending(tlb->mm);
}
/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, false);
}
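/*
 * Hedged usage sketch: a typical teardown path (the unmap helpers named
 * below live in mm/memory.c and their exact signatures vary between
 * kernel versions) brackets the whole operation with this pair, feeding
 * pages and page tables into the gather in between.
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	// unmap_vmas(&tlb, ...);	zap PTEs, queue the backing pages
 *	// free_pgtables(&tlb, ...);	queue the page-table pages
 *	tlb_finish_mmu(&tlb);		// flush the TLB and free everything
 */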
/**
 * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * In this case, @mm is without users and we're going to destroy the
 * full address space (exit/execve).
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, true);
}
/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb)
{
	/*
	 * If there are parallel threads doing PTE changes on the same range
	 * under a non-exclusive lock (e.g., mmap_lock read-side) but deferring
	 * the TLB flush by batching, one thread may end up seeing inconsistent
	 * PTEs and be left with stale TLB entries. So flush the TLB forcefully
	 * if we detect parallel PTE batching threads.
	 *
	 * However, some syscalls, e.g. munmap(), may free page tables; this
	 * needs to force-flush everything in the given range. Otherwise we
	 * may end up with stale TLB entries on architectures, e.g. aarch64,
	 * that can restrict the flush to a given page-table level.
	 */
	if (mm_tlb_flush_nested(tlb->mm)) {
		/*
		 * aarch64 yields better performance with fullmm by
		 * avoiding multiple CPUs spamming TLBI messages at the
		 * same time.
		 *
		 * On x86 non-fullmm doesn't yield significant difference
		 * against fullmm.
		 */
		tlb->fullmm = 1;
		__tlb_reset_range(tlb);
		tlb->freed_tables = 1;
	}

	tlb_flush_mmu(tlb);

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_list_free(tlb);
#endif
	dec_tlb_flush_pending(tlb->mm);
}