/*
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c  Copyright Linus Torvalds and others.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/config.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/* For UP we don't need to worry about TLB flush
 * and page free order so much..
 */
#ifdef CONFIG_SMP
  #define FREE_PTE_NR	506
  #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
  #define FREE_PTE_NR	1
  #define tlb_fast_mode(tlb) 1
#endif
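/*
 * Note (added summary, not part of the original comment): in "fast mode"
 * tlb_remove_page() frees pages immediately instead of batching them in
 * ->pages[], since no other CPU can be caching a stale translation.
 */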
/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.  This structure
 * can be per-CPU or per-MM as the page table lock is held for the duration of
 * TLB shootdown.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* set to ~0U means fast mode */
	unsigned int		need_flush;	/* Really unmapped some ptes? */
	unsigned int		fullmm;		/* non-zero means full mm flush */
	unsigned long		freed;
	struct page		*pages[FREE_PTE_NR];
};
/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
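/*
 * For example (illustrative sketch; the actual definition lives in each
 * architecture's mm setup code, and the exact file varies by arch):
 *
 *	DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 */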
/* tlb_gather_mmu
 *	Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &per_cpu(mmu_gathers, smp_processor_id());

	tlb->mm = mm;

	/* Use fast mode if only one CPU is online */
	tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;

	tlb->fullmm = full_mm_flush;
	tlb->freed = 0;

	return tlb;
}
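/*
 * Typical calling sequence (illustrative sketch only; the real call sites
 * are in the core mm unmap paths, and "ptep", "page" and "address" below
 * are placeholders):
 *
 *	struct mmu_gather *tlb;
 *
 *	spin_lock(&mm->page_table_lock);
 *	tlb = tlb_gather_mmu(mm, 0);
 *	...
 *	tlb_remove_tlb_entry(tlb, ptep, address);
 *	tlb_remove_page(tlb, page);
 *	...
 *	tlb_finish_mmu(tlb, start, end);
 *	spin_unlock(&mm->page_table_lock);
 */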
static inline void
tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;
	tlb_flush(tlb);
	if (!tlb_fast_mode(tlb)) {
		free_pages_and_swap_cache(tlb->pages, tlb->nr);
		tlb->nr = 0;
	}
}
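/*
 * Note (added): tlb_flush() is supplied by the architecture's asm/tlb.h
 * before this header is included; on some architectures it is simply a
 * wrapper around flush_tlb_mm(), but the exact definition is arch specific.
 */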
/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.  The page table lock is still held at this point.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	int freed = tlb->freed;
	struct mm_struct *mm = tlb->mm;
	int rss = get_mm_counter(mm, rss);

	if (rss < freed)
		freed = rss;
	add_mm_counter(mm, rss, -freed);
	tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}
static inline unsigned int
tlb_is_full_mm(struct mmu_gather *tlb)
{
	return tlb->fullmm;
}
/* tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
 *	handling the additional races in SMP caused by other CPUs caching valid
 *	mappings in their TLBs.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= FREE_PTE_NR)
		tlb_flush_mmu(tlb, 0, 0);
}
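/*
 * Note (added): once FREE_PTE_NR pages have been gathered the batch is
 * flushed immediately, so ->pages[] never overflows even for very large
 * unmaps.
 */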
/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that pte's were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.   This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
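/*
 * An architecture that needs no per-pte work can stub out the hook in its
 * asm/tlb.h, for example (illustrative; some arches do exactly this, others
 * record the address for a ranged flush):
 *
 *	#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 */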
#define pte_free_tlb(tlb, ptep)					\
	do {							\
		tlb->need_flush = 1;				\
		__pte_free_tlb(tlb, ptep);			\
	} while (0)
#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp)					\
	do {							\
		tlb->need_flush = 1;				\
		__pud_free_tlb(tlb, pudp);			\
	} while (0)
#endif
#define pmd_free_tlb(tlb, pmdp)					\
	do {							\
		tlb->need_flush = 1;				\
		__pmd_free_tlb(tlb, pmdp);			\
	} while (0)
#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */