/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>
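/*
 * Without an MMU there is nothing to shoot down: tlb_flush() reduces
 * to a cast that merely evaluates its argument (silencing unused
 * warnings), and the generic mmu_gather from asm-generic/tlb.h is
 * used unchanged.
 */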
#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define MMU_GATHER_BUNDLE	8
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
#else /* !CONFIG_HAVE_RCU_TABLE_FREE */
#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
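/*
 * Worked example, assuming 4KiB pages on 32-bit ARM (so pointers are
 * 4 bytes and sizeof(struct mmu_table_batch) is 12): MAX_TABLE_BATCH
 * comes out as (4096 - 12) / 4 = 1021 table pointers per RCU batch.
 */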
/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
	unsigned int		need_flush;
#endif
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		start, end;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	unsigned int		max;
	struct page		**pages;
	struct page		*local[MMU_GATHER_BUNDLE];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
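/*
 * Page gathering is two-tier: the gather starts out batching into the
 * small on-stack local[] bundle, and is switched to a whole page of
 * struct page pointers if __tlb_alloc_page() (below) succeeds.
 */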
/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}
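/*
 * Note that case 3 above lands in the first branch via tlb->vma == NULL:
 * with no vma to bound a ranged flush, the whole mm is flushed even
 * though fullmm == 0.
 */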
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}
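/*
 * Worked example (4KiB pages assumed): starting from the empty range
 * (range_start = TASK_SIZE, range_end = 0), tlb_add_flush(tlb, 0x8000)
 * followed by tlb_add_flush(tlb, 0xa000) leaves range_start = 0x8000
 * and range_end = 0xb000, i.e. one flush covering [0x8000, 0xb000).
 */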
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}
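/*
 * If the opportunistic GFP_NOWAIT allocation fails, tlb->pages keeps
 * pointing at the on-stack local[] bundle; gathering still works, just
 * with a flush every MMU_GATHER_BUNDLE pages instead of every
 * PAGE_SIZE / sizeof(struct page *) pages.
 */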
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
}
static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	free_pages_and_swap_cache(tlb->pages, tlb->nr);
	tlb->nr = 0;
	if (tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);
}
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
	       unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
}
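/*
 * Typical call sequence, sketched (mm, vma, start and end stand for
 * caller state; the pte walk in the middle is elided):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	...	for each pte torn down, call tlb_remove_tlb_entry()
 *		and tlb_remove_page() on the underlying page
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 */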
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}
/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}
/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}
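/*
 * range_start = TASK_SIZE with range_end = 0 encodes "empty range", so
 * the first tlb_add_flush() afterwards initialises both bounds: any
 * user address is below TASK_SIZE and addr + PAGE_SIZE is above 0.
 */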
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);
	return tlb->max - tlb->nr;
}
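/*
 * __tlb_remove_page() returns how many batch slots remain free; a
 * return of 0 means the batch is now full and must be flushed before
 * queueing any further pages, which is what tlb_remove_page() below
 * does.
 */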
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
#endif

	tlb_remove_entry(tlb, pte);
}
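/*
 * Worked example for the classic-MMU branch, with the usual !LPAE
 * layout where PMD_MASK covers a 2MB pte page: addr = 0x00250000 is
 * rounded down to 0x00200000, and flushes are queued at 0x002ff000
 * and 0x00300000, i.e. one page inside each of the two 1MB sections
 * the pte page maps.
 */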
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, virt_to_page(pmdp));
#endif
}
static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}
#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif