/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)  ((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define MMU_GATHER_BUNDLE       8

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            fullmm;
        struct vm_area_struct   *vma;
        unsigned long           start, end;
        unsigned long           range_start;
        unsigned long           range_end;
        unsigned int            nr;
        unsigned int            max;
        struct page             **pages;
        struct page             *local[MMU_GATHER_BUNDLE];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

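/*
 * Roughly, an unmap path drives this interface as follows (simplified
 * sketch only; the real callers are zap_page_range(), unmap_region() and
 * exit_mmap(), described below):
 *
 *      struct mmu_gather tlb;
 *
 *      tlb_gather_mmu(&tlb, mm, start, end);
 *      tlb_start_vma(&tlb, vma);
 *      ... for each pte in [start, end):
 *              tlb_remove_tlb_entry(&tlb, ptep, addr);
 *              tlb_remove_page(&tlb, page);
 *      tlb_end_vma(&tlb, vma);
 *      tlb_finish_mmu(&tlb, start, end);
 */
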
/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
        if (tlb->fullmm || !tlb->vma)
                flush_tlb_mm(tlb->mm);
        else if (tlb->range_end > 0) {
                flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
                tlb->range_start = TASK_SIZE;
                tlb->range_end = 0;
        }
}

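/*
 * Relating this to the three cases above: the fullmm teardown (case 2)
 * and the no-vma case (case 3) fall back to flush_tlb_mm(), while the
 * ranged unmap (case 1) uses the interval accumulated by tlb_add_flush()
 * below for a narrower flush_tlb_range().
 */
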
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
        if (!tlb->fullmm) {
                if (addr < tlb->range_start)
                        tlb->range_start = addr;
                if (addr + PAGE_SIZE > tlb->range_end)
                        tlb->range_end = addr + PAGE_SIZE;
        }
}

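/*
 * For example, with 4KB pages, recording 0x8000 and then 0xa000 above
 * leaves range_start = 0x8000 and range_end = 0xb000, so the eventual
 * flush_tlb_range() covers both pages (and the untouched one between
 * them) in one go.
 */
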
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
        unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

        if (addr) {
                tlb->pages = (void *)addr;
                tlb->max = PAGE_SIZE / sizeof(struct page *);
        }
}

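/*
 * On 32-bit ARM with 4KB pages this grows the batch from the 8-entry
 * local[] bundle embedded in struct mmu_gather to 1024 entries; if the
 * opportunistic GFP_NOWAIT allocation fails, batching simply continues
 * with tlb->local.
 */
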
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
        tlb_flush(tlb);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        free_pages_and_swap_cache(tlb->pages, tlb->nr);
        tlb->nr = 0;
        if (tlb->pages == tlb->local)
                __tlb_alloc_page(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
        tlb->mm = mm;
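        /*
         * start | (end + 1) is zero only for start == 0, end == -1, so
         * fullmm below is set just for the whole-address-space teardown
         * performed by exit_mmap() (case 2 above).
         */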
        tlb->fullmm = !(start | (end+1));
        tlb->start = start;
        tlb->end = end;
        tlb->vma = NULL;
        tlb->max = ARRAY_SIZE(tlb->local);
        tlb->pages = tlb->local;
        tlb->nr = 0;
        __tlb_alloc_page(tlb);
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        tlb_flush_mmu(tlb);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        if (tlb->pages != tlb->local)
                free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
        tlb_add_flush(tlb, addr);
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm) {
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
                tlb->vma = vma;
                tlb->range_start = TASK_SIZE;
                tlb->range_end = 0;
        }
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                tlb_flush(tlb);
}

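/*
 * Batching of page frees: __tlb_remove_page() returns how many slots are
 * still free in tlb->pages, and tlb_remove_page() flushes and drains the
 * batch via tlb_flush_mmu() as soon as that count reaches zero.
 */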
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        tlb->pages[tlb->nr++] = page;
        VM_BUG_ON(tlb->nr > tlb->max);
        return tlb->max - tlb->nr;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        if (!__tlb_remove_page(tlb, page))
                tlb_flush_mmu(tlb);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
        unsigned long addr)
{
        pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
        tlb_add_flush(tlb, addr);
#else
        /*
         * With the classic ARM MMU, a pte page has two corresponding pmd
         * entries, each covering 1MB.
         */
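        /*
         * Rounding addr down to the 2MB boundary and then recording the
         * last page of the first 1MB section and the first page of the
         * second stretches the pending flush range across both section
         * entries, so stale TLB entries for either half are covered.
         */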
        addr &= PMD_MASK;
        tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
        tlb_add_flush(tlb, addr + SZ_1M);
#endif

        tlb_remove_page(tlb, pte);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
                                  unsigned long addr)
{
        /* only LPAE has a separately allocated pmd level to tear down */
#ifdef CONFIG_ARM_LPAE
        tlb_add_flush(tlb, addr);
        tlb_remove_page(tlb, virt_to_page(pmdp));
#endif
}

static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
        tlb_add_flush(tlb, addr);
}

#define pte_free_tlb(tlb, ptep, addr)   __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)   __pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)   pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)          do { } while (0)

#endif /* CONFIG_MMU */
#endif