#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table.
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs.
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
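/*
 * Illustrative sketch only (not part of the original header): one way a
 * caller could drive the hooks above for a single VMA.  The function
 * example_unmap() and the helper lookup_pte() are hypothetical; the real
 * callers live in the generic mm code (e.g. mm/memory.c).
 */
#if 0	/* example only -- never compiled */
static void
example_unmap (struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mmu_gather *tlb;
	unsigned long addr;
	pte_t *ptep;

	tlb = tlb_gather_mmu(vma->vm_mm, 0);		/* 0 => not a full-mm flush */
	tlb_start_vma(tlb, vma);
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		ptep = lookup_pte(vma->vm_mm, addr);	/* hypothetical page-table walk */
		if (!ptep || pte_none(*ptep))
			continue;
		/* the real code clears the PTE here, e.g. via ptep_get_and_clear() */
		tlb_remove_tlb_entry(tlb, ptep, addr);	/* record addr, set need_flush */
		if (pte_present(*ptep))
			tlb_remove_page(tlb, pte_page(*ptep));	/* defer the actual free */
	}
	tlb_end_vma(tlb, vma);
	tlb_finish_mmu(tlb, start, end);	/* flush TLB, then release pages */
}
#endif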
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>
#ifdef CONFIG_SMP
# define FREE_PTE_NR		2048
# define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
#else
# define FREE_PTE_NR		0
# define tlb_fast_mode(tlb)	(1)
#endif
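/*
 * FREE_PTE_NR is the size of the page batch gathered before a forced flush;
 * "fast mode" bypasses batching and frees pages immediately (always on UP,
 * and on SMP whenever nr == ~0U, i.e. only one CPU is online).
 */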
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* == ~0U => fast mode */
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		freed;		/* number of pages freed */
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		*pages[FREE_PTE_NR];
};
/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
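/*
 * (Informative note: this header only declares the storage; the matching
 * definition is typically provided once by the arch's mm setup code via
 * DEFINE_PER_CPU(struct mmu_gather, mmu_gathers).)
 */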
/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	unsigned int nr;

	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}

	/* lastly, release the freed pages */
	nr = tlb->nr;
	if (!tlb_fast_mode(tlb)) {
		unsigned long i;
		tlb->nr = 0;
		tlb->start_addr = ~0UL;
		for (i = 0; i < nr; ++i)
			free_page_and_swap_cache(tlb->pages[i]);
	}
}
/*
 * Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &__get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	/*
	 * Use fast mode if only 1 CPU is online.
	 *
	 * It would be tempting to turn on fast-mode for full_mm_flush as well.  But this
	 * doesn't work because of speculative accesses and software prefetching: the page
	 * table of "mm" may be (and usually is) the currently active page table and even
	 * though the kernel won't do any user-space accesses during the TLB shoot down, a
	 * compiler might use speculation or lfetch.fault on what happens to be a valid
	 * user-space address.  This in turn could trigger a TLB miss fault (or a VHPT
	 * walk) and re-insert a TLB entry we just removed.  Slow mode avoids such
	 * problems.  (We could make fast-mode work by switching the current task to a
	 * different "mm" during the shootdown.) --davidm 08/02/2002
	 */
	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
	tlb->fullmm = full_mm_flush;
	tlb->freed = 0;
	tlb->start_addr = ~0UL;
	return tlb;
}
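/*
 * (Informative note: on a machine with a single online CPU the gather starts
 * in fast mode (nr == ~0U), so tlb_remove_page() frees pages immediately;
 * with multiple CPUs online, nr starts at 0 and pages are batched in
 * tlb->pages[] until the next flush.)
 */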
/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.  The page table lock is still held at this point.
 */
static inline void
tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	unsigned long freed = tlb->freed;
	struct mm_struct *mm = tlb->mm;
	unsigned long rss = get_mm_counter(mm, rss);

	if (rss < freed)
		freed = rss;
	add_mm_counter(mm, rss, -freed);
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}
static inline unsigned int
tlb_is_full_mm(struct mmu_gather *tlb)
{
	return tlb->fullmm;
}
/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline void
tlb_remove_page (struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= FREE_PTE_NR)
		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}
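/*
 * (Informative note: on SMP, once FREE_PTE_NR pages have been gathered the
 * batch is flushed eagerly above, so tlb->pages[] cannot overflow and every
 * gathered page is freed only after the TLB entries covering it are gone.)
 */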
/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}
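/*
 * Worked example (assuming the usual ascending-address walk): after removing
 * the PTEs at ADDR and then ADDR + PAGE_SIZE, start_addr == ADDR and
 * end_addr == ADDR + 2*PAGE_SIZE, i.e. the recorded range covers every entry
 * removed since the last flush reset start_addr to ~0UL.
 */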
#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define pte_free_tlb(tlb, ptep)				\
do {							\
	tlb->need_flush = 1;				\
	__pte_free_tlb(tlb, ptep);			\
} while (0)

#define pmd_free_tlb(tlb, ptep)				\
do {							\
	tlb->need_flush = 1;				\
	__pmd_free_tlb(tlb, ptep);			\
} while (0)

#define pud_free_tlb(tlb, pudp)				\
do {							\
	tlb->need_flush = 1;				\
	__pud_free_tlb(tlb, pudp);			\
} while (0)
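/*
 * (Informative note: each wrapper above sets need_flush before invoking the
 * underlying __*_free_tlb()/__tlb_remove_tlb_entry() helper; without it,
 * ia64_tlb_flush_mmu() would return early and skip the flush when no normal
 * pages happened to be gathered via tlb_remove_page().)
 */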
#endif /* _ASM_IA64_TLB_H */