#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
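/*
 * For illustration only (not part of the interface defined below): a minimal
 * sketch of a caller driving a partial unmap according to the template above.
 * The variable names are hypothetical; the real callers live in the generic
 * mm code:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0);			// partial-mm unmap
 *	tlb_start_vma(&tlb, vma);
 *	tlb_remove_tlb_entry(&tlb, pte, addr);		// per-PTE TLB bookkeeping
 *	tlb_remove_page(&tlb, page);			// freeing is deferred
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);		// flush TLB, then free pages
 */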

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define	IA64_GATHER_BUNDLE	8

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;
	unsigned int		max;
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		**pages;
	struct page		*local[IA64_GATHER_BUNDLE];
};

struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
}; /* record of a translation register (TR) entry */
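
/*
 * Informational note (not from the original header): ia64_itr_entry() inserts
 * a pinned translation into a free instruction and/or data translation
 * register, selected by target_mask, and ia64_ptr_entry() purges the entry
 * previously inserted in the given slot.  ia64_idtrs[] records the per-CPU
 * TR contents.
 */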

extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/*
 * region register macros
 */
#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	((val >> 8) & 0xffffff)
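
/*
 * Worked example (informational, not part of the original header): for a
 * region register value rr = 0x12d61 the macros above decode as
 *
 *	RR_TO_VE(rr)  = 0x1	(VHPT walker enabled)
 *	RR_TO_PS(rr)  = 0x18	(preferred page size = 2^24 = 16MB)
 *	RR_TO_RID(rr) = 0x12d	(region ID)
 */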

/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	unsigned long i;
	unsigned int nr;

	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}

	/* lastly, release the freed pages */
	nr = tlb->nr;

	tlb->nr = 0;
	tlb->start_addr = ~0UL;
	for (i = 0; i < nr; ++i)
		free_page_and_swap_cache(tlb->pages[i]);
}
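
/*
 * Example of the heuristic above (informational): REGION_NUMBER() is derived
 * from the top three address bits, so a small unmap such as
 * 0x2000000000000000-0x2000000000004000 stays within region 1 and takes the
 * flush_tlb_range() path, whereas a range spanning more than 1TB or crossing
 * into another region falls back to flush_tlb_all().
 */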

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}
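
/*
 * Sizing note (informational): with the default 16KB ia64 page size the
 * allocated batch holds PAGE_SIZE / sizeof(void *) = 16384 / 8 = 2048 page
 * pointers, compared with the IA64_GATHER_BUNDLE (8) entries available in
 * the on-stack local[] array when the allocation fails.
 */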

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	tlb->fullmm = full_mm_flush;
	tlb->start_addr = ~0UL;
}
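
/*
 * Usage sketch (illustrative only; the real caller is the generic mm code):
 * a full address-space teardown passes full_mm_flush=1, which lets
 * ia64_tlb_flush_mmu() take the cheaper flush_tlb_mm() path:
 *
 *	tlb_gather_mmu(&tlb, mm, 1);
 *	... unmap and free every vma ...
 *	tlb_finish_mmu(&tlb, 0, TASK_SIZE);
 */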

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (!tlb->nr && tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);

	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);

	return tlb->max - tlb->nr;
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}
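
/*
 * Batching note (informational): __tlb_remove_page() returns the remaining
 * batch capacity, so tlb_remove_page() flushes the TLB and releases the
 * gathered pages exactly when the batch fills up; with the on-stack array
 * that happens after IA64_GATHER_BUNDLE (8) pages, and after
 * PAGE_SIZE / sizeof(void *) pages once __tlb_alloc_page() has succeeded.
 */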

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}
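
/*
 * Range-tracking example (informational): after entries at addresses A and a
 * later B have been removed, start_addr holds A (captured by the first call
 * since the last flush reset it to ~0UL) and end_addr holds B + PAGE_SIZE,
 * so tlb_flush_mmu() ends up flushing exactly [A, B + PAGE_SIZE).
 */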

#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define pte_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pte_free_tlb(tlb, ptep, address);		\
} while (0)

#define pmd_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pmd_free_tlb(tlb, ptep, address);		\
} while (0)

#define pud_free_tlb(tlb, pudp, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pud_free_tlb(tlb, pudp, address);		\
} while (0)

#endif /* _ASM_IA64_TLB_H */