Linux 2.6.16.22: include/asm-ia64/mmu_context.h
#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  Context numbers are implemented using ia-64
 * region ids.  Since the IA-64 TLB does not consider the region number when
 * performing a TLB lookup, we need to assign a unique region id to each
 * region in a process.  We use the least significant three bits in a region
 * id for this purpose.
 */

#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))
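
/*
 * Worked example (explanatory note, not in the original header): a process
 * whose context number is 5 touching an address in region 3 (top three
 * address bits 011) gets rid = (5 << 3) | 3 = 0x2b.  Each of the eight
 * 2^61-byte regions of one process thus ends up with its own region id,
 * while the shared context number lives in the upper bits.
 */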

# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
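
/*
 * Explanatory note (not in the original header): ia64_ctx is the global
 * region-id allocator.  Context numbers are handed out from the free range
 * [next, limit); "bitmap" marks numbers currently in use, while "flushmap"
 * marks numbers that have been released but whose TLB entries may still be
 * live, so they can only be reused after the next wrap and flush.
 */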
struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* available free range */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
				/* call wrap_mmu_context when next >= max */
	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
	unsigned long *flushmap;/* pending rid to be flushed */
};

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);
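
/*
 * Explanatory note (not in the original header): entering lazy-TLB mode
 * needs no work on ia64.  A kernel thread that borrows an mm only touches
 * kernel regions, which use the fixed IA64_REGION_ID_KERNEL rid, so the
 * previously loaded user region registers can simply be left in place.
 */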
static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around all TLBs need to be flushed because
 * an old context number might have been reused. This is signalled by the
 * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
 * below. Called by activate_mm(). <efocht@ess.nec.de>
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

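	/*
	 * Explanatory note (not in the original header): the flag is tested
	 * twice on purpose.  The first, unlocked test keeps the common fast
	 * path cheap; the re-test under ia64_ctx.lock makes sure the flush
	 * and the clearing of the flag cannot interleave with a concurrent
	 * wrap_mmu_context().
	 */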
	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
			local_flush_tlb_all();
			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}

static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (likely(context))
		goto out;

	spin_lock_irqsave(&ia64_ctx.lock, flags);
	/* re-check, now that we've got the lock: */
	context = mm->context;
	if (context == 0) {
		cpus_clear(mm->cpu_vm_mask);
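		/*
		 * Explanatory note (not in the original header): when the
		 * current free range [next, limit) is used up, scan the
		 * bitmap for the next run of unused context numbers; once
		 * the scan reaches max_ctx the whole space is exhausted and
		 * wrap_mmu_context() reclaims freed rids and flushes TLBs.
		 */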
		if (ia64_ctx.next >= ia64_ctx.limit) {
			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			if (ia64_ctx.next >= ia64_ctx.max_ctx)
				wrap_mmu_context(mm);
		}
		mm->context = context = ia64_ctx.next++;
		__set_bit(context, ia64_ctx.bitmap);
	}
	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
out:
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do. */
}

static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
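	/*
	 * Explanatory note (not in the original header): in a region
	 * register, bit 0 is the VHPT enable bit, bits 2-7 hold the
	 * preferred page size and bits 8 and up hold the rid.  With 16KB
	 * pages (PAGE_SHIFT == 14, an example configuration) and context 5,
	 * rid = 40 and rr0 = (40 << 8) | (14 << 2) | 1 = 0x2839.
	 */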
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
#ifdef CONFIG_HUGETLB_PAGE
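	/*
	 * Explanatory note (not in the original header): 0xfc masks bits
	 * 2-7 of the region register, the preferred-page-size field, so the
	 * huge-page region keeps whatever page size was previously
	 * programmed into it instead of falling back to PAGE_SHIFT.
	 */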
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

#  if RGN_HPAGE != 4
#    error "reload_context assumes RGN_HPAGE is 4"
#  endif
#endif
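
	/*
	 * Explanatory note (not in the original header): only region
	 * registers rr0-rr4, covering the five user regions whose base
	 * addresses are written out literally below, are reloaded; rr5-rr7
	 * belong to the kernel and keep their fixed region ids.
	 */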
	ia64_set_rr(0x0000000000000000UL, rr0);
	ia64_set_rr(0x2000000000000000UL, rr1);
	ia64_set_rr(0x4000000000000000UL, rr2);
	ia64_set_rr(0x6000000000000000UL, rr3);
	ia64_set_rr(0x8000000000000000UL, rr4);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}

/*
 * Must be called with preemption off
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

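	/*
	 * Explanatory note (not in the original header): this CPU is added
	 * to mm->cpu_vm_mask before the region registers are loaded, so a
	 * concurrent TLB shootdown cannot miss it; if the context was
	 * invalidated in the meantime, mm->context no longer matches and
	 * the loop retries with a freshly allocated context.
	 */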
	do {
		context = get_mmu_context(mm);
		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
		reload_context(context);
		/*
		 * in the unlikely event of a TLB-flush by another thread,
		 * redo the load.
		 */
	} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt
	 * handlers cannot touch user-space.
	 */
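	/*
	 * Explanatory note (not in the original header): the physical
	 * address of the new page-table root goes into the IA64_KR_PT_BASE
	 * kernel register, where the low-level TLB miss handlers expect to
	 * find it when they walk the page table.
	 */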
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)
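
/*
 * Explanatory note (not in the original header): switch_mm() maps directly
 * onto activate_mm() because deactivating the outgoing address space is a
 * no-op here; loading the new pgd and region registers is all a context
 * switch requires.
 */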

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */