#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Routines to manage the allocation of task context numbers.  Task context numbers are
 * used to reduce or eliminate the need to perform TLB flushes due to context switches.
 * Context numbers are implemented using ia-64 region ids.  Since the IA-64 TLB does not
 * consider the region number when performing a TLB lookup, we need to assign a unique
 * region id to each region in a process.  We use the least significant three bits in a
 * region id for this purpose.
 */
#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))
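
/*
 * Worked example (illustrative, not part of the original header): for context
 * number 5 and an address in region 2 (addr >> 61 == 2), ia64_rid(5, addr)
 * yields (5 << 3) | 2 == 42, so each of the eight regions of an address space
 * gets its own region id.
 */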
# include <asm/page.h>

# ifndef __ASSEMBLY__
#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* next >= limit => must call wrap_mmu_context() */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
};

extern struct ia64_ctx ia64_ctx;
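
/*
 * Hypothetical snapshot to illustrate the fields above: with next == 7 and
 * limit == 64, context numbers 7..63 can still be handed out on this pass;
 * once next reaches limit, get_mmu_context() below must call
 * wrap_mmu_context() to recycle the context-number space.
 */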
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
extern void wrap_mmu_context (struct mm_struct *mm);
static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}
/*
 * When the context counter wraps around all TLBs need to be flushed because an old
 * context number might have been reused.  This is signalled by the ia64_need_tlb_flush
 * per-CPU variable, which is checked in the routine below.  Called by activate_mm().
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		{
			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
				local_flush_tlb_all();
				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
			}
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}
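
/*
 * Illustrative scenario (an assumption based on the comment above): a CPU
 * that wraps the context counter raises ia64_need_tlb_flush for the other
 * CPUs.  The first test above is done locklessly so the common no-flush
 * case stays cheap; the flag is then re-checked under ia64_ctx.lock so the
 * flush and the clearing of the flag are consistent with a concurrent wrap.
 */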
static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (unlikely(!context)) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		{
			/* re-check, now that we've got the lock: */
			context = mm->context;
			if (context == 0) {
				cpus_clear(mm->cpu_vm_mask);
				if (ia64_ctx.next >= ia64_ctx.limit)
					wrap_mmu_context(mm);
				mm->context = context = ia64_ctx.next++;
			}
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}
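
/*
 * Illustrative race (consistent with the "re-check" above): two threads of
 * the same mm may both observe context == 0 and take the slow path; the
 * first allocates a number under ia64_ctx.lock, and the second re-reads
 * mm->context under the lock, finds it non-zero, and reuses it.
 */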
/*
 * Initialize context number to some sane value.  MM is guaranteed to be a brand-new
 * address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}
static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do.  */
}
static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;
	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;
	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
#ifdef CONFIG_HUGETLB_PAGE
	/* preserve the preferred (huge) page size bits of the RGN_HPAGE register: */
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

#  if RGN_HPAGE != 4
#    error "reload_context assumes RGN_HPAGE is 4"
#  endif
#endif
	ia64_set_rr(0x0000000000000000UL, rr0);
	ia64_set_rr(0x2000000000000000UL, rr1);
	ia64_set_rr(0x4000000000000000UL, rr2);
	ia64_set_rr(0x6000000000000000UL, rr3);
	ia64_set_rr(0x8000000000000000UL, rr4);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
/*
 * Must be called with preemption off
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

	do {
		context = get_mmu_context(mm);
		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
		reload_context(context);
		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
	} while (unlikely(context != mm->context));
}
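
/*
 * Note on the retry loop (an illustrative reading, not from the original
 * comments): the reload is only considered done once mm->context still
 * matches the value that was loaded into the region registers; if a
 * concurrent TLB flush or context wrap invalidated it in the meantime, the
 * loop fetches a fresh context and reloads.
 */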
#define deactivate_mm(tsk,mm)	do { } while (0)
/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt handlers cannot
	 * touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}
#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)
# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */