/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  Context numbers are implemented using ia-64
 * region ids.  Since the IA-64 TLB does not consider the region number when
 * performing a TLB lookup, we need to assign a unique region id to each
 * region in a process.  We use the least significant three bits in a
 * region id for this purpose.
 */

#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))
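
/*
 * For example, a task with context number 5 touching an address in
 * region 3 (addr >> 61 == 3) gets ia64_rid(5, addr) = (5 << 3) | 3 = 0x2b,
 * so each of the task's eight regions ends up with its own region id.
 */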

# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm-generic/mm_hooks.h>

struct ia64_ctx {
	spinlock_t lock;	/* protects the context-allocation state below */
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* available free range */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
				/* call wrap_mmu_context when next >= max */
	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
	unsigned long *flushmap;/* pending rid to be flushed */
};

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);

static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
	/* nothing to do: ia64 needs no lazy-TLB bookkeeping here */
}

/*
 * When the context counter wraps around all TLBs need to be flushed because
 * an old context number might have been reused. This is signalled by the
 * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
 * below. Called by activate_mm(). <efocht@ess.nec.de>
 */
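/*
 * The flag is tested once locklessly for the common no-flush case and
 * then re-tested under ia64_ctx.lock, so clearing it cannot race with
 * the wrap code that sets it.
 */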
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
			local_flush_tlb_all();
			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}

static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (likely(context))
		goto out;

	spin_lock_irqsave(&ia64_ctx.lock, flags);
	/* re-check, now that we've got the lock: */
	context = mm->context;
	if (context == 0) {
		cpumask_clear(mm_cpumask(mm));
		if (ia64_ctx.next >= ia64_ctx.limit) {
			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			if (ia64_ctx.next >= ia64_ctx.max_ctx)
				wrap_mmu_context(mm);
		}
		mm->context = context = ia64_ctx.next++;
		__set_bit(context, ia64_ctx.bitmap);
	}
	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
out:
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do.  */
}

static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/*
	 * encode the region id, preferred page size, and VHPT enable bit
	 * (bit 0 = ve, bits 7:2 = ps, bits 31:8 = rid):
	 */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
#ifdef  CONFIG_HUGETLB_PAGE
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

#  if RGN_HPAGE != 4
#    error "reload_context assumes RGN_HPAGE is 4"
#  endif
#endif

	ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}

/*
 * Must be called with preemption off
 */
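/*
 * activate_context() samples smp_processor_id() and rewrites this CPU's
 * region registers, so it must not be preempted and migrated part-way
 * through.
 */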
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

	do {
		context = get_mmu_context(mm);
		if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		reload_context(context);
		/*
		 * in the unlikely event of a TLB-flush by another thread,
		 * redo the load.
		 */
	} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt
	 * handlers cannot touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */