/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as 8xx, 4xx, BookE's etc...
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *
 * Derived from previous arch/powerpc/mm/mmu_context.c
 * and arch/powerpc/include/asm/mmu_context.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * TODO:
 *
 *   - The global context lock will not scale very well
 *   - The maps should be dynamically allocated to allow for processors
 *     that support more PID bits at runtime
 *   - Implement flush_tlb_mm() by making the context stale and picking
 *     a new one
 *   - More aggressively clear stale map bits and maybe find some way to
 *     also clear mm->cpu_vm_mask bits when processes are migrated
 */
//#define DEBUG_MAP_CONSISTENCY
//#define DEBUG_CLAMP_LAST_CONTEXT 31
//#define DEBUG_HARDER

/* We don't use DEBUG because it tends to be compiled in always nowadays
 * and this would generate way too much output
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while(0)
#define pr_hardcont(args...)	do { } while(0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static unsigned int first_context, last_context;
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_SPINLOCK(context_lock);
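
/* context_map is a bitmap of allocated context IDs, context_mm maps
 * each ID back to the mm that owns it, and stale_map[cpu] flags IDs
 * whose TLB entries on that CPU are stale and must be flushed before
 * the ID is used there again.
 */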

#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
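
/* For example, with the 4xx/BookE defaults below (last_context = 255)
 * on a 32-bit kernel (BITS_PER_LONG = 32), CTX_MAP_SIZE is
 * 4 * (255 / 32 + 1) = 32 bytes, i.e. one bit per context ID 0..255.
 */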

/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU.
 */
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = last_context - first_context;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active, on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > last_context)
				id = first_context;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm. For threaded
		 * implementations, we set it on all threads on each core
		 * represented in the mask. A future implementation will use
		 * a core map instead but this will do for now.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_in_core(cpu);
			     i <= cpu_last_thread_in_core(cpu); i++)
				__set_bit(id, stale_map[i]);
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again
	 */
	spin_unlock(&context_lock);
	cpu_relax();
	spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}
#endif  /* CONFIG_SMP */

/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	__clear_bit(id, stale_map[cpu]);

	return id;
}

#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
	unsigned int id, nrf, nact;

	nrf = nact = 0;
	for (id = first_context; id <= last_context; id++) {
		int used = test_bit(id, context_map);

		if (!used)
			nrf++;
		if (used != (context_mm[id] != NULL))
			pr_err("MMU: Context %d is %s and MM is %p !\n",
			       id, used ? "used" : "free", context_mm[id]);
		if (context_mm[id] != NULL)
			nact += context_mm[id]->context.active;
	}
	if (nrf != nr_free_contexts) {
		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
		       nr_free_contexts, nrf);
		nr_free_contexts = nrf;
	}
	if (nact > num_online_cpus())
		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
		       nact, num_online_cpus());
	if (first_context > 0 && !test_bit(0, context_map))
		pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif
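
/* Switch the MMU context for the current task: reuse the mm's assigned
 * context ID if it has one, otherwise allocate a free ID from
 * context_map (stealing one if none are free), flush the local TLB if
 * that ID is marked stale on this CPU, then program the ID into the MMU.
 */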
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_in_core(cpu),
			    cpu_last_thread_in_core(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_in_core(cpu);
		     i <= cpu_last_thread_in_core(cpu); i++) {
			__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	spin_unlock(&context_lock);
}

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);

	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;

	return 0;
}
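
/* Note that init_new_context() deliberately defers allocating a context
 * ID: the mm starts out as MMU_NO_CONTEXT and only gets a real ID the
 * first time switch_mmu_context() runs on its behalf, so address spaces
 * that never run never consume one of the scarce PIDs.
 */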

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	spin_unlock_irqrestore(&context_lock, flags);
}

#ifdef CONFIG_SMP

static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
#ifdef CONFIG_HOTPLUG_CPU
	struct task_struct *p;
#endif
	/* We don't touch the CPU 0 map, it's allocated at boot and kept
	 * around forever
	 */
	if (cpu == 0)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
		kfree(stale_map[cpu]);
		stale_map[cpu] = NULL;

		/* We also clear the cpu_vm_mask bits of CPUs going away */
		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->mm)
				cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
		}
		read_unlock(&tasklist_lock);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
	.notifier_call	= mmu_context_cpu_notify,
};

#endif /* CONFIG_SMP */

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 * The MPC8xx has only 16 contexts. We rotate through them on each
	 * task switch. A better way would be to keep track of tasks that
	 * own contexts, and implement an LRU usage. That way very active
	 * tasks don't always have to pay the TLB reload overhead. The
	 * kernel pages are mapped shared, so the kernel can run on behalf
	 * of any task that makes a kernel entry. Shared does not mean they
	 * are not protected, just that the ASID comparison is not performed.
	 *
	 * The IBM4xx has 256 contexts, so we can just rotate through these
	 * as a way of "switching" contexts. If the TID of the TLB is zero,
	 * the PID/TID comparison is disabled, so we can use a TID of zero
	 * to represent all kernel pages as shared among all contexts.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
	} else {
		first_context = 1;
		last_context = 255;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif

	/*
	 * Allocate the maps used by context management
	 */
	context_map = alloc_bootmem(CTX_MAP_SIZE);
	context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
	stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);

#ifdef CONFIG_SMP
	register_cpu_notifier(&mmu_context_cpu_nb);
#endif
421 "MMU: Allocated %zu bytes of context maps for %d contexts\n",
422 2 * CTX_MAP_SIZE
+ (sizeof(void *) * (last_context
+ 1)),
423 last_context
- first_context
+ 1);
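
	/*
	 * For example, with the 4xx/BookE values above (first_context = 1,
	 * last_context = 255) on a 32-bit kernel, that is
	 * 2 * 32 + 4 * 256 = 1088 bytes of maps for 255 contexts.
	 */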

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes first_context < 32.
	 */
	context_map[0] = (1 << first_context) - 1;
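	/*
	 * E.g. first_context = 1 yields context_map[0] = 0x1, permanently
	 * reserving context 0 for the kernel, while first_context = 0
	 * (the 8xx case) reserves nothing.
	 */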
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}