x86: arch/x86/mm/init_32.c cleanup
[wrt350n-kernel.git] / include / asm-um / mmu_context.h
blob: 5f3b863aef9aeeafb40c8ee168addf2d6309ecd1
1 /*
2 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
6 #ifndef __UM_MMU_CONTEXT_H
7 #define __UM_MMU_CONTEXT_H
9 #include <asm-generic/mm_hooks.h>
11 #include "linux/sched.h"
12 #include "um_mmu.h"
/*
 * These hooks are not needed on UML: context management happens via
 * __switch_mm() on the host side, so they expand to empty statements.
 * Use the braced do { } while (0) form consistently (safe as the sole
 * statement of an unbraced if/else, unlike a bare semicolon body).
 */
#define get_mmu_context(task) do { } while (0)
#define activate_context(tsk) do { } while (0)

#define deactivate_mm(tsk, mm) do { } while (0)
19 extern void force_flush_all(void);
21 static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
24 * This is called by fs/exec.c and fs/aio.c. In the first case, for an
25 * exec, we don't need to do anything as we're called from userspace
26 * and thus going to use a new host PID. In the second, we're called
27 * from a kernel thread, and thus need to go doing the mmap's on the
28 * host. Since they're very expensive, we want to avoid that as far as
29 * possible.
31 if (old != new && (current->flags & PF_BORROWED_MM))
32 __switch_mm(&new->context.id);
35 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
36 struct task_struct *tsk)
38 unsigned cpu = smp_processor_id();
40 if(prev != next){
41 cpu_clear(cpu, prev->cpu_vm_mask);
42 cpu_set(cpu, next->cpu_vm_mask);
43 if(next != &init_mm)
44 __switch_mm(&next->context.id);
/*
 * Lazy TLB mode is a no-op on UML; TLB handling is done through the
 * host, so there is nothing to defer here.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}
53 extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
55 extern void destroy_context(struct mm_struct *mm);
57 #endif