/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H

#include "linux/sched.h"

extern void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
extern void arch_exit_mmap(struct mm_struct *mm);
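
/*
 * The context hooks below are no-ops on UML; the host address space is
 * switched explicitly via __switch_mm() in activate_mm() and switch_mm().
 */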
#define get_mmu_context(task) do ; while(0)
#define activate_context(tsk) do ; while(0)

#define deactivate_mm(tsk,mm)	do { } while (0)

extern void force_flush_all(void);

static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
	/*
	 * This is called by fs/exec.c and fs/aio.c. In the first case, for an
	 * exec, we don't need to do anything as we're called from userspace
	 * and thus going to use a new host PID. In the second, we're called
	 * from a kernel thread, and thus need to go doing the mmap's on the
	 * host. Since they're very expensive, we want to avoid that as far as
	 * possible.
	 */
	if (old != new && (current->flags & PF_BORROWED_MM))
		__switch_mm(&new->context.id);

	arch_dup_mmap(old, new);
}
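
/*
 * Illustrative sketch of the two call paths described above. This is not
 * part of the original header; the use_mm() fragment is an approximation of
 * the fs/aio.c code of this era, shown only to make the PF_BORROWED_MM test
 * concrete.
 *
 *   exec:  exec_mmap() in fs/exec.c calls activate_mm() from the userspace
 *          process doing the exec. PF_BORROWED_MM is clear, so only
 *          arch_dup_mmap(old, new) runs -- the exec will get a fresh host
 *          process anyway.
 *
 *   aio:   use_mm() runs in a kernel thread and does roughly
 *
 *              tsk->flags |= PF_BORROWED_MM;
 *              tsk->mm = tsk->active_mm = mm;
 *              activate_mm(active_mm, mm);
 *
 *          Here old != new and PF_BORROWED_MM is set, so __switch_mm()
 *          switches to the user mm's address space on the host.
 */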

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (prev != next) {
		cpu_clear(cpu, prev->cpu_vm_mask);
		cpu_set(cpu, next->cpu_vm_mask);
		if (next != &init_mm)
			__switch_mm(&next->context.id);
	}
}

static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}

extern int init_new_context(struct task_struct *task, struct mm_struct *mm);

extern void destroy_context(struct mm_struct *mm);

#endif