x86/boot: Rename overlapping memcpy() to memmove()
[linux/fpc-iii.git] / arch / unicore32 / include / asm / mmu_context.h
blobe35632ef23c759a43e4673d222aac430172cdad7
/*
 * linux/arch/unicore32/include/asm/mmu_context.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #ifndef __UNICORE_MMU_CONTEXT_H__
13 #define __UNICORE_MMU_CONTEXT_H__
15 #include <linux/compiler.h>
16 #include <linux/sched.h>
17 #include <linux/mm.h>
18 #include <linux/vmacache.h>
19 #include <linux/io.h>
21 #include <asm/cacheflush.h>
22 #include <asm/cpu-single.h>
/*
 * UniCore keeps no extra per-mm context state: creating a new context
 * always succeeds (returns 0) and destroying one requires no work.
 */
#define init_new_context(tsk, mm)	0

#define destroy_context(mm)		do { } while (0)
/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 * cpu: cpu number which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do: lazy TLB needs no arch-specific setup here. */
}
43 * This is the actual mm switch as far as the scheduler
44 * is concerned. No registers are touched. We avoid
45 * calling the CPU specific function when the mm hasn't
46 * actually changed.
48 static inline void
49 switch_mm(struct mm_struct *prev, struct mm_struct *next,
50 struct task_struct *tsk)
52 unsigned int cpu = smp_processor_id();
54 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
55 cpu_switch_mm(next->pgd, next);
/*
 * Nothing to do on deactivate; activation is just an mm switch with no
 * relevant previous task (hence tsk == NULL).
 */
#define deactivate_mm(tsk, mm)	do { } while (0)
#define activate_mm(prev, next)	switch_mm(prev, next, NULL)
/*
 * We are inserting a "fake" vma for the user-accessible vector page so
 * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
 * But we also want to remove it before the generic code gets to see it
 * during process exit or the unmapping of it would cause total havoc.
 * (the macro is used as remove_vma() is static to mm/mmap.c)
 *
 * The vma is unlinked from both the mm->mmap list and the rbtree, the
 * per-mm vma cache is invalidated, and the mapping count adjusted, so
 * generic exit code never observes the fake vma.
 */
#define arch_exit_mmap(mm) \
do { \
	struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
	if (high_vma) { \
		BUG_ON(high_vma->vm_next);  /* it should be last */ \
		if (high_vma->vm_prev) \
			high_vma->vm_prev->vm_next = NULL; \
		else \
			mm->mmap = NULL; \
		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
		vmacache_invalidate(mm); \
		mm->map_count--; \
		remove_vma(high_vma); \
	} \
} while (0)
/* No arch-specific work when duplicating an mm (fork) on UniCore. */
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}
/* No arch-specific work when unmapping a range on UniCore. */
static inline void arch_unmap(struct mm_struct *mm,
			struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
}
/* No arch-specific mm setup needed at exec (bprm) time on UniCore. */
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}
/*
 * UniCore imposes no architecture-specific vma access restrictions
 * (no protection keys); every access is permitted.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool foreign)
{
	return true;
}
107 static inline bool arch_pte_access_permitted(pte_t pte, bool write)
109 /* by default, allow everything */
110 return true;
112 #endif