/*
 * File:         include/asm-blackfin/mmu_context.h
 * Based on:
 * Author:
 *
 * Created:
 * Description:
 *
 * Modified:
 *               Copyright 2004-2006 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__

#include <linux/gfp.h>
#include <linux/sched.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>
extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
extern unsigned long l1_stack_len;

extern int l1sram_free(const void*);
extern void *l1sram_alloc_max(void*);
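/*
 * How these globals fit together (as inferred from their use below):
 * l1_stack_base/l1_stack_len describe the single block of L1 scratchpad
 * SRAM handed out by l1sram_alloc_max(), nr_l1stack_tasks counts how
 * many mms currently share that block as their stack, and
 * current_l1_stack_save points to the save area of the mm whose stack
 * data is live in L1.
 */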
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
/* Called when creating a new context during fork() or execve(). */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_MPU
	unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);

	/* The allocation can fail; fail the fork rather than oops in memset. */
	if (!p)
		return -ENOMEM;
	mm->context.page_rwx_mask = (unsigned long *)p;
	memset(mm->context.page_rwx_mask, 0,
	       page_mask_nelts * 3 * sizeof(long));
#endif
	return 0;
}
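/*
 * Layout note: page_rwx_mask is three consecutive bitmaps of
 * page_mask_nelts words each - read, then write, then execute - with
 * one bit per 4KB page (protect_page() below steps between them with
 * "mask += page_mask_nelts", and init_new_context() clears all
 * page_mask_nelts * 3 words).
 */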
static inline void free_l1stack(void)
{
	nr_l1stack_tasks--;
	if (nr_l1stack_tasks == 0)
		l1sram_free(l1_stack_base);
}
static inline void destroy_context(struct mm_struct *mm)
{
	struct sram_list_struct *tmp;

	if (current_l1_stack_save == mm->context.l1_stack_save)
		current_l1_stack_save = NULL;
	if (mm->context.l1_stack_save)
		free_l1stack();

	while ((tmp = mm->context.sram_list)) {
		mm->context.sram_list = tmp->next;
		sram_free(tmp->addr);
		kfree(tmp);
	}
#ifdef CONFIG_MPU
	if (current_rwx_mask == mm->context.page_rwx_mask)
		current_rwx_mask = NULL;
	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}
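/*
 * Teardown order in destroy_context(): detach the cached L1 stack
 * pointer, drop this mm's reference on the shared L1 stack, free any
 * per-process SRAM allocations left on sram_list, and finally release
 * the MPU rwx bitmap pages.
 */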
static inline unsigned long
alloc_l1stack(unsigned long length, unsigned long *stack_base)
{
	if (nr_l1stack_tasks == 0) {
		l1_stack_base = l1sram_alloc_max(&l1_stack_len);
		if (!l1_stack_base)
			return 0;
	}

	if (l1_stack_len < length) {
		if (nr_l1stack_tasks == 0)
			l1sram_free(l1_stack_base);
		return 0;
	}
	*stack_base = (unsigned long)l1_stack_base;
	nr_l1stack_tasks++;
	return l1_stack_len;
}
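/*
 * alloc_l1stack() semantics: the first task to ask grabs the largest
 * free L1 scratchpad block on behalf of everyone; later callers simply
 * take another reference to the same block.  It returns the usable
 * stack length, or 0 if no block is available or the block is smaller
 * than the requested length.
 */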
static inline int
activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
{
	if (current_l1_stack_save)
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	mm->context.l1_stack_save = current_l1_stack_save = (void *)sp_base;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
	return 1;
}
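/*
 * activate_l1stack() first spills the current owner's stack from L1
 * back into its save area, then fills L1 from the new save area at
 * sp_base; the save area is evidently expected to be at least
 * l1_stack_len bytes.
 */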
#define deactivate_mm(tsk,mm)	do { } while (0)

#define activate_mm(prev, next) switch_mm(prev, next, NULL)
static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
			     struct task_struct *tsk)
{
	if (prev_mm == next_mm)
		return;
#ifdef CONFIG_MPU
	if (prev_mm->context.page_rwx_mask == current_rwx_mask) {
		flush_switched_cplbs();
		set_mask_dcplbs(next_mm->context.page_rwx_mask);
	}
#endif

	/* L1 stack switching. */
	if (!next_mm->context.l1_stack_save)
		return;
	if (next_mm->context.l1_stack_save == current_l1_stack_save)
		return;
	if (current_l1_stack_save)
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	current_l1_stack_save = next_mm->context.l1_stack_save;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
}
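/*
 * The L1 stack handoff above mirrors activate_l1stack(): copy the
 * outgoing task's live stack out of L1 into its save area, then copy
 * the incoming task's saved stack back in.  Tasks with no L1 stack
 * (l1_stack_save == NULL) leave the current contents untouched.
 */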
#ifdef CONFIG_MPU
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
				unsigned long flags)
{
	unsigned long *mask = mm->context.page_rwx_mask;
	unsigned long page = addr >> 12;
	unsigned long idx = page >> 5;
	unsigned long bit = 1 << (page & 31);

	if (flags & VM_MAYREAD)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_MAYWRITE)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_MAYEXEC)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
}
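/*
 * Worked example of the bit arithmetic above, for a hypothetical
 * address 0x3456000 (4KB pages):
 *
 *	page = 0x3456000 >> 12    = 0x3456  (page frame number)
 *	idx  = 0x3456 >> 5        = 0x1a2   (32 pages tracked per word)
 *	bit  = 1 << (0x3456 & 31) = 1 << 22
 *
 * i.e. page 0x3456 maps to bit 22 of word 0x1a2 in each of the three
 * R/W/X bitmaps.
 */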
static inline void update_protections(struct mm_struct *mm)
{
	flush_switched_cplbs();
	set_mask_dcplbs(mm->context.page_rwx_mask);
}
#endif

#endif