arch/um/kernel/tlb.c
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */
#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "user_util.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"
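
/* Advance n to the next inc-aligned boundary; inc must be a power of
 * two.  Note that an already-aligned n still moves to the following
 * boundary, which is what the page table walkers below rely on. */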
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
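
/* Walk the page tables covering [start_addr, end_addr) and emit host
 * mmap/munmap/mprotect operations for any entries that are out of
 * date (or for everything, if force is set).  Operations are batched
 * in ops[] and flushed through do_ops whenever the array fills, and
 * once more at the end. */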
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force,
		      void (*do_ops)(union mm_context *, struct host_vm_op *,
				     int))
{
	pgd_t *npgd;
	pud_t *npud;
	pmd_t *npmd;
	pte_t *npte;
	union mm_context *mmu = &mm->context;
	unsigned long addr, end;
	int r, w, x;
	struct host_vm_op ops[16];
	int op_index = -1, last_op = sizeof(ops) / sizeof(ops[0]) - 1;

	if(mm == NULL) return;

	for(addr = start_addr; addr < end_addr;){
		npgd = pgd_offset(mm, addr);
		if(!pgd_present(*npgd)){
			end = ADD_ROUND(addr, PGDIR_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pgd_newpage(*npgd)){
				op_index = add_munmap(addr, end - addr, ops,
						      op_index, last_op, mmu,
						      do_ops);
				pgd_mkuptodate(*npgd);
			}
			addr = end;
			continue;
		}

		npud = pud_offset(npgd, addr);
		if(!pud_present(*npud)){
			end = ADD_ROUND(addr, PUD_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pud_newpage(*npud)){
				op_index = add_munmap(addr, end - addr, ops,
						      op_index, last_op, mmu,
						      do_ops);
				pud_mkuptodate(*npud);
			}
			addr = end;
			continue;
		}

		npmd = pmd_offset(npud, addr);
		if(!pmd_present(*npmd)){
			end = ADD_ROUND(addr, PMD_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pmd_newpage(*npmd)){
				op_index = add_munmap(addr, end - addr, ops,
						      op_index, last_op, mmu,
						      do_ops);
				pmd_mkuptodate(*npmd);
			}
			addr = end;
			continue;
		}

		npte = pte_offset_kernel(npmd, addr);
		r = pte_read(*npte);
		w = pte_write(*npte);
		x = pte_exec(*npte);
		/* A clean page must not be host-writable, and a page not
		 * marked young must not be mapped at all, so that the
		 * resulting faults keep the dirty/accessed emulation
		 * working. */
		if(!pte_dirty(*npte))
			w = 0;
		if(!pte_young(*npte)){
			r = 0;
			w = 0;
		}
		if(force || pte_newpage(*npte)){
			if(pte_present(*npte))
				op_index = add_mmap(addr,
						    pte_val(*npte) & PAGE_MASK,
						    PAGE_SIZE, r, w, x, ops,
						    op_index, last_op, mmu,
						    do_ops);
			else op_index = add_munmap(addr, PAGE_SIZE, ops,
						   op_index, last_op, mmu,
						   do_ops);
		}
		else if(pte_newprot(*npte))
			op_index = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
						op_index, last_op, mmu,
						do_ops);

		*npte = pte_mkuptodate(*npte);
		addr += PAGE_SIZE;
	}

	/* Flush whatever is left in the ops array. */
	(*do_ops)(mmu, ops, op_index);
}
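
/* Bring the host mappings for the kernel's own range [start, end) up
 * to date, unmapping stale areas and remapping or reprotecting
 * changed pages directly rather than batching.  Returns nonzero if
 * anything was changed. */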
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for(addr = start; addr < end;){
		pgd = pgd_offset(mm, addr);
		if(!pgd_present(*pgd)){
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if(last > end)
				last = end;
			if(pgd_newpage(*pgd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if(!pud_present(*pud)){
			last = ADD_ROUND(addr, PUD_SIZE);
			if(last > end)
				last = end;
			if(pud_newpage(*pud)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if(!pmd_present(*pmd)){
			last = ADD_ROUND(addr, PMD_SIZE);
			if(last > end)
				last = end;
			if(pmd_newpage(*pmd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if(!pte_present(*pte) || pte_newpage(*pte)){
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if(err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if(pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if(pte_newprot(*pte)){
			updated = 1;
			protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return(updated);
}
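
/* Out-of-line wrappers around the page table accessor macros,
 * presumably for callers that cannot use the inline macros directly
 * (e.g. UML code built outside the kernel-proper headers). */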
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return(pte_offset_kernel(pmd, address));
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return(pte_offset_map(pmd, addr));
}
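
/* The add_m{map,unmap,protect} helpers below share one pattern: if
 * the new operation simply extends the last entry in ops[] (same
 * type, contiguous addresses, identical attributes), merge it into
 * that entry; otherwise append a new entry, first flushing the array
 * through do_ops if it is already full.  An index of -1 means the
 * array is empty. */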
int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
	     int r, int w, int x, struct host_vm_op *ops, int index,
	     int last_filled, union mm_context *mmu,
	     void (*do_ops)(union mm_context *, struct host_vm_op *, int))
{
	__u64 offset;
	struct host_vm_op *last;
	int fd;

	fd = phys_mapping(phys, &offset);
	if(index != -1){
		last = &ops[index];
		if((last->type == MMAP) &&
		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
		   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
		   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
		   (last->u.mmap.offset + last->u.mmap.len == offset)){
			last->u.mmap.len += len;
			return(index);
		}
	}

	if(index == last_filled){
		(*do_ops)(mmu, ops, last_filled);
		index = -1;
	}

	ops[++index] = ((struct host_vm_op) { .type = MMAP,
					      .u = { .mmap = {
						      .addr = virt,
						      .len = len,
						      .r = r,
						      .w = w,
						      .x = x,
						      .fd = fd,
						      .offset = offset }
					      } });
	return(index);
}
int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops,
	       int index, int last_filled, union mm_context *mmu,
	       void (*do_ops)(union mm_context *, struct host_vm_op *, int))
{
	struct host_vm_op *last;

	if(index != -1){
		last = &ops[index];
		if((last->type == MUNMAP) &&
		   (last->u.munmap.addr + last->u.munmap.len == addr)){
			last->u.munmap.len += len;
			return(index);
		}
	}

	if(index == last_filled){
		(*do_ops)(mmu, ops, last_filled);
		index = -1;
	}

	ops[++index] = ((struct host_vm_op) { .type = MUNMAP,
					      .u = { .munmap = {
						      .addr = addr,
						      .len = len } } });
	return(index);
}
int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x,
		 struct host_vm_op *ops, int index, int last_filled,
		 union mm_context *mmu,
		 void (*do_ops)(union mm_context *, struct host_vm_op *, int))
{
	struct host_vm_op *last;

	if(index != -1){
		last = &ops[index];
		if((last->type == MPROTECT) &&
		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
		   (last->u.mprotect.x == x)){
			last->u.mprotect.len += len;
			return(index);
		}
	}

	if(index == last_filled){
		(*do_ops)(mmu, ops, last_filled);
		index = -1;
	}

	ops[++index] = ((struct host_vm_op) { .type = MPROTECT,
					      .u = { .mprotect = {
						      .addr = addr,
						      .len = len,
						      .r = r,
						      .w = w,
						      .x = x } } });
	return(index);
}
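
/* The remaining entry points are thin wrappers.  CHOOSE_MODE and
 * CHOOSE_MODE_PROC dispatch to the tt (tracing thread) or skas
 * (separate kernel address space) implementation, depending on which
 * mode this UML instance is running in. */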
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;
	flush_tlb_range(vma, address, address + PAGE_SIZE);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
			 flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
	CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
		    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
	CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
			 end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
	CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}