arch/um/kernel/tlb.c
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */
#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "user_util.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"
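
/*
 * add_mmap(), add_munmap() and add_mprotect() queue host VM operations
 * into the "ops" array.  A request that simply extends the previously
 * queued operation of the same type is merged into it; when the array
 * fills up, the pending batch is flushed through the do_ops callback
 * and the index is reset to -1 (empty).
 */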
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    int r, int w, int x, struct host_vm_op *ops, int *index,
		    int last_filled, union mm_context *mmu, void **flush,
		    int (*do_ops)(union mm_context *, struct host_vm_op *,
				  int, int, void **))
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if(*index != -1){
		last = &ops[*index];
		if((last->type == MMAP) &&
		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
		   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
		   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
		   (last->u.mmap.offset + last->u.mmap.len == offset)){
			last->u.mmap.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MMAP,
					       .u = { .mmap = {
						       .addr	= virt,
						       .len	= len,
						       .r	= r,
						       .w	= w,
						       .x	= x,
						       .fd	= fd,
						       .offset	= offset }
					       } });
	return ret;
}
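
/*
 * Queue a host munmap of [addr, addr + len), extending an adjacent
 * pending munmap instead when the two ranges are contiguous.
 */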
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_op *ops, int *index, int last_filled,
		      union mm_context *mmu, void **flush,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MUNMAP) &&
		   (last->u.munmap.addr + last->u.munmap.len == addr)){
			last->u.munmap.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MUNMAP,
					       .u = { .munmap = {
						       .addr	= addr,
						       .len	= len } } });
	return ret;
}
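
/*
 * Queue a host mprotect of [addr, addr + len) with the given r/w/x
 * permissions, merging with a contiguous pending mprotect that asks for
 * the same permissions.
 */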
static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
			int x, struct host_vm_op *ops, int *index,
			int last_filled, union mm_context *mmu, void **flush,
			int (*do_ops)(union mm_context *, struct host_vm_op *,
				      int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MPROTECT) &&
		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
		   (last->u.mprotect.x == x)){
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MPROTECT,
					       .u = { .mprotect = {
						       .addr	= addr,
						       .len	= len,
						       .r	= r,
						       .w	= w,
						       .x	= x } } });
	return ret;
}
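
/*
 * Round n up to the next inc boundary; inc must be a power of two.
 * An already-aligned n advances a full step, e.g. with 4k pages
 * ADD_ROUND(0x2000, 0x1000) == 0x3000, which is what the loops below
 * rely on to make forward progress.
 */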
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
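
/*
 * Walk a process's address range and bring the host's mappings in line
 * with its page tables: ranges backed by missing or newly allocated
 * page table entries are munmapped on the host, present pages are
 * (re)mmapped, and pure permission changes become mprotects.  The
 * operations are batched through the add_* helpers above and flushed
 * with a final do_ops call.
 */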
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	pgd_t *npgd;
	pud_t *npud;
	pmd_t *npmd;
	pte_t *npte;
	union mm_context *mmu = &mm->context;
	unsigned long addr, end;
	int r, w, x;
	struct host_vm_op ops[1];
	void *flush = NULL;
	int op_index = -1, last_op = sizeof(ops) / sizeof(ops[0]) - 1;
	int ret = 0;

	if(mm == NULL) return;

	ops[0].type = NONE;
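	/*
	 * Descend the page tables one level at a time; a hole at any
	 * level unmaps the corresponding (clamped) range on the host in
	 * one go and skips ahead to its end.
	 */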
	for(addr = start_addr; addr < end_addr && !ret;){
		npgd = pgd_offset(mm, addr);
		if(!pgd_present(*npgd)){
			end = ADD_ROUND(addr, PGDIR_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pgd_newpage(*npgd)){
				ret = add_munmap(addr, end - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pgd_mkuptodate(*npgd);
			}
			addr = end;
			continue;
		}

		npud = pud_offset(npgd, addr);
		if(!pud_present(*npud)){
			end = ADD_ROUND(addr, PUD_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pud_newpage(*npud)){
				ret = add_munmap(addr, end - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pud_mkuptodate(*npud);
			}
			addr = end;
			continue;
		}

		npmd = pmd_offset(npud, addr);
		if(!pmd_present(*npmd)){
			end = ADD_ROUND(addr, PMD_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pmd_newpage(*npmd)){
				ret = add_munmap(addr, end - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pmd_mkuptodate(*npmd);
			}
			addr = end;
			continue;
		}

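		/*
		 * Young/dirty state is tracked by mapping pages with
		 * reduced host permissions: an unreferenced pte is
		 * mapped with no access and a clean pte is mapped
		 * read-only, so the resulting faults let the kernel
		 * update the flags.
		 */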
		npte = pte_offset_kernel(npmd, addr);
		r = pte_read(*npte);
		w = pte_write(*npte);
		x = pte_exec(*npte);
		if (!pte_young(*npte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*npte)) {
			w = 0;
		}
		if(force || pte_newpage(*npte)){
			if(pte_present(*npte))
				ret = add_mmap(addr,
					       pte_val(*npte) & PAGE_MASK,
					       PAGE_SIZE, r, w, x, ops,
					       &op_index, last_op, mmu,
					       &flush, do_ops);
			else ret = add_munmap(addr, PAGE_SIZE, ops,
					      &op_index, last_op, mmu,
					      &flush, do_ops);
		}
		else if(pte_newprot(*npte))
			ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
					   &op_index, last_op, mmu,
					   &flush, do_ops);

		*npte = pte_mkuptodate(*npte);
		addr += PAGE_SIZE;
	}

	if(!ret)
		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

	/* This is not an else because ret is modified above */
	if(ret) {
		printk("fix_range_common: failed, killing current process\n");
		force_sig(SIGKILL, current);
	}
}
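
/*
 * Same idea as fix_range_common(), but for init_mm: kernel mappings are
 * fixed up directly with os_unmap_memory()/map_memory() instead of
 * being batched, and a munmap failure here is fatal.
 */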
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
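	/* As above: a hole at any level unmaps the whole clamped range. */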
	for(addr = start; addr < end;){
		pgd = pgd_offset(mm, addr);
		if(!pgd_present(*pgd)){
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if(last > end)
				last = end;
			if(pgd_newpage(*pgd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if(!pud_present(*pud)){
			last = ADD_ROUND(addr, PUD_SIZE);
			if(last > end)
				last = end;
			if(pud_newpage(*pud)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if(!pmd_present(*pmd)){
			last = ADD_ROUND(addr, PMD_SIZE);
			if(last > end)
				last = end;
			if(pmd_newpage(*pmd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if(!pte_present(*pte) || pte_newpage(*pte)){
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if(err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if(pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if(pte_newprot(*pte)){
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return(updated);
}
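
/*
 * Plain function wrappers around the page table walk macros, for
 * callers that need them as real functions rather than macros.
 */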
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return(pte_offset_kernel(pmd, address));
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return(pte_offset_map(pmd, addr));
}
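
/*
 * The generic flush entry points below dispatch to the tt- or skas-mode
 * implementation (or the common helper above) via CHOOSE_MODE.
 */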
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;
	flush_tlb_range(vma, address, address + PAGE_SIZE);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
			 flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
	CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
		    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
	CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
			 end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
	CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}