[TG3]: Set minimal hw interrupt mitigation.
[linux-2.6/verdex.git] / arch / um / kernel / tlb.c
blobeda477edfdf5e612e8c982a4c7f5f550b3ba5c0e
1 /*
2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
6 #include "linux/mm.h"
7 #include "asm/page.h"
8 #include "asm/pgalloc.h"
9 #include "asm/tlbflush.h"
10 #include "choose-mode.h"
11 #include "mode_kern.h"
12 #include "user_util.h"
13 #include "tlb.h"
14 #include "mem.h"
15 #include "mem_user.h"
16 #include "os.h"
/* Round n up to the NEXT multiple of inc (inc must be a power of two);
 * a value already on a boundary advances to the following boundary. */
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
20 void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
21 unsigned long end_addr, int force, int data,
22 void (*do_ops)(int, struct host_vm_op *, int))
24 pgd_t *npgd;
25 pud_t *npud;
26 pmd_t *npmd;
27 pte_t *npte;
28 unsigned long addr, end;
29 int r, w, x;
30 struct host_vm_op ops[16];
31 int op_index = -1, last_op = sizeof(ops) / sizeof(ops[0]) - 1;
33 if(mm == NULL) return;
35 for(addr = start_addr; addr < end_addr;){
36 npgd = pgd_offset(mm, addr);
37 if(!pgd_present(*npgd)){
38 end = ADD_ROUND(addr, PGDIR_SIZE);
39 if(end > end_addr)
40 end = end_addr;
41 if(force || pgd_newpage(*npgd)){
42 op_index = add_munmap(addr, end - addr, ops,
43 op_index, last_op, data,
44 do_ops);
45 pgd_mkuptodate(*npgd);
47 addr = end;
48 continue;
51 npud = pud_offset(npgd, addr);
52 if(!pud_present(*npud)){
53 end = ADD_ROUND(addr, PUD_SIZE);
54 if(end > end_addr)
55 end = end_addr;
56 if(force || pud_newpage(*npud)){
57 op_index = add_munmap(addr, end - addr, ops,
58 op_index, last_op, data,
59 do_ops);
60 pud_mkuptodate(*npud);
62 addr = end;
63 continue;
66 npmd = pmd_offset(npud, addr);
67 if(!pmd_present(*npmd)){
68 end = ADD_ROUND(addr, PMD_SIZE);
69 if(end > end_addr)
70 end = end_addr;
71 if(force || pmd_newpage(*npmd)){
72 op_index = add_munmap(addr, end - addr, ops,
73 op_index, last_op, data,
74 do_ops);
75 pmd_mkuptodate(*npmd);
77 addr = end;
78 continue;
81 npte = pte_offset_kernel(npmd, addr);
82 r = pte_read(*npte);
83 w = pte_write(*npte);
84 x = pte_exec(*npte);
85 if(!pte_dirty(*npte))
86 w = 0;
87 if(!pte_young(*npte)){
88 r = 0;
89 w = 0;
91 if(force || pte_newpage(*npte)){
92 if(pte_present(*npte))
93 op_index = add_mmap(addr,
94 pte_val(*npte) & PAGE_MASK,
95 PAGE_SIZE, r, w, x, ops,
96 op_index, last_op, data,
97 do_ops);
98 else op_index = add_munmap(addr, PAGE_SIZE, ops,
99 op_index, last_op, data,
100 do_ops);
102 else if(pte_newprot(*npte))
103 op_index = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
104 op_index, last_op, data,
105 do_ops);
107 *npte = pte_mkuptodate(*npte);
108 addr += PAGE_SIZE;
110 (*do_ops)(data, ops, op_index);
113 int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
115 struct mm_struct *mm;
116 pgd_t *pgd;
117 pud_t *pud;
118 pmd_t *pmd;
119 pte_t *pte;
120 unsigned long addr, last;
121 int updated = 0, err;
123 mm = &init_mm;
124 for(addr = start; addr < end;){
125 pgd = pgd_offset(mm, addr);
126 if(!pgd_present(*pgd)){
127 last = ADD_ROUND(addr, PGDIR_SIZE);
128 if(last > end)
129 last = end;
130 if(pgd_newpage(*pgd)){
131 updated = 1;
132 err = os_unmap_memory((void *) addr,
133 last - addr);
134 if(err < 0)
135 panic("munmap failed, errno = %d\n",
136 -err);
138 addr = last;
139 continue;
142 pud = pud_offset(pgd, addr);
143 if(!pud_present(*pud)){
144 last = ADD_ROUND(addr, PUD_SIZE);
145 if(last > end)
146 last = end;
147 if(pud_newpage(*pud)){
148 updated = 1;
149 err = os_unmap_memory((void *) addr,
150 last - addr);
151 if(err < 0)
152 panic("munmap failed, errno = %d\n",
153 -err);
155 addr = last;
156 continue;
159 pmd = pmd_offset(pud, addr);
160 if(!pmd_present(*pmd)){
161 last = ADD_ROUND(addr, PMD_SIZE);
162 if(last > end)
163 last = end;
164 if(pmd_newpage(*pmd)){
165 updated = 1;
166 err = os_unmap_memory((void *) addr,
167 last - addr);
168 if(err < 0)
169 panic("munmap failed, errno = %d\n",
170 -err);
172 addr = last;
173 continue;
176 pte = pte_offset_kernel(pmd, addr);
177 if(!pte_present(*pte) || pte_newpage(*pte)){
178 updated = 1;
179 err = os_unmap_memory((void *) addr,
180 PAGE_SIZE);
181 if(err < 0)
182 panic("munmap failed, errno = %d\n",
183 -err);
184 if(pte_present(*pte))
185 map_memory(addr,
186 pte_val(*pte) & PAGE_MASK,
187 PAGE_SIZE, 1, 1, 1);
189 else if(pte_newprot(*pte)){
190 updated = 1;
191 protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
193 addr += PAGE_SIZE;
195 return(updated);
198 void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
200 address &= PAGE_MASK;
201 flush_tlb_range(vma, address, address + PAGE_SIZE);
204 void flush_tlb_all(void)
206 flush_tlb_mm(current->mm);
209 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
211 CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
212 flush_tlb_kernel_range_common, start, end);
215 void flush_tlb_kernel_vm(void)
217 CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
218 flush_tlb_kernel_range_common(start_vm, end_vm));
221 void __flush_tlb_one(unsigned long addr)
223 CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
226 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
227 unsigned long end)
229 CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
230 end);
233 void flush_tlb_mm(struct mm_struct *mm)
235 CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
/* Unconditionally resync all mappings, dispatched on tt vs skas mode. */
void force_flush_all(void)
{
	CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}
243 pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
245 return(pgd_offset(mm, address));
248 pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
250 return(pud_offset(pgd, address));
253 pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
255 return(pmd_offset(pud, address));
258 pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
260 return(pte_offset_kernel(pmd, address));
263 pte_t *addr_pte(struct task_struct *task, unsigned long addr)
265 pgd_t *pgd = pgd_offset(task->mm, addr);
266 pud_t *pud = pud_offset(pgd, addr);
267 pmd_t *pmd = pmd_offset(pud, addr);
269 return(pte_offset_map(pmd, addr));
272 int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
273 int r, int w, int x, struct host_vm_op *ops, int index,
274 int last_filled, int data,
275 void (*do_ops)(int, struct host_vm_op *, int))
277 __u64 offset;
278 struct host_vm_op *last;
279 int fd;
281 fd = phys_mapping(phys, &offset);
282 if(index != -1){
283 last = &ops[index];
284 if((last->type == MMAP) &&
285 (last->u.mmap.addr + last->u.mmap.len == virt) &&
286 (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
287 (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
288 (last->u.mmap.offset + last->u.mmap.len == offset)){
289 last->u.mmap.len += len;
290 return(index);
294 if(index == last_filled){
295 (*do_ops)(data, ops, last_filled);
296 index = -1;
299 ops[++index] = ((struct host_vm_op) { .type = MMAP,
300 .u = { .mmap = {
301 .addr = virt,
302 .len = len,
303 .r = r,
304 .w = w,
305 .x = x,
306 .fd = fd,
307 .offset = offset }
308 } });
309 return(index);
312 int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops,
313 int index, int last_filled, int data,
314 void (*do_ops)(int, struct host_vm_op *, int))
316 struct host_vm_op *last;
318 if(index != -1){
319 last = &ops[index];
320 if((last->type == MUNMAP) &&
321 (last->u.munmap.addr + last->u.mmap.len == addr)){
322 last->u.munmap.len += len;
323 return(index);
327 if(index == last_filled){
328 (*do_ops)(data, ops, last_filled);
329 index = -1;
332 ops[++index] = ((struct host_vm_op) { .type = MUNMAP,
333 .u = { .munmap = {
334 .addr = addr,
335 .len = len } } });
336 return(index);
339 int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x,
340 struct host_vm_op *ops, int index, int last_filled, int data,
341 void (*do_ops)(int, struct host_vm_op *, int))
343 struct host_vm_op *last;
345 if(index != -1){
346 last = &ops[index];
347 if((last->type == MPROTECT) &&
348 (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
349 (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
350 (last->u.mprotect.x == x)){
351 last->u.mprotect.len += len;
352 return(index);
356 if(index == last_filled){
357 (*do_ops)(data, ops, last_filled);
358 index = -1;
361 ops[++index] = ((struct host_vm_op) { .type = MPROTECT,
362 .u = { .mprotect = {
363 .addr = addr,
364 .len = len,
365 .r = r,
366 .w = w,
367 .x = x } } });
368 return(index);