/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "asm/pgalloc.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "user_util.h"
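
/* These two headers are assumed to supply the declarations used below
 * (struct host_vm_op, phys_mapping, map_memory, and the os_* helpers);
 * adjust the names if this tree lays its headers out differently.
 */
#include "tlb.h"
#include "os.h"

/* add_mmap() queues a host mmap of the file backing physical address phys,
 * mapped at virtual address virt for len bytes with permissions r/w/x.  If
 * the request simply extends the op at the top of the ops array it is merged
 * into it; otherwise a new MMAP op is appended, flushing the full array
 * through do_ops first when necessary.
 */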
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                    int r, int w, int x, struct host_vm_op *ops, int *index,
                    int last_filled, union mm_context *mmu, void **flush,
                    int (*do_ops)(union mm_context *, struct host_vm_op *,
                                  int, int, void **))
{
        __u64 offset;
        struct host_vm_op *last;
        int fd, ret = 0;

        fd = phys_mapping(phys, &offset);
        if(*index != -1){
                last = &ops[*index];
                if((last->type == MMAP) &&
                   (last->u.mmap.addr + last->u.mmap.len == virt) &&
                   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
                   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
                   (last->u.mmap.offset + last->u.mmap.len == offset)){
                        last->u.mmap.len += len;
                        return 0;
                }
        }

        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                *index = -1;
        }

        ops[++*index] = ((struct host_vm_op) { .type = MMAP,
                                               .u = { .mmap = {
                                                       .addr   = virt,
                                                       .len    = len,
                                                       .r      = r,
                                                       .w      = w,
                                                       .x      = x,
                                                       .fd     = fd,
                                                       .offset = offset } } });
        return ret;
}
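
/* add_munmap() queues a host munmap of [addr, addr + len), merging with the
 * previous op when that op is an MUNMAP ending exactly at addr; otherwise a
 * new op is appended, flushing the ops array through do_ops first if it is
 * already full.
 */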
static int add_munmap(unsigned long addr, unsigned long len,
                      struct host_vm_op *ops, int *index, int last_filled,
                      union mm_context *mmu, void **flush,
                      int (*do_ops)(union mm_context *, struct host_vm_op *,
                                    int, int, void **))
{
        struct host_vm_op *last;
        int ret = 0;

        if(*index != -1){
                last = &ops[*index];
                if((last->type == MUNMAP) &&
                   (last->u.munmap.addr + last->u.munmap.len == addr)){
                        last->u.munmap.len += len;
                        return 0;
                }
        }

        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                *index = -1;
        }

        ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
                                               .u = { .munmap = {
                                                       .addr = addr,
                                                       .len  = len } } });
        return ret;
}
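
/* add_mprotect() queues a host mprotect of [addr, addr + len) to the r/w/x
 * permissions given, merging with the previous op when it is a contiguous
 * MPROTECT with identical permissions.
 */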
static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
                        int x, struct host_vm_op *ops, int *index,
                        int last_filled, union mm_context *mmu, void **flush,
                        int (*do_ops)(union mm_context *, struct host_vm_op *,
                                      int, int, void **))
{
        struct host_vm_op *last;
        int ret = 0;

        if(*index != -1){
                last = &ops[*index];
                if((last->type == MPROTECT) &&
                   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
                   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
                   (last->u.mprotect.x == x)){
                        last->u.mprotect.len += len;
                        return 0;
                }
        }

        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                *index = -1;
        }

        ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
                                               .u = { .mprotect = {
                                                       .addr = addr,
                                                       .len  = len,
                                                       .r    = r,
                                                       .w    = w,
                                                       .x    = x } } });
        return ret;
}
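
/* Round n up to the next inc-aligned boundary (a full inc if it is already
 * aligned); inc must be a power of two.  Used to skip to the end of the
 * current page table block.
 */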
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
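
/* fix_range_common() walks the page tables covering [start_addr, end_addr)
 * and turns every out-of-date entry into a host MMAP, MUNMAP, or MPROTECT
 * operation, batched in the ops array and issued through the mode-specific
 * do_ops callback.  With force set, every range is redone regardless of the
 * new-page marks.  If anything fails, the current process is killed because
 * its address space no longer matches what the host has mapped.
 */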
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force,
                      int (*do_ops)(union mm_context *, struct host_vm_op *,
                                    int, int, void **))
{
        pgd_t *npgd;
        pud_t *npud;
        pmd_t *npmd;
        pte_t *npte;
        union mm_context *mmu = &mm->context;
        unsigned long addr, end;
        int r, w, x;
        struct host_vm_op ops[1];
        void *flush = NULL;
        int op_index = -1, last_op = sizeof(ops) / sizeof(ops[0]) - 1;
        int ret = 0;

        if(mm == NULL) return;

        for(addr = start_addr; addr < end_addr && !ret;){
                npgd = pgd_offset(mm, addr);
                if(!pgd_present(*npgd)){
                        end = ADD_ROUND(addr, PGDIR_SIZE);
                        if(end > end_addr)
                                end = end_addr;
                        if(force || pgd_newpage(*npgd)){
                                ret = add_munmap(addr, end - addr, ops,
                                                 &op_index, last_op, mmu,
                                                 &flush, do_ops);
                                pgd_mkuptodate(*npgd);
                        }
                        addr = end;
                        continue;
                }

                npud = pud_offset(npgd, addr);
                if(!pud_present(*npud)){
                        end = ADD_ROUND(addr, PUD_SIZE);
                        if(end > end_addr)
                                end = end_addr;
                        if(force || pud_newpage(*npud)){
                                ret = add_munmap(addr, end - addr, ops,
                                                 &op_index, last_op, mmu,
                                                 &flush, do_ops);
                                pud_mkuptodate(*npud);
                        }
                        addr = end;
                        continue;
                }

                npmd = pmd_offset(npud, addr);
                if(!pmd_present(*npmd)){
                        end = ADD_ROUND(addr, PMD_SIZE);
                        if(end > end_addr)
                                end = end_addr;
                        if(force || pmd_newpage(*npmd)){
                                ret = add_munmap(addr, end - addr, ops,
                                                 &op_index, last_op, mmu,
                                                 &flush, do_ops);
                                pmd_mkuptodate(*npmd);
                        }
                        addr = end;
                        continue;
                }

                npte = pte_offset_kernel(npmd, addr);
                r = pte_read(*npte);
                w = pte_write(*npte);
                x = pte_exec(*npte);
                if (!pte_young(*npte)) {
                        r = 0;
                        w = 0;
                } else if (!pte_dirty(*npte)) {
                        w = 0;
                }

                if(force || pte_newpage(*npte)){
                        if(pte_present(*npte))
                                ret = add_mmap(addr,
                                               pte_val(*npte) & PAGE_MASK,
                                               PAGE_SIZE, r, w, x, ops,
                                               &op_index, last_op, mmu,
                                               &flush, do_ops);
                        else ret = add_munmap(addr, PAGE_SIZE, ops,
                                              &op_index, last_op, mmu,
                                              &flush, do_ops);
                }
                else if(pte_newprot(*npte))
                        ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
                                           &op_index, last_op, mmu,
                                           &flush, do_ops);

                *npte = pte_mkuptodate(*npte);
                addr += PAGE_SIZE;
        }

        if(!ret)
                ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

        /* This is not an else because ret is modified above */
        if(ret){
                printk("fix_range_common: failed, killing current process\n");
                force_sig(SIGKILL, current);
        }
}
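
/* flush_tlb_kernel_range_common() brings the host's view of kernel mappings
 * in [start, end) up to date against init_mm: ranges whose page tables went
 * away are unmapped on the host, still-present pages are remapped, and
 * protection-only changes are reapplied.  Returns nonzero if anything was
 * actually changed.
 */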
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, last;
        int updated = 0, err;

        mm = &init_mm;
        for(addr = start; addr < end;){
                pgd = pgd_offset(mm, addr);
                if(!pgd_present(*pgd)){
                        last = ADD_ROUND(addr, PGDIR_SIZE);
                        if(last > end)
                                last = end;
                        if(pgd_newpage(*pgd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pud = pud_offset(pgd, addr);
                if(!pud_present(*pud)){
                        last = ADD_ROUND(addr, PUD_SIZE);
                        if(last > end)
                                last = end;
                        if(pud_newpage(*pud)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if(!pmd_present(*pmd)){
                        last = ADD_ROUND(addr, PMD_SIZE);
                        if(last > end)
                                last = end;
                        if(pmd_newpage(*pmd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                if(!pte_present(*pte) || pte_newpage(*pte)){
                        updated = 1;
                        err = os_unmap_memory((void *) addr, PAGE_SIZE);
                        if(err < 0)
                                panic("munmap failed, errno = %d\n", -err);
                        if(pte_present(*pte))
                                map_memory(addr,
                                           pte_val(*pte) & PAGE_MASK,
                                           PAGE_SIZE, 1, 1, 1);
                }
                else if(pte_newprot(*pte)){
                        updated = 1;
                        os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
                }
                addr += PAGE_SIZE;
        }

        return(updated);
}
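
/* Trivial wrappers exporting the generic page table walk primitives, plus
 * addr_pte(), which walks all four levels to find the pte mapping addr in a
 * task's address space.
 */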
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
        return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
        return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
        return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
        return(pte_offset_kernel(pmd, address));
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(task->mm, addr);
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return(pte_offset_map(pmd, addr));
}
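
/* Single-page and whole-address-space flushes, built on flush_tlb_range()
 * and flush_tlb_mm() below.
 */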
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
        address &= PAGE_MASK;
        flush_tlb_range(vma, address, address + PAGE_SIZE);
}

void flush_tlb_all(void)
{
        flush_tlb_mm(current->mm);
}
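
/* The remaining entry points just dispatch to the tt- or skas-mode
 * implementation at run time via CHOOSE_MODE/CHOOSE_MODE_PROC.
 */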
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
                         flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
        CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
                    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
        CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
                         end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
        CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}