/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "as-layout.h"
#include "tlb.h"
#include "mem_user.h"
#include "os.h"
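
/*
 * Queue a host mmap operation for the page range starting at virt. If it
 * simply extends the previous MMAP op (same protections and fd, contiguous
 * in both virtual address and file offset), the two are merged; otherwise
 * a new op is appended, first flushing the queue through do_ops if it is
 * already full.
 */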
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                    int r, int w, int x, struct host_vm_op *ops, int *index,
                    int last_filled, union mm_context *mmu, void **flush,
                    int (*do_ops)(union mm_context *, struct host_vm_op *,
                                  int, int, void **))
{
        __u64 offset;
        struct host_vm_op *last;
        int fd, ret = 0;

        fd = phys_mapping(phys, &offset);
        if(*index != -1){
                last = &ops[*index];
                if((last->type == MMAP) &&
                   (last->u.mmap.addr + last->u.mmap.len == virt) &&
                   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
                   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
                   (last->u.mmap.offset + last->u.mmap.len == offset)){
                        last->u.mmap.len += len;
                        return 0;
                }
        }

        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                *index = -1;
        }

        ops[++*index] = ((struct host_vm_op) { .type = MMAP,
                                               .u = { .mmap = { .addr   = virt,
                                                                .len    = len,
                                                                .r      = r,
                                                                .w      = w,
                                                                .x      = x,
                                                                .fd     = fd,
                                                                .offset = offset }
                                             } });
        return ret;
}
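
/*
 * Queue a host munmap operation, merging it into the previous op when that
 * op is an MUNMAP of the immediately preceding range.
 */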
static int add_munmap(unsigned long addr, unsigned long len,
                      struct host_vm_op *ops, int *index, int last_filled,
                      union mm_context *mmu, void **flush,
                      int (*do_ops)(union mm_context *, struct host_vm_op *,
                                    int, int, void **))
{
        struct host_vm_op *last;
        int ret = 0;

        if(*index != -1){
                last = &ops[*index];
                if((last->type == MUNMAP) &&
                   (last->u.munmap.addr + last->u.munmap.len == addr)){
                        last->u.munmap.len += len;
                        return 0;
                }
        }

        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                *index = -1;
        }

        ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
                                               .u = { .munmap = { .addr = addr,
                                                                  .len  = len } } });
        return ret;
}
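
/*
 * Queue a host mprotect operation, merging adjacent ranges that request
 * identical r/w/x protections.
 */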
static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
                        int x, struct host_vm_op *ops, int *index,
                        int last_filled, union mm_context *mmu, void **flush,
                        int (*do_ops)(union mm_context *, struct host_vm_op *,
                                      int, int, void **))
{
        struct host_vm_op *last;
        int ret = 0;

        if(*index != -1){
                last = &ops[*index];
                if((last->type == MPROTECT) &&
                   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
                   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
                   (last->u.mprotect.x == x)){
                        last->u.mprotect.len += len;
                        return 0;
                }
        }

        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                *index = -1;
        }

        ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
                                               .u = { .mprotect = { .addr = addr,
                                                                    .len  = len,
                                                                    .r    = r,
                                                                    .w    = w,
                                                                    .x    = x } } });
        return ret;
}
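
/* Advance n to the next inc-aligned boundary; inc must be a power of two. */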
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
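
/*
 * Walk the ptes under one pmd, queueing a host op for every pte that needs
 * its host mapping changed: a new page becomes an mmap (if present) or a
 * munmap (if not), and a pure protection change becomes an mprotect. Each
 * pte is marked up to date once it has been handled.
 */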
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                                   unsigned long end, struct host_vm_op *ops,
                                   int last_op, int *op_index, int force,
                                   union mm_context *mmu, void **flush,
                                   int (*do_ops)(union mm_context *,
                                                 struct host_vm_op *, int, int,
                                                 void **))
{
        pte_t *pte;
        int r, w, x, ret = 0;

        pte = pte_offset_kernel(pmd, addr);
        do {
                r = pte_read(*pte);
                w = pte_write(*pte);
                x = pte_exec(*pte);
                if (!pte_young(*pte)) {
                        r = 0;
                        w = 0;
                } else if (!pte_dirty(*pte)) {
                        w = 0;
                }
                if(force || pte_newpage(*pte)){
                        if(pte_present(*pte))
                                ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
                                               PAGE_SIZE, r, w, x, ops,
                                               op_index, last_op, mmu, flush,
                                               do_ops);
                        else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
                                              last_op, mmu, flush, do_ops);
                }
                else if(pte_newprot(*pte))
                        ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
                                           op_index, last_op, mmu, flush,
                                           do_ops);
                *pte = pte_mkuptodate(*pte);
        } while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
        return ret;
}
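
/*
 * This and update_pud_range below walk the middle page-table levels the
 * same way: an entry that is absent and newly so has its whole extent
 * unmapped in a single op, while a populated entry is handed down to the
 * next level.
 */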
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
                                   unsigned long end, struct host_vm_op *ops,
                                   int last_op, int *op_index, int force,
                                   union mm_context *mmu, void **flush,
                                   int (*do_ops)(union mm_context *,
                                                 struct host_vm_op *, int, int,
                                                 void **))
{
        pmd_t *pmd;
        unsigned long next;
        int ret = 0;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if(!pmd_present(*pmd)){
                        if(force || pmd_newpage(*pmd)){
                                ret = add_munmap(addr, next - addr, ops,
                                                 op_index, last_op, mmu,
                                                 flush, do_ops);
                                pmd_mkuptodate(*pmd);
                        }
                }
                else ret = update_pte_range(pmd, addr, next, ops, last_op,
                                            op_index, force, mmu, flush,
                                            do_ops);
        } while (pmd++, addr = next, ((addr != end) && !ret));
        return ret;
}

static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
                                   unsigned long end, struct host_vm_op *ops,
                                   int last_op, int *op_index, int force,
                                   union mm_context *mmu, void **flush,
                                   int (*do_ops)(union mm_context *,
                                                 struct host_vm_op *, int, int,
                                                 void **))
{
        pud_t *pud;
        unsigned long next;
        int ret = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if(!pud_present(*pud)){
                        if(force || pud_newpage(*pud)){
                                ret = add_munmap(addr, next - addr, ops,
                                                 op_index, last_op, mmu,
                                                 flush, do_ops);
                                pud_mkuptodate(*pud);
                        }
                }
                else ret = update_pmd_range(pud, addr, next, ops, last_op,
                                            op_index, force, mmu, flush,
                                            do_ops);
        } while (pud++, addr = next, ((addr != end) && !ret));
        return ret;
}
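
/*
 * Entry point for bringing the host mappings of a range of a process
 * address space into line with its page tables. Walks from the pgd down,
 * batching the resulting ops into a small on-stack array that is flushed
 * through do_ops whenever it fills and once more at the end. On failure
 * the current process is killed, since its host mappings no longer match
 * its page tables.
 */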
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force,
                      int (*do_ops)(union mm_context *, struct host_vm_op *,
                                    int, int, void **))
{
        pgd_t *pgd;
        union mm_context *mmu = &mm->context;
        struct host_vm_op ops[1];
        unsigned long addr = start_addr, next;
        int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
        void *flush = NULL;

        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end_addr);
                if(!pgd_present(*pgd)){
                        if (force || pgd_newpage(*pgd)){
                                ret = add_munmap(addr, next - addr, ops,
                                                 &op_index, last_op, mmu,
                                                 &flush, do_ops);
                                pgd_mkuptodate(*pgd);
                        }
                }
                else ret = update_pud_range(pgd, addr, next, ops, last_op,
                                            &op_index, force, mmu, &flush,
                                            do_ops);
        } while (pgd++, addr = next, ((addr != end_addr) && !ret));

        if(!ret)
                ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

        /* This is not an else because ret is modified above */
        if(ret){
                printk("fix_range_common: failed, killing current process\n");
                force_sig(SIGKILL, current);
        }
}
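
/*
 * Synchronize the host mappings of a kernel address range with init_mm's
 * page tables: ranges whose page-table entries are newly absent are
 * unmapped on the host, and pages marked new are remapped or reprotected.
 * Returns nonzero if anything was changed.
 */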
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, last;
        int updated = 0, err;

        mm = &init_mm;
        for(addr = start; addr < end;){
                pgd = pgd_offset(mm, addr);
                if(!pgd_present(*pgd)){
                        last = ADD_ROUND(addr, PGDIR_SIZE);
                        if(last > end)
                                last = end;
                        if(pgd_newpage(*pgd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pud = pud_offset(pgd, addr);
                if(!pud_present(*pud)){
                        last = ADD_ROUND(addr, PUD_SIZE);
                        if(last > end)
                                last = end;
                        if(pud_newpage(*pud)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if(!pmd_present(*pmd)){
                        last = ADD_ROUND(addr, PMD_SIZE);
                        if(last > end)
                                last = end;
                        if(pmd_newpage(*pmd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                if(!pte_present(*pte) || pte_newpage(*pte)){
                        updated = 1;
                        err = os_unmap_memory((void *) addr, PAGE_SIZE);
                        if(err < 0)
                                panic("munmap failed, errno = %d\n",
                                      -err);
                        if(pte_present(*pte))
                                map_memory(addr,
                                           pte_val(*pte) & PAGE_MASK,
                                           PAGE_SIZE, 1, 1, 1);
                }
                else if(pte_newprot(*pte)){
                        updated = 1;
                        os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
                }
                addr += PAGE_SIZE;
        }
        return(updated);
}
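
/* Out-of-line wrappers around the generic page-table accessor macros. */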
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
        return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
        return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
        return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
        return(pte_offset_kernel(pmd, address));
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(task->mm, addr);
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return(pte_offset_map(pmd, addr));
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
        address &= PAGE_MASK;
        flush_tlb_range(vma, address, address + PAGE_SIZE);
}

void flush_tlb_all(void)
{
        flush_tlb_mm(current->mm);
}
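
/*
 * The remaining flush operations dispatch at run time, via CHOOSE_MODE,
 * between the tt (tracing thread) and skas (separate kernel address space)
 * implementations.
 */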
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
                         flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
        CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
                    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
        CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
                         end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
        CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}