/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "user_util.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"

/* Round n up to the next inc boundary (advancing a full inc if n is
 * already aligned); inc must be a power of two. */
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
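
/*
 * Walk the page tables covering [start_addr, end_addr) and generate the
 * host mmap/munmap/mprotect operations needed to bring the host address
 * space in sync with them.  Operations are batched in ops[] and handed
 * to do_ops whenever the array fills up, and once more at the end.
 */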
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force,
                      void (*do_ops)(union mm_context *, struct host_vm_op *,
                                     int))
{
        pgd_t *npgd;
        pud_t *npud;
        pmd_t *npmd;
        pte_t *npte;
        union mm_context *mmu = &mm->context;
        unsigned long addr, end;
        int r, w, x;
        struct host_vm_op ops[16];
        int op_index = -1, last_op = sizeof(ops) / sizeof(ops[0]) - 1;

        if(mm == NULL) return;

        for(addr = start_addr; addr < end_addr;){
                npgd = pgd_offset(mm, addr);
                if(!pgd_present(*npgd)){
                        end = ADD_ROUND(addr, PGDIR_SIZE);
                        if(end > end_addr)
                                end = end_addr;
                        if(force || pgd_newpage(*npgd)){
                                op_index = add_munmap(addr, end - addr, ops,
                                                      op_index, last_op, mmu,
                                                      do_ops);
                                pgd_mkuptodate(*npgd);
                        }
                        addr = end;
                        continue;
                }

                npud = pud_offset(npgd, addr);
                if(!pud_present(*npud)){
                        end = ADD_ROUND(addr, PUD_SIZE);
                        if(end > end_addr)
                                end = end_addr;
                        if(force || pud_newpage(*npud)){
                                op_index = add_munmap(addr, end - addr, ops,
                                                      op_index, last_op, mmu,
                                                      do_ops);
                                pud_mkuptodate(*npud);
                        }
                        addr = end;
                        continue;
                }

                npmd = pmd_offset(npud, addr);
                if(!pmd_present(*npmd)){
                        end = ADD_ROUND(addr, PMD_SIZE);
                        if(end > end_addr)
                                end = end_addr;
                        if(force || pmd_newpage(*npmd)){
                                op_index = add_munmap(addr, end - addr, ops,
                                                      op_index, last_op, mmu,
                                                      do_ops);
                                pmd_mkuptodate(*npmd);
                        }
                        addr = end;
                        continue;
                }

                npte = pte_offset_kernel(npmd, addr);
                r = pte_read(*npte);
                w = pte_write(*npte);
                x = pte_exec(*npte);
                /* A pte that hasn't been accessed is mapped with no access,
                 * so that the first fault marks it young. */
                if(!pte_young(*npte)){
                        r = 0;
                        w = 0;
                }
                if(force || pte_newpage(*npte)){
                        if(pte_present(*npte))
                                op_index = add_mmap(addr,
                                                    pte_val(*npte) & PAGE_MASK,
                                                    PAGE_SIZE, r, w, x, ops,
                                                    op_index, last_op, mmu,
                                                    do_ops);
                        else op_index = add_munmap(addr, PAGE_SIZE, ops,
                                                   op_index, last_op, mmu,
                                                   do_ops);
                }
                else if(pte_newprot(*npte))
                        op_index = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
                                                op_index, last_op, mmu,
                                                do_ops);

                *npte = pte_mkuptodate(*npte);
                addr += PAGE_SIZE;
        }

        (*do_ops)(mmu, ops, op_index);
}
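
/*
 * Sync the kernel mappings in [start, end) with the host: stale ranges
 * are unmapped directly with os_unmap_memory(), and present pages are
 * remapped or reprotected as needed.  Returns nonzero if anything was
 * changed.
 */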
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, last;
        int updated = 0, err;

        mm = &init_mm;
        for(addr = start; addr < end;){
                pgd = pgd_offset(mm, addr);
                if(!pgd_present(*pgd)){
                        last = ADD_ROUND(addr, PGDIR_SIZE);
                        if(last > end)
                                last = end;
                        if(pgd_newpage(*pgd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pud = pud_offset(pgd, addr);
                if(!pud_present(*pud)){
                        last = ADD_ROUND(addr, PUD_SIZE);
                        if(last > end)
                                last = end;
                        if(pud_newpage(*pud)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if(!pmd_present(*pmd)){
                        last = ADD_ROUND(addr, PMD_SIZE);
                        if(last > end)
                                last = end;
                        if(pmd_newpage(*pmd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                if(!pte_present(*pte) || pte_newpage(*pte)){
                        updated = 1;
                        err = os_unmap_memory((void *) addr,
                                              PAGE_SIZE);
                        if(err < 0)
                                panic("munmap failed, errno = %d\n",
                                      -err);
                        if(pte_present(*pte))
                                map_memory(addr,
                                           pte_val(*pte) & PAGE_MASK,
                                           PAGE_SIZE, 1, 1, 1);
                }
                else if(pte_newprot(*pte)){
                        updated = 1;
                        protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
                }
                addr += PAGE_SIZE;
        }
        return(updated);
}
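
/*
 * Trivial wrappers around the generic page table accessors, so that
 * code elsewhere in UML can look up page table entries by address.
 */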
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
        return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
        return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
        return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
        return(pte_offset_kernel(pmd, address));
}

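/* Look up the pte mapping addr in task's address space. */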
pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(task->mm, addr);
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return(pte_offset_map(pmd, addr));
}

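/*
 * add_mmap, add_munmap, and add_mprotect queue a host operation in ops[].
 * Each first tries to merge the request into the preceding entry when the
 * two are adjacent and compatible; otherwise a new slot is claimed, with
 * the full array flushed through do_ops first if it is full.  All three
 * return the updated index.
 */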
int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
             int r, int w, int x, struct host_vm_op *ops, int index,
             int last_filled, union mm_context *mmu,
             void (*do_ops)(union mm_context *, struct host_vm_op *, int))
{
        __u64 offset;
        struct host_vm_op *last;
        int fd;

        fd = phys_mapping(phys, &offset);
        if(index != -1){
                last = &ops[index];
                if((last->type == MMAP) &&
                   (last->u.mmap.addr + last->u.mmap.len == virt) &&
                   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
                   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
                   (last->u.mmap.offset + last->u.mmap.len == offset)){
                        last->u.mmap.len += len;
                        return(index);
                }
        }

        if(index == last_filled){
                (*do_ops)(mmu, ops, last_filled);
                index = -1;
        }

        ops[++index] = ((struct host_vm_op) { .type = MMAP,
                                              .u = { .mmap = {
                                                      .addr = virt,
                                                      .phys = phys,
                                                      .len = len,
                                                      .r = r,
                                                      .w = w,
                                                      .x = x,
                                                      .fd = fd,
                                                      .offset = offset }
                                              } });
        return(index);
}

int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops,
               int index, int last_filled, union mm_context *mmu,
               void (*do_ops)(union mm_context *, struct host_vm_op *, int))
{
        struct host_vm_op *last;

        if(index != -1){
                last = &ops[index];
                if((last->type == MUNMAP) &&
                   (last->u.munmap.addr + last->u.munmap.len == addr)){
                        last->u.munmap.len += len;
                        return(index);
                }
        }

        if(index == last_filled){
                (*do_ops)(mmu, ops, last_filled);
                index = -1;
        }

        ops[++index] = ((struct host_vm_op) { .type = MUNMAP,
                                              .u = { .munmap = {
                                                      .addr = addr,
                                                      .len = len } } });
        return(index);
}

int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x,
                 struct host_vm_op *ops, int index, int last_filled,
                 union mm_context *mmu,
                 void (*do_ops)(union mm_context *, struct host_vm_op *, int))
{
        struct host_vm_op *last;

        if(index != -1){
                last = &ops[index];
                if((last->type == MPROTECT) &&
                   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
                   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
                   (last->u.mprotect.x == x)){
                        last->u.mprotect.len += len;
                        return(index);
                }
        }

        if(index == last_filled){
                (*do_ops)(mmu, ops, last_filled);
                index = -1;
        }

        ops[++index] = ((struct host_vm_op) { .type = MPROTECT,
                                              .u = { .mprotect = {
                                                      .addr = addr,
                                                      .len = len,
                                                      .r = r,
                                                      .w = w,
                                                      .x = x } } });
        return(index);
}
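
/*
 * The remaining flush entry points dispatch to the tt or skas
 * implementation via CHOOSE_MODE, depending on the mode UML is
 * running in.
 */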
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
        address &= PAGE_MASK;
        flush_tlb_range(vma, address, address + PAGE_SIZE);
}

void flush_tlb_all(void)
{
        flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
                         flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
        CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
                    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
        CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
                         end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
        CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}