/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>
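
/*
 * A host_vm_change collects pending mmap/munmap/mprotect operations
 * against the host address space so a page table walk can queue them
 * and have do_ops() issue them.
 */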
struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
		union {
			struct { unsigned long addr; unsigned long len;
				 unsigned int prot; int fd; __u64 offset; } mmap;
			struct { unsigned long addr; unsigned long len; } munmap;
			struct { unsigned long addr; unsigned long len;
				 unsigned int prot; } mprotect;
		} u;
	} ops[1];
	int index;
	struct mm_id *id;
	void *data;
	int force;
};

#define INIT_HVC(mm, force) \
	((struct host_vm_change) \
	 { .ops		= { { .type = NONE } },	\
	   .id		= &mm->context.id, \
	   .data	= NULL, \
	   .index	= 0, \
	   .force	= force })
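
/*
 * Issue every queued operation to the host; "finished" is passed down to
 * the map/unmap/protect helpers to mark the end of the batch.
 */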
static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
				  op->u.mmap.prot, op->u.mmap.fd,
				  op->u.mmap.offset, finished, &hvc->data);
			break;
		case MUNMAP:
			ret = unmap(hvc->id, op->u.munmap.addr,
				    op->u.munmap.len, finished, &hvc->data);
			break;
		case MPROTECT:
			ret = protect(hvc->id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, &hvc->data);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			break;
		}
	}

	return ret;
}
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MMAP,
				    .u = { .mmap = { .addr	= virt,
						     .len	= len,
						     .prot	= prot,
						     .fd	= fd,
						     .offset	= offset }
					 } });
	return ret;
}
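
/* Queue a host munmap, merging it with an adjacent preceding unmap. */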
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if ((addr >= STUB_START) && (addr < STUB_END))
		return -EINVAL;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MUNMAP,
				    .u = { .munmap = { .addr	= addr,
						       .len	= len } } });
	return ret;
}
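
/* Queue a host mprotect, merging adjacent ranges with the same prot. */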
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MPROTECT,
				    .u = { .mprotect = { .addr	= addr,
							 .len	= len,
							 .prot	= prot } } });
	return ret;
}

#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
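
/*
 * The update_*_range() walkers translate the newpage/newprot state in the
 * page tables into queued host operations for the given range.
 */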
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if ((addr >= STUB_START) && (addr < STUB_END))
			continue;

		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, hvc);
			else
				ret = add_munmap(addr, PAGE_SIZE, hvc);
		} else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}

static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, hvc);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, hvc);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}
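
/*
 * Bring the host mappings for [start_addr, end_addr) into line with the
 * page tables; if that fails, the current process is killed.
 */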
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0;

	hvc = INIT_HVC(mm, force);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr < end_addr) && !ret));

	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process: %d\n", task_tgid_vnr(current));
		/* We are under mmap_sem, release it such that current can terminate */
		up_write(&current->mm->mmap_sem);
		force_sig(SIGKILL, current);
		do_signal(&current->thread.regs);
	}
}
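
/*
 * Kernel mappings live in init_mm and are fixed up directly against the
 * host with os_unmap_memory()/map_memory()/os_protect_memory() instead of
 * being queued.
 */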
static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return updated;
}
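
/*
 * Flush a single user page: recompute its protection from the pte and
 * apply the change to the host, killing the process on failure.
 */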
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err)
		goto kill;

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}
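
/* Page table lookup helpers for walking a given mm. */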
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	fix_range_common(mm, start_addr, end_addr, force);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}
EXPORT_SYMBOL(flush_tlb_range);

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	fix_range(mm, start, end, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 0);
		vma = vma->vm_next;
	}
}
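
/* Remap every VMA of the current address space with force set. */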
void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}