/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include "linux/stddef.h"
#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgtable.h"
#include "asm/uaccess.h"
#include "asm/tlbflush.h"
#include "user_util.h"
#include "mem_user.h"
#include "os.h"
#include "tlb.h"
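
/* Callback handed to fix_range_common(): replay a batch of host VM
 * operations (mmap, munmap, mprotect) against the host address space. */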
static void do_ops(int unused, struct host_vm_op *ops, int last)
{
	struct host_vm_op *op;
	int i;

	for(i = 0; i <= last; i++){
		op = &ops[i];
		switch(op->type){
		case MMAP:
			os_map_memory((void *) op->u.mmap.addr, op->u.mmap.fd,
				      op->u.mmap.offset, op->u.mmap.len,
				      op->u.mmap.r, op->u.mmap.w,
				      op->u.mmap.x);
			break;
		case MUNMAP:
			os_unmap_memory((void *) op->u.munmap.addr,
					op->u.munmap.len);
			break;
		case MPROTECT:
			protect_memory(op->u.mprotect.addr, op->u.mprotect.len,
				       op->u.mprotect.r, op->u.mprotect.w,
				       op->u.mprotect.x, 1);
			break;
		default:
			printk("Unknown op type %d in do_ops\n", op->type);
			break;
		}
	}
}
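
/* Sanity-check that we are fixing up our own host address space, then let
 * the generic code walk the page tables and emit host ops via do_ops(). */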
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	if((current->thread.mode.tt.extern_pid != -1) &&
	   (current->thread.mode.tt.extern_pid != os_getpid()))
		panic("fix_range fixing wrong address space, current = 0x%p",
		      current);

	fix_range_common(mm, start_addr, end_addr, force, 0, do_ops);
}
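
/* Bumped whenever kernel vm mappings change; each process tracks the last
 * value it synced with in current->thread.mode.tt.vm_seq. */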
atomic_t vmchange_seq = ATOMIC_INIT(1);
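
/* Flush a kernel vm range, bumping the sequence number if any host
 * mappings actually changed. */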
void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end)
{
	if(flush_tlb_kernel_range_common(start, end))
		atomic_inc(&vmchange_seq);
}
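
/* Set the write permission of a single kernel vm page.  -EFAULT or -ENOMEM
 * apparently means the page is not yet mapped in the host, so flush it in
 * and retry, this time insisting on success. */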
static void protect_vm_page(unsigned long addr, int w, int must_succeed)
{
	int err;

	err = protect_memory(addr, PAGE_SIZE, 1, w, 1, must_succeed);
	if(err == 0) return;
	else if((err == -EFAULT) || (err == -ENOMEM)){
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		protect_vm_page(addr, w, 1);
	}
	else panic("protect_vm_page : protect failed, errno = %d\n", err);
}
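
/* Walk the kernel vm area and apply write permission w to every present
 * page; a missing pmd lets us skip a whole PMD_SIZE stride at a time. */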
void mprotect_kernel_vm(int w)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr;

	mm = &init_mm;
	for(addr = start_vm; addr < end_vm;){
		pgd = pgd_offset(mm, addr);
		pud = pud_offset(pgd, addr);
		pmd = pmd_offset(pud, addr);
		if(pmd_present(*pmd)){
			pte = pte_offset_kernel(pmd, addr);
			if(pte_present(*pte)) protect_vm_page(addr, w, 0);
			addr += PAGE_SIZE;
		}
		else addr += PMD_SIZE;
	}
}
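
/* Flush the entire kernel vm area. */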
void flush_tlb_kernel_vm_tt(void)
{
	flush_tlb_kernel_range(start_vm, end_vm);
}

void __flush_tlb_one_tt(unsigned long addr)
{
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
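
/* Nothing to do here for another process's mm - in tt mode, presumably
 * only the current process's host mappings can safely be touched (see the
 * extern_pid check in fix_range()). */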
void flush_tlb_range_tt(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	if(vma->vm_mm != current->mm) return;

	/* Assumes that the range start ... end is entirely within
	 * either process memory or kernel vm
	 */
	if((start >= start_vm) && (start < end_vm)){
		if(flush_tlb_kernel_range_common(start, end))
			atomic_inc(&vmchange_seq);
	}
	else fix_range(vma->vm_mm, start, end, 0);
}
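
/* Fix up the whole process address range, then re-sync the kernel vm area
 * if some other process has changed it since we last looked. */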
void flush_tlb_mm_tt(struct mm_struct *mm)
{
	unsigned long seq;

	if(mm != current->mm) return;

	fix_range(mm, 0, STACK_TOP, 0);

	seq = atomic_read(&vmchange_seq);
	if(current->thread.mode.tt.vm_seq == seq)
		return;
	current->thread.mode.tt.vm_seq = seq;
	flush_tlb_kernel_range_common(start_vm, end_vm);
}
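
/* Redo every mapping, passing force so that even apparently-correct ones
 * are re-established. */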
void force_flush_all_tt(void)
{
	fix_range(current->mm, 0, STACK_TOP, 1);
	flush_tlb_kernel_range_common(start_vm, end_vm);
}