/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */
#include "linux/stddef.h"
#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgtable.h"
#include "asm/uaccess.h"
#include "asm/tlbflush.h"
#include "user_util.h"
#include "mem_user.h"
#include "os.h"
#include "tlb.h"
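
/* do_ops() applies a batch of host vm operations queued up by the
 * generic tlb code; each entry maps, unmaps, or reprotects a stretch of
 * the host address space.  It stops at the first failure and returns
 * the error; the mmu, finished, and flush arguments are unused here.
 */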
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
                  int finished, void **flush)
{
        struct host_vm_op *op;
        int i, ret = 0;

        for(i = 0; i <= last && !ret; i++){
                op = &ops[i];
                switch(op->type){
                case MMAP:
                        ret = os_map_memory((void *) op->u.mmap.addr,
                                            op->u.mmap.fd, op->u.mmap.offset,
                                            op->u.mmap.len, op->u.mmap.r,
                                            op->u.mmap.w, op->u.mmap.x);
                        break;
                case MUNMAP:
                        ret = os_unmap_memory((void *) op->u.munmap.addr,
                                              op->u.munmap.len);
                        break;
                case MPROTECT:
                        ret = protect_memory(op->u.mprotect.addr,
                                             op->u.munmap.len,
                                             op->u.mprotect.r,
                                             op->u.mprotect.w,
                                             op->u.mprotect.x, 1);
                        protect_memory(op->u.mprotect.addr, op->u.munmap.len,
                                       op->u.mprotect.r, op->u.mprotect.w,
                                       op->u.mprotect.x, 1);
                        break;
                default:
                        printk("Unknown op type %d in do_ops\n", op->type);
                        break;
                }
        }

        return ret;
}
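
/* Sanity-check that this thread is actually running in the address
 * space being fixed (extern_pid is the host pid backing it in tt mode),
 * then hand the range to the generic fix_range_common() walker with
 * do_ops() as the applier.
 */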
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
{
        if((current->thread.mode.tt.extern_pid != -1) &&
           (current->thread.mode.tt.extern_pid != os_getpid()))
                panic("fix_range fixing wrong address space, current = 0x%p",
                      current);

        fix_range_common(mm, start_addr, end_addr, force, do_ops);
}
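
/* Bumped whenever the kernel vm area mappings change; flush_tlb_mm_tt()
 * compares it against the per-thread vm_seq to decide whether the
 * kernel range must be reflushed for this address space.
 */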
atomic_t vmchange_seq = ATOMIC_INIT(1);
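
/* Flush a kernel range, bumping vmchange_seq when
 * flush_tlb_kernel_range_common() reports that mappings were updated.
 */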
void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end)
{
        if(flush_tlb_kernel_range_common(start, end))
                atomic_inc(&vmchange_seq);
}
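
/* Reprotect a single kernel page.  -EFAULT or -ENOMEM from the host
 * indicates the page is not mapped into this address space yet, so
 * flush it in and retry, this time insisting that the protect succeed.
 */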
static void protect_vm_page(unsigned long addr, int w, int must_succeed)
{
        int err;

        err = protect_memory(addr, PAGE_SIZE, 1, w, 1, must_succeed);
        if(err == 0) return;
        else if((err == -EFAULT) || (err == -ENOMEM)){
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
                protect_vm_page(addr, w, 1);
        }
        else panic("protect_vm_page : protect failed, errno = %d\n", err);
}
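
/* Walk the page tables of the kernel vm area and apply the new write
 * permission to every present page, skipping PMD_SIZE strides where no
 * page table is present.
 */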
void mprotect_kernel_vm(int w)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr;

        mm = &init_mm;
        for(addr = start_vm; addr < end_vm;){
                pgd = pgd_offset(mm, addr);
                pud = pud_offset(pgd, addr);
                pmd = pmd_offset(pud, addr);
                if(pmd_present(*pmd)){
                        pte = pte_offset_kernel(pmd, addr);
                        if(pte_present(*pte)) protect_vm_page(addr, w, 0);
                        addr += PAGE_SIZE;
                }
                else addr += PMD_SIZE;
        }
}
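
/* Flush the entire kernel vm area. */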
void flush_tlb_kernel_vm_tt(void)
{
        flush_tlb_kernel_range(start_vm, end_vm);
}
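
/* Flush the single page containing addr. */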
void __flush_tlb_one_tt(unsigned long addr)
{
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
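
/* Flush a range lying either in the kernel vm area or in process
 * memory, dispatching on where start falls.
 */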
void flush_tlb_range_tt(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end)
{
        if(vma->vm_mm != current->mm) return;

        /* Assumes that the range start ... end is entirely within
         * either process memory or kernel vm
         */
        if((start >= start_vm) && (start < end_vm)){
                if(flush_tlb_kernel_range_common(start, end))
                        atomic_inc(&vmchange_seq);
        }
        else fix_range(vma->vm_mm, start, end, 0);
}
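
/* Fix up the process mappings, then reflush the kernel vm area only if
 * vmchange_seq shows that it changed since this thread last flushed it.
 */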
void flush_tlb_mm_tt(struct mm_struct *mm)
{
        unsigned long seq;

        if(mm != current->mm) return;

        fix_range(mm, 0, STACK_TOP, 0);

        seq = atomic_read(&vmchange_seq);
        if(current->thread.mode.tt.vm_seq == seq)
                return;
        current->thread.mode.tt.vm_seq = seq;
        flush_tlb_kernel_range_common(start_vm, end_vm);
}
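
/* Unconditionally rebuild the process mappings and reflush the kernel
 * vm area.
 */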
void force_flush_all_tt(void)
{
        fix_range(current->mm, 0, STACK_TOP, 1);
        flush_tlb_kernel_range_common(start_vm, end_vm);
}