/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include "linux/stddef.h"
#include "linux/sched.h"
#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgtable.h"
#include "asm/mmu.h"
#include "mem_user.h"
#include "skas.h"
#include "os.h"
#include "tlb.h"
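
/*
 * do_ops() applies a batch of queued host VM operations (mmap, munmap,
 * mprotect) to the host address space backing a UML mm, stopping at the
 * first operation that fails.
 */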
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
                  int finished, void **flush)
{
        struct host_vm_op *op;
        int i, ret = 0;

        for(i = 0; i <= last && !ret; i++){
                op = &ops[i];
                switch(op->type){
                case MMAP:
                        ret = map(&mmu->skas.id, op->u.mmap.addr,
                                  op->u.mmap.len, op->u.mmap.prot,
                                  op->u.mmap.fd, op->u.mmap.offset, finished,
                                  flush);
                        break;
                case MUNMAP:
                        ret = unmap(&mmu->skas.id, op->u.munmap.addr,
                                    op->u.munmap.len, finished, flush);
                        break;
                case MPROTECT:
                        ret = protect(&mmu->skas.id, op->u.mprotect.addr,
                                      op->u.mprotect.len, op->u.mprotect.prot,
                                      finished, flush);
                        break;
                default:
                        printk("Unknown op type %d in do_ops\n", op->type);
                        break;
                }
        }

        return ret;
}
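
/*
 * fix_range() flushes a range of a process address space.  Without /proc/mm
 * the stub pages at the top of the address space must be left alone, so the
 * end of the range is clamped to CONFIG_STUB_START.
 */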
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
{
        if(!proc_mm && (end_addr > CONFIG_STUB_START))
                end_addr = CONFIG_STUB_START;

        fix_range_common(mm, start_addr, end_addr, force, do_ops);
}

void __flush_tlb_one_skas(unsigned long addr)
{
        flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

void flush_tlb_range_skas(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end)
{
        if(vma->vm_mm == NULL)
                flush_tlb_kernel_range_common(start, end);
        else fix_range(vma->vm_mm, start, end, 0);
}

void flush_tlb_mm_skas(struct mm_struct *mm)
{
        unsigned long end;

        /* Don't bother flushing if this address space is about to be
         * destroyed.
         */
        if(atomic_read(&mm->mm_users) == 0)
                return;

        end = proc_mm ? task_size : CONFIG_STUB_START;
        fix_range(mm, 0, end, 0);
}
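
/*
 * force_flush_all_skas() forces every VMA in the current address space to be
 * remapped on the host.
 */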
void force_flush_all_skas(void)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = mm->mmap;

        while(vma != NULL) {
                fix_range(mm, vma->vm_start, vma->vm_end, 1);
                vma = vma->vm_next;
        }
}
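
/*
 * flush_tlb_page_skas() syncs a single page with the host: walk the page
 * tables, work out the protection the host should see, then map, unmap, or
 * mprotect the page there.  Any failure kills the process.
 */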
void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        struct mm_struct *mm = vma->vm_mm;
        struct mm_id *mm_id;
        void *flush = NULL;
        int r, w, x, prot, err = 0;

        pgd = pgd_offset(mm, address);
        if(!pgd_present(*pgd))
                goto kill;

        pud = pud_offset(pgd, address);
        if(!pud_present(*pud))
                goto kill;

        pmd = pmd_offset(pud, address);
        if(!pmd_present(*pmd))
                goto kill;

        pte = pte_offset_kernel(pmd, address);

        /* Drop read/write permission if the page hasn't been referenced yet,
         * and write permission if it hasn't been dirtied.
         */
        r = pte_read(*pte);
        w = pte_write(*pte);
        x = pte_exec(*pte);
        if (!pte_young(*pte)) {
                r = 0;
                w = 0;
        } else if (!pte_dirty(*pte)) {
                w = 0;
        }

        mm_id = &mm->context.skas.id;
        prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                (x ? UM_PROT_EXEC : 0));
        if(pte_newpage(*pte)){
                if(pte_present(*pte)){
                        unsigned long long offset;
                        int fd;

                        fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
                        err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
                                  1, &flush);
                }
                else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
        }
        else if(pte_newprot(*pte))
                err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

        if(err)
                goto kill;

        *pte = pte_mkuptodate(*pte);

        return;

kill:
        printk("Failed to flush page for address 0x%lx\n", address);
        force_sig(SIGKILL, current);
}