arch/um/kernel/skas/mmu.c

/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/sched.h"
#include "linux/list.h"
#include "linux/spinlock.h"
#include "linux/slab.h"
#include "linux/errno.h"
#include "linux/mm.h"
#include "asm/current.h"
#include "asm/segment.h"
#include "asm/mmu.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "asm/ldt.h"
#include "os.h"
#include "skas.h"

extern int __syscall_stub_start;

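/*
 * Walk (and, where needed, allocate) the page table levels by hand and
 * map the kernel page 'kernel' read-only into the process address space
 * at 'proc'. Used to place the syscall stub code and data pages.
 */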
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
                         unsigned long kernel)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset(mm, proc);
        pud = pud_alloc(mm, pgd, proc);
        if (!pud)
                goto out;

        pmd = pmd_alloc(mm, pud, proc);
        if (!pmd)
                goto out_pmd;

        pte = pte_alloc_map(mm, pmd, proc);
        if (!pte)
                goto out_pte;

        /* There's an interaction between the skas0 stub pages, stack
         * randomization, and the BUG at the end of exit_mmap.  exit_mmap
         * checks that the number of page tables freed is the same as had
         * been allocated.  If the stack is on the last page table page,
         * then the stack pte page will be freed, and if not, it won't.  To
         * avoid having to know where the stack is, or if the process mapped
         * something at the top of its address space for some other reason,
         * we set TASK_SIZE to end at the start of the last page table.
         * This keeps exit_mmap off the last page, but introduces a leak
         * of that page.  So, we hang onto it here and free it in
         * destroy_context_skas.
         */

        mm->context.skas.last_page_table = pmd_page_vaddr(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES
        mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif

        *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
        *pte = pte_mkread(*pte);
        return 0;

        /* Unwind in reverse order of allocation; each label falls through
         * to the next.
         */
 out_pte:
        pmd_free(pmd);
 out_pmd:
        pud_free(pud);
 out:
        return -ENOMEM;
}

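/*
 * Initialize the skas part of a new mm_context: allocate the stub stack
 * page and map the stub code/data pages (when a stub is needed), then
 * create the new host address space, either as a /proc/mm descriptor
 * (proc_mm) or as a ptraced host process, and set up the LDT.
 */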
int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
{
        struct mmu_context_skas *from_mm = NULL;
        struct mmu_context_skas *to_mm = &mm->context.skas;
        unsigned long stack = 0;
        int ret = -ENOMEM;

        if(skas_needs_stub){
                stack = get_zeroed_page(GFP_KERNEL);
                if(stack == 0)
                        goto out;

                /* This zeros the entry that pgd_alloc didn't, needed since
                 * we are about to reinitialize it, and want mm.nr_ptes to
                 * be accurate.
                 */
                mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);

                ret = init_stub_pte(mm, CONFIG_STUB_CODE,
                                    (unsigned long) &__syscall_stub_start);
                if(ret)
                        goto out_free;

                ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
                if(ret)
                        goto out_free;

                mm->nr_ptes--;
        }

        to_mm->id.stack = stack;
        if(current->mm != NULL && current->mm != &init_mm)
                from_mm = &current->mm->context.skas;

        if(proc_mm){
                ret = new_mm(stack);
                if(ret < 0){
                        printk("init_new_context_skas - new_mm failed, "
                               "errno = %d\n", ret);
                        goto out_free;
                }
                to_mm->id.u.mm_fd = ret;
        }
        else {
                if(from_mm)
                        to_mm->id.u.pid = copy_context_skas0(stack,
                                                             from_mm->id.u.pid);
                else to_mm->id.u.pid = start_userspace(stack);
        }

        ret = init_new_ldt(to_mm, from_mm);
        if(ret < 0){
                printk("init_new_context_skas - init_new_ldt failed, "
                       "errno = %d\n", ret);
                goto out_free;
        }

        return 0;

 out_free:
        /* Free the local 'stack' page; to_mm->id.stack may not have been
         * set yet on the early error paths.
         */
        if(stack != 0)
                free_page(stack);
 out:
        return ret;
}

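/*
 * Tear down the skas part of an mm_context: release the host address
 * space and free the stub stack page together with the deliberately
 * leaked last page table page (see the comment in init_stub_pte).
 */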
void destroy_context_skas(struct mm_struct *mm)
{
        struct mmu_context_skas *mmu = &mm->context.skas;

        if(proc_mm)
                os_close_file(mmu->id.u.mm_fd);
        else
                os_kill_ptraced_process(mmu->id.u.pid, 1);

        if(!proc_mm || !ptrace_faultinfo){
                free_page(mmu->id.stack);
                pte_lock_deinit(virt_to_page(mmu->last_page_table));
                pte_free_kernel((pte_t *) mmu->last_page_table);
                dec_zone_page_state(virt_to_page(mmu->last_page_table),
                                    NR_PAGETABLE);
#ifdef CONFIG_3_LEVEL_PGTABLES
                pmd_free((pmd_t *) mmu->last_pmd);
#endif
        }
}