1 /* Copyright 2002,2003 Andi Kleen, SuSE Labs */
3 /* vsyscall handling for 32bit processes. Map a stub page into it
4 on demand because 32bit cannot reach the kernel's fixmaps */
7 #include <linux/string.h>
8 #include <linux/kernel.h>
10 #include <linux/init.h>
11 #include <linux/stringify.h>
12 #include <linux/security.h>
13 #include <asm/proto.h>
14 #include <asm/tlbflush.h>
15 #include <asm/ia32_unistd.h>
17 /* 32bit VDSOs mapped into user space. */
18 asm(".section \".init.data\",\"aw\"\n"
19 "syscall32_syscall:\n"
20 ".incbin \"arch/x86_64/ia32/vsyscall-syscall.so\"\n"
21 "syscall32_syscall_end:\n"
22 "syscall32_sysenter:\n"
23 ".incbin \"arch/x86_64/ia32/vsyscall-sysenter.so\"\n"
24 "syscall32_sysenter_end:\n"
27 extern unsigned char syscall32_syscall
[], syscall32_syscall_end
[];
28 extern unsigned char syscall32_sysenter
[], syscall32_sysenter_end
[];
29 extern int sysctl_vsyscall32
;
/* Which 32bit entry stub to install: -1 = not decided yet (resolved in
   syscall32_cpu_init from the CPU vendor), >0 = use SYSENTER,
   0 = use SYSCALL. */
static int use_sysenter
= -1;
35 syscall32_nopage(struct vm_area_struct
*vma
, unsigned long adr
, int *type
)
37 struct page
*p
= virt_to_page(adr
- vma
->vm_start
+ syscall32_page
);
/* Prevent VMA merging: a VMA with a ->close operation is never merged
   with its neighbours, so the vsyscall mapping stays a distinct VMA.
   The callback itself intentionally does nothing. */
static void syscall32_vma_close(struct vm_area_struct *vma)
{
}
47 static struct vm_operations_struct syscall32_vm_ops
= {
48 .close
= syscall32_vma_close
,
49 .nopage
= syscall32_nopage
,
54 /* Setup a VMA at program startup for the vsyscall page */
55 int syscall32_setup_pages(struct linux_binprm
*bprm
, int exstack
)
57 int npages
= (VSYSCALL32_END
- VSYSCALL32_BASE
) >> PAGE_SHIFT
;
58 struct vm_area_struct
*vma
;
59 struct mm_struct
*mm
= current
->mm
;
61 vma
= kmem_cache_alloc(vm_area_cachep
, SLAB_KERNEL
);
64 if (security_vm_enough_memory(npages
)) {
65 kmem_cache_free(vm_area_cachep
, vma
);
69 memset(vma
, 0, sizeof(struct vm_area_struct
));
70 /* Could randomize here */
71 vma
->vm_start
= VSYSCALL32_BASE
;
72 vma
->vm_end
= VSYSCALL32_END
;
73 /* MAYWRITE to allow gdb to COW and set breakpoints */
74 vma
->vm_flags
= VM_READ
|VM_EXEC
|VM_MAYREAD
|VM_MAYEXEC
|VM_MAYEXEC
|VM_MAYWRITE
;
75 vma
->vm_flags
|= mm
->def_flags
;
76 vma
->vm_page_prot
= protection_map
[vma
->vm_flags
& 7];
77 vma
->vm_ops
= &syscall32_vm_ops
;
80 down_write(&mm
->mmap_sem
);
81 insert_vm_struct(mm
, vma
);
82 mm
->total_vm
+= npages
;
83 up_write(&mm
->mmap_sem
);
87 static int __init
init_syscall32(void)
89 syscall32_page
= (void *)get_zeroed_page(GFP_KERNEL
);
91 panic("Cannot allocate syscall32 page");
92 if (use_sysenter
> 0) {
93 memcpy(syscall32_page
, syscall32_sysenter
,
94 syscall32_sysenter_end
- syscall32_sysenter
);
96 memcpy(syscall32_page
, syscall32_syscall
,
97 syscall32_syscall_end
- syscall32_syscall
);
102 __initcall(init_syscall32
);
104 /* May not be __init: called during resume */
105 void syscall32_cpu_init(void)
107 if (use_sysenter
< 0)
108 use_sysenter
= (boot_cpu_data
.x86_vendor
== X86_VENDOR_INTEL
);
110 /* Load these always in case some future AMD CPU supports
111 SYSENTER from compat mode too. */
112 checking_wrmsrl(MSR_IA32_SYSENTER_CS
, (u64
)__KERNEL_CS
);
113 checking_wrmsrl(MSR_IA32_SYSENTER_ESP
, 0ULL);
114 checking_wrmsrl(MSR_IA32_SYSENTER_EIP
, (u64
)ia32_sysenter_target
);
116 wrmsrl(MSR_CSTAR
, ia32_cstar_target
);