/*
 * Compatibility mode system call entry point for x86-64.
 *
 * Copyright 2000-2002 Andi Kleen, SuSE Labs.
 */
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/errno.h>
#include <asm/ia32_unistd.h>
#include <asm/thread_info.h>
#include <asm/segment.h>
#include <asm/irqflags.h>
#include <linux/linkage.h>
#include <linux/err.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysexit_audit		ia32_ret_from_sys_call
#define sysretl_audit		ia32_ret_from_sys_call
        .section .entry.text, "ax"

        .macro CLEAR_RREGS _r9=rax
/*
 * Reload arg registers from stack in case ptrace changed them.
 * We don't reload %eax because syscall_trace_enter() returned
 * the %rax value we should see. Instead, we just truncate that
 * value to 32 bits again as we did on entry from user mode.
 * If it's a new value set by user_regset during entry tracing,
 * this matches the normal truncation of the user-mode value.
 * If it's -1 to make us punt the syscall, then (u32)-1 is still
 * an appropriately invalid value.
 */
        .macro LOAD_ARGS32 _r9=0
        movl    %eax,%eax               /* zero extension */
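        /*
         * Example of the truncation at work: if a tracer stores -1 in the
         * tracee's eax to suppress the syscall, the value seen here becomes
         * (u32)-1 = 0xffffffff, which fails the range check at the macro's
         * call sites:
         *
         *      cmpq    $(IA32_NR_syscalls-1),%rax
         *      ja      int_ret_from_sys_call
         *
         * so the bogus number never indexes ia32_sys_call_table.
         */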
        .macro CFI_STARTPROC32 simple

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret32)
ENDPROC(native_usergs_sysret32)

ENTRY(native_irq_enable_sysexit)
ENDPROC(native_irq_enable_sysexit)
/*
 * 32bit SYSENTER instruction entry.
 *
 * SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs.
 * IF and VM in rflags are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save the old rip (!!!) or rflags.
 *
 * eax	system call number
 *
 * This is purely a fast path. For anything complicated we use the int 0x80
 * path below. We set up a complete hardware stack frame to share code
 * with the int 0x80 path.
 */
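/*
 * For orientation: a sketch (assumption, not code from this file) of the
 * per-cpu MSR programming that SYSENTER relies on, done at CPU setup:
 *
 *	wrmsrl(MSR_IA32_SYSENTER_CS,  __KERNEL_CS);
 *	wrmsrl(MSR_IA32_SYSENTER_ESP, <per-cpu kernel stack pointer>);
 *	wrmsrl(MSR_IA32_SYSENTER_EIP, (unsigned long)ia32_sysenter_target);
 *
 * SYSENTER then loads cs from MSR_IA32_SYSENTER_CS (ss = cs + 8) and
 * rip/rsp from the other two MSRs, which is what lands execution here.
 */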
ENTRY(ia32_sysenter_target)
        CFI_STARTPROC32 simple

        /*
         * Interrupts are off on entry.
         * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
         * it is too small to ever cause noticeable irq latency.
         */
        movq    PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
        ENABLE_INTERRUPTS(CLBR_NONE)

        /* Zero-extending 32-bit regs, do not remove */

        movl    ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
        /* Construct struct pt_regs on stack */
        pushq_cfi       $__USER32_DS            /* pt_regs->ss */
        pushq_cfi       %rbp                    /* pt_regs->sp */
        pushfq_cfi                              /* pt_regs->flags */
        pushq_cfi       $__USER32_CS            /* pt_regs->cs */
        pushq_cfi       %r10                    /* pt_regs->ip = thread_info->sysenter_return */
        pushq_cfi_reg   rax                     /* pt_regs->orig_ax */
        pushq_cfi_reg   rdi                     /* pt_regs->di */
        pushq_cfi_reg   rsi                     /* pt_regs->si */
        pushq_cfi_reg   rdx                     /* pt_regs->dx */
        pushq_cfi_reg   rcx                     /* pt_regs->cx */
        pushq_cfi_reg   rax                     /* pt_regs->ax */
        sub     $(10*8), %rsp                   /* pt_regs->r8-11, bp, bx, r12-15 not saved */
        CFI_ADJUST_CFA_OFFSET 10*8
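        /*
         * Resulting frame at %rsp, matching struct pt_regs (offsets follow
         * the standard x86-64 pt_regs layout; only the slots listed as
         * "pushed above" were actually written):
         *
         *      +0   .. +72:   r15 r14 r13 r12 bp bx r11 r10 r9 r8   (reserved, not written)
         *      +80  .. +120:  ax cx dx si di orig_ax                (pushed above)
         *      +128 .. +160:  ip cs flags sp ss                     (pushed above, iret-frame order)
         */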
        /*
         * no need to do an access_ok check here because rbp has been
         * 32bit zero extended
         */
        _ASM_EXTABLE(1b, ia32_badarg)

        /*
         * Sysenter doesn't filter flags, so we need to clear NT
         * ourselves. To save a few cycles, we can check whether
         * NT was set instead of doing an unconditional popfq.
         */
        testl   $X86_EFLAGS_NT, EFLAGS(%rsp)
        jnz     sysenter_fix_flags
sysenter_flags_fixed:
        orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
        testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        jnz     sysenter_tracesys
        cmpq    $(IA32_NR_syscalls-1), %rax
        /* 32bit syscall -> 64bit C ABI argument conversion */
        movl    %edi, %r8d              /* arg5 */
        movl    %ebp, %r9d              /* arg6 */
        xchg    %ecx, %esi              /* rsi:arg2, rcx:arg4 */
        movl    %ebx, %edi              /* arg1 */
        movl    %edx, %edx              /* arg3 (zero extension) */
        call    *ia32_sys_call_table(,%rax,8)
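        /*
         * For reference, the conversion just performed maps the i386 syscall
         * registers onto the x86-64 C calling convention like this (each
         * 32-bit move also zero-extends into the full 64-bit register):
         *
         *      ebx/arg1 -> edi         ecx/arg2 -> esi
         *      edx/arg3 -> edx         esi/arg4 -> ecx
         *      edi/arg5 -> r8d         ebp/arg6 -> r9d
         */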
        DISABLE_INTERRUPTS(CLBR_NONE)
        testl   $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)

sysexit_from_sys_call:
        /*
         * NB: SYSEXIT is not obviously safe for 64-bit kernels -- an
         * NMI between STI and SYSEXIT has poorly specified behavior,
         * and an NMI followed by an IRQ with usergs is fatal. So
         * we just pretend we're using SYSEXIT but we really use
         * SYSRETL.
         *
         * This code path is still called 'sysexit' because it pairs
         * with 'sysenter' and it uses the SYSENTER calling convention.
         */
        andl    $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
        movl    RIP(%rsp), %ecx         /* User %eip */
        xorl    %edx, %edx              /* avoid info leaks */
        movl    EFLAGS(%rsp), %r11d     /* User eflags */
        /*CFI_RESTORE rflags*/
        /*
         * SYSRETL works even on Intel CPUs. Use it in preference to SYSEXIT,
         * since it avoids a dicey window with interrupts enabled.
         */

        /*
         * USERGS_SYSRET32 does:
         *  gsbase = user's gs base
         *  eip = ecx
         *  eflags = r11
         *  cs and ss are reloaded from the SYSCALL MSRs
         *
         * The prologue set RIP(%rsp) to VDSO32_SYSENTER_RETURN, the vDSO's
         * sysenter return trampoline.
         *
         * Therefore, we invoke SYSRETL with EDX and R8-R10 zeroed to
         * avoid info leaks. R11 ends up with VDSO32_SYSENTER_RETURN's
         * address (already known to user code), and R12-R15 are
         * callee-saved and therefore don't contain any interesting
         * kernel data.
         */
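        /*
         * A minimal sketch (assumed, not verbatim) of the scrub-and-return
         * sequence the comment above describes:
         *
         *      xorl    %r8d, %r8d              # a 32-bit write clears the
         *      xorl    %r9d, %r9d              # whole 64-bit register
         *      xorl    %r10d, %r10d
         *      USERGS_SYSRET32                 # swapgs + sysretl to 32-bit user mode
         */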
#ifdef CONFIG_AUDITSYSCALL
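/*
 * The register marshalling in auditsys_entry_common below assumes the audit
 * entry hook has roughly this C prototype (sketch, see kernel/auditsc.c):
 *
 *	void __audit_syscall_entry(int major, unsigned long a1,
 *				   unsigned long a2, unsigned long a3,
 *				   unsigned long a4);
 *
 * so edi carries the syscall number and esi/edx/ecx/r8d the first four args.
 */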
        .macro auditsys_entry_common
        movl    %esi, %r8d              /* 5th arg: 4th syscall arg */
        movl    %ecx, %r9d              /* swap with edx */
        movl    %edx, %ecx              /* 4th arg: 3rd syscall arg */
        movl    %r9d, %edx              /* 3rd arg: 2nd syscall arg */
        movl    %ebx, %esi              /* 2nd arg: 1st syscall arg */
        movl    %eax, %edi              /* 1st arg: syscall number */
        call    __audit_syscall_entry
        movl    RAX(%rsp), %eax         /* reload syscall number */
        cmpq    $(IA32_NR_syscalls-1), %rax
        movl    %ebx, %edi              /* reload 1st syscall arg */
        movl    RCX(%rsp), %esi         /* reload 2nd syscall arg */
        movl    RDX(%rsp), %edx         /* reload 3rd syscall arg */
        movl    RSI(%rsp), %ecx         /* reload 4th syscall arg */
        movl    RDI(%rsp), %r8d         /* reload 5th syscall arg */
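/*
 * Likewise, auditsys_exit below reports the result through the audit exit
 * hook, assumed to look roughly like (sketch):
 *
 *	void __audit_syscall_exit(int success, long return_code);
 *
 * with edi holding the success flag and rsi the (possibly sign-extended)
 * return value.
 */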
        .macro auditsys_exit exit
        testl   $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        jnz     ia32_ret_from_sys_call
        ENABLE_INTERRUPTS(CLBR_NONE)
        movl    %eax, %esi              /* second arg, syscall return value */
        cmpl    $-MAX_ERRNO, %eax       /* is it an error? */
        movslq  %eax, %rsi              /* if error, sign-extend to 64 bits */
1:      setbe   %al                     /* al = 1 if success, 0 if error */
        movzbl  %al, %edi               /* zero-extend that into %edi (success flag) */
        call    __audit_syscall_exit
        movq    RAX(%rsp), %rax         /* reload syscall return value */
        movl    $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
        DISABLE_INTERRUPTS(CLBR_NONE)
        testl   %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        auditsys_entry_common
        movl    %ebp, %r9d              /* reload 6th syscall arg */
        jmp     sysenter_dispatch

        auditsys_exit sysexit_from_sys_call

        pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
        jmp     sysenter_flags_fixed
#ifdef CONFIG_AUDITSYSCALL
        testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        movq    $-ENOSYS, RAX(%rsp)     /* ptrace can change this for a bad syscall */
        movq    %rsp, %rdi              /* &pt_regs -> arg1 */
        call    syscall_trace_enter
        LOAD_ARGS32                     /* reload args from stack in case ptrace changed them */
        cmpq    $(IA32_NR_syscalls-1), %rax
        ja      int_ret_from_sys_call   /* sysenter_tracesys has set RAX(%rsp) */

ENDPROC(ia32_sysenter_target)
/*
 * 32bit SYSCALL instruction entry.
 *
 * 32bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Note: rflags saving+masking-with-MSR happens only in Long mode
 * (in legacy 32bit mode, IF, RF and VM bits are cleared and that's it).
 * Don't get confused: rflags saving+masking depends on the Long Mode Active
 * bit (EFER.LMA=1), NOT on the bitness of the userspace where SYSCALL executes
 * or on the target CS descriptor's L bit (SYSCALL does not read segment
 * descriptors).
 *
 * eax	system call number
 * ebp	arg2	(note: not saved in the stack frame, should not be touched)
 *
 * This is purely a fast path. For anything complicated we use the int 0x80
 * path below. We set up a complete hardware stack frame to share code
 * with the int 0x80 path.
 */
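/*
 * For orientation: a sketch (assumption, not code from this file) of the
 * per-cpu MSR setup such an entry relies on, done elsewhere in the kernel:
 *
 *	wrmsrl(MSR_CSTAR, (unsigned long)ia32_cstar_target);
 *	wrmsrl(MSR_SYSCALL_MASK, X86_EFLAGS_TF | X86_EFLAGS_DF |
 *				 X86_EFLAGS_IF | X86_EFLAGS_NT);
 *
 * MSR_CSTAR is where a 32-bit SYSCALL from long mode fetches the new rip;
 * MSR_SYSCALL_MASK holds the rflags bits cleared on entry (the mask shown
 * here is illustrative, not the kernel's actual value).
 */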
ENTRY(ia32_cstar_target)
        CFI_STARTPROC32 simple
        /*CFI_REGISTER rflags,r11*/

        /*
         * Interrupts are off on entry.
         * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
         * it is too small to ever cause noticeable irq latency.
         */
        movq    PER_CPU_VAR(kernel_stack), %rsp
        ENABLE_INTERRUPTS(CLBR_NONE)
        /* Zero-extending 32-bit regs, do not remove */

        /* Construct struct pt_regs on stack */
        pushq_cfi       $__USER32_DS            /* pt_regs->ss */
        pushq_cfi       %r8                     /* pt_regs->sp */
        pushq_cfi       %r11                    /* pt_regs->flags */
        pushq_cfi       $__USER32_CS            /* pt_regs->cs */
        pushq_cfi       %rcx                    /* pt_regs->ip */
        pushq_cfi_reg   rax                     /* pt_regs->orig_ax */
        pushq_cfi_reg   rdi                     /* pt_regs->di */
        pushq_cfi_reg   rsi                     /* pt_regs->si */
        pushq_cfi_reg   rdx                     /* pt_regs->dx */
        pushq_cfi_reg   rbp                     /* pt_regs->cx */
        pushq_cfi_reg   rax                     /* pt_regs->ax */
        sub     $(10*8), %rsp                   /* pt_regs->r8-11, bp, bx, r12-15 not saved */
        CFI_ADJUST_CFA_OFFSET 10*8
        /*
         * no need to do an access_ok check here because r8 has been
         * 32bit zero extended
         */
        _ASM_EXTABLE(1b, ia32_badarg)

        orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
        testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        cmpq    $(IA32_NR_syscalls-1), %rax
        /* 32bit syscall -> 64bit C ABI argument conversion */
        movl    %edi, %r8d              /* arg5 */
        /* r9 already loaded */         /* arg6 */
        xchg    %ecx, %esi              /* rsi:arg2, rcx:arg4 */
        movl    %ebx, %edi              /* arg1 */
        movl    %edx, %edx              /* arg3 (zero extension) */
        call    *ia32_sys_call_table(,%rax,8)
        DISABLE_INTERRUPTS(CLBR_NONE)
        testl   $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
sysretl_from_sys_call:
        andl    $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
        movl    EFLAGS(%rsp), %r11d
        /*CFI_REGISTER rflags,r11*/
        /*
         * 64bit->32bit SYSRET restores eip from ecx,
         * eflags from r11 (but RF and VM bits are forced to 0),
         * cs and ss are loaded from MSRs.
         * (Note: 32bit->32bit SYSRET is different: since r11
         * does not exist, it merely sets eflags.IF=1).
         *
         * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
         * descriptor is not reinitialized. This means that we must
         * avoid SYSRET with SS == NULL, which could happen if we schedule,
         * exit the kernel, and re-enter using an interrupt vector. (All
         * interrupt entries on x86_64 set SS to NULL.) We prevent that
         * from happening by reloading SS in __switch_to.
         */
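        /*
         * Sketch of the __switch_to() workaround referenced above (assumed
         * shape, simplified; not code from this file):
         *
         *      if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
         *              unsigned short ss_sel;
         *              savesegment(ss, ss_sel);
         *              if (ss_sel != __KERNEL_DS)
         *                      loadsegment(ss, __KERNEL_DS);
         *      }
         *
         * Keeping SS loaded with a valid descriptor across a reschedule means
         * the SYSRET taken by this path never runs with SS == NULL.
         */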
#ifdef CONFIG_AUDITSYSCALL
        movl    %r9d, R9(%rsp)          /* register to be clobbered by call */
        auditsys_entry_common
        movl    R9(%rsp), %r9d          /* reload 6th syscall arg */

        auditsys_exit sysretl_from_sys_call
#ifdef CONFIG_AUDITSYSCALL
        testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        movq    $-ENOSYS, RAX(%rsp)     /* ptrace can change this for a bad syscall */
        movq    %rsp, %rdi              /* &pt_regs -> arg1 */
        call    syscall_trace_enter
        LOAD_ARGS32 1                   /* reload args from stack in case ptrace changed them */
        cmpq    $(IA32_NR_syscalls-1), %rax
        ja      int_ret_from_sys_call   /* cstar_tracesys has set RAX(%rsp) */
END(ia32_cstar_target)
/*
 * Emulated IA32 system calls via int 0x80.
 *
 * eax	system call number
 * ebp	arg6	(note: not saved in the stack frame, should not be touched)
 *
 * Uses the same stack frame as the x86-64 version.
 * All registers except eax must be saved (but ptrace may violate that).
 * Arguments are zero extended. For system calls that want sign extension and
 * take long arguments a wrapper is needed. Most calls can just be called
 * directly.
 * Assumes it is only called from user space and entered with interrupts off.
 */
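/*
 * Example of such a wrapper (illustrative sketch only; sys32_example and
 * sys_example are made-up names, not real syscalls): a negative 32-bit long
 * argument arrives here zero-extended, so a compat wrapper sign-extends it
 * before calling the native 64-bit implementation:
 *
 *	asmlinkage long sys32_example(unsigned int fd, int offset)
 *	{
 *		return sys_example(fd, (long)offset);	// int -> long sign-extends
 *	}
 */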
        CFI_STARTPROC32 simple
        /*CFI_REL_OFFSET ss,4*8 */
        CFI_REL_OFFSET rsp,3*8
        /*CFI_REL_OFFSET rflags,2*8 */
        /*CFI_REL_OFFSET cs,1*8 */
        CFI_REL_OFFSET rip,0*8

        /*
         * Interrupts are off on entry.
         * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
         * it is too small to ever cause noticeable irq latency.
         */
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        ENABLE_INTERRUPTS(CLBR_NONE)
        /* Zero-extending 32-bit regs, do not remove */

        /* Construct struct pt_regs on stack (iret frame is already on stack) */
        pushq_cfi_reg   rax                     /* pt_regs->orig_ax */
        pushq_cfi_reg   rdi                     /* pt_regs->di */
        pushq_cfi_reg   rsi                     /* pt_regs->si */
        pushq_cfi_reg   rdx                     /* pt_regs->dx */
        pushq_cfi_reg   rcx                     /* pt_regs->cx */
        pushq_cfi_reg   rax                     /* pt_regs->ax */
        sub     $(10*8), %rsp                   /* pt_regs->r8-11, bp, bx, r12-15 not saved */
        CFI_ADJUST_CFA_OFFSET 10*8
        orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
        testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        cmpq    $(IA32_NR_syscalls-1), %rax

        /* 32bit syscall -> 64bit C ABI argument conversion */
        movl    %edi, %r8d              /* arg5 */
        movl    %ebp, %r9d              /* arg6 */
        xchg    %ecx, %esi              /* rsi:arg2, rcx:arg4 */
        movl    %ebx, %edi              /* arg1 */
        movl    %edx, %edx              /* arg3 (zero extension) */
        call    *ia32_sys_call_table(,%rax,8)   # xxx: rip relative

ia32_ret_from_sys_call:
        jmp     int_ret_from_sys_call
        movq    $-ENOSYS, RAX(%rsp)     /* ptrace can change this for a bad syscall */
        movq    %rsp, %rdi              /* &pt_regs -> arg1 */
        call    syscall_trace_enter
        LOAD_ARGS32                     /* reload args from stack in case ptrace changed them */
        cmpq    $(IA32_NR_syscalls-1), %rax
        ja      int_ret_from_sys_call   /* ia32_tracesys has set RAX(%rsp) */

        movq    $0, ORIG_RAX(%rsp)
        .macro PTREGSCALL label, func
        leaq    \func(%rip), %rax
        jmp     ia32_ptregs_common

        PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
        PTREGSCALL stub32_sigreturn, sys32_sigreturn
        PTREGSCALL stub32_fork, sys_fork
        PTREGSCALL stub32_vfork, sys_vfork
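/*
 * For reference, each PTREGSCALL use above expands to a small stub shaped
 * roughly like this (sketch based on the visible part of the macro):
 *
 * stub32_fork:
 *	leaq	sys_fork(%rip), %rax
 *	jmp	ia32_ptregs_common
 *
 * leaving the handler's address in %rax for ia32_ptregs_common to dispatch
 * through.
 */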
        leaq    sys_clone(%rip), %rax
        jmp     ia32_ptregs_common
        CFI_STARTPROC32 simple
        CFI_DEF_CFA     rsp,SIZEOF_PTREGS
        CFI_REL_OFFSET  rax,RAX
        CFI_REL_OFFSET  rcx,RCX
        CFI_REL_OFFSET  rdx,RDX
        CFI_REL_OFFSET  rsi,RSI
        CFI_REL_OFFSET  rdi,RDI
        CFI_REL_OFFSET  rip,RIP
        /* CFI_REL_OFFSET cs,CS */
        /* CFI_REL_OFFSET rflags,EFLAGS */
        CFI_REL_OFFSET  rsp,RSP
        /* CFI_REL_OFFSET ss,SS */
END(ia32_ptregs_common)