 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015 Joyent, Inc.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/machbrand.h>
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/systm.h>
#include <sys/segments.h>
#include <sys/ftrace.h>
#include <sys/traptrace.h>
#include <sys/clock.h>
#include <sys/model.h>
#include <sys/panic.h>
#include <sys/hypervisor.h>
/*
 * We implement five flavours of system call entry points
 *
 * -	syscall/sysretq		(amd64 generic)
 * -	syscall/sysretl		(i386 plus SYSC bit)
 * -	sysenter/sysexit	(i386 plus SEP bit)
 * -	int/iret		(i386 generic)
 * -	lcall/iret		(i386 generic)
 *
 * The current libc included in Solaris uses int/iret as the base unoptimized
 * kernel entry method.  Older libc implementations and legacy binaries may use
 * the lcall call gate, so it must continue to be supported.
 *
 * System calls that use an lcall call gate are processed in trap() via a
 * segment-not-present trap, i.e. lcalls are extremely slow(!).
 *
 * The basic pattern used in the 32-bit SYSC handler at this point in time is
 * to have the bare minimum of assembler, and get to the C handlers as
 * quickly as possible.
 *
 * The 64-bit handler is much closer to the sparcv9 handler; that's
 * because of passing arguments in registers.  The 32-bit world still
 * passes arguments on the stack -- that makes that handler substantially
 * more complex.
 *
 * The two handlers share a few code fragments which are broken
 * out into preprocessor macros below.
 *
 * XX64	come back and speed all this up later.  The 32-bit stuff looks
 * especially easy to speed up the argument copying part ..
 */
/*
 * Notes about segment register usage (c.f. the 32-bit kernel)
 *
 * In the 32-bit kernel, segment registers are dutifully saved and
 * restored on all mode transitions because the kernel uses them directly.
 * When the processor is running in 64-bit mode, segment registers are
 * largely ignored; %cs and %ss are instead
 * controlled by the hardware mechanisms that make mode transitions.
 *
 * The remaining segment registers have to either be pointing at a valid
 * descriptor, i.e. with the 'present' bit set, or they can be NULL descriptors.
 * For %fs and %gs,
 * fsbase and gsbase are used to control the place they really point at.
 * The kernel only depends on %gs, and controls its own gsbase via swapgs.
 *
 * Note that loading segment registers is still costly because the GDT
 * lookup still happens (this is because the hardware can't know that we're
 * not setting up these segment registers for a 32-bit program).  Thus we
 * avoid doing this in the syscall path, and defer them to lwp context switch
 * handlers, so the register values remain virtualized to the lwp.
 */
#if defined(SYSCALLTRACE)
#define	ORL_SYSCALLTRACE(r32)		\
	orl	syscalltrace(%rip), r32
#else
#define	ORL_SYSCALLTRACE(r32)
#endif
/*
 * In the 32-bit kernel, we do absolutely nothing before getting into the
 * brand callback checks.  In 64-bit land, we do swapgs and then come here.
 * We assume that the %rsp- and %r15-stashing fields in the CPU structure
 * are still unused.
 *
 * Check if a brand_mach_ops callback is defined for the specified callback_id
 * type.  If so invoke it with the kernel's %gs value loaded and the following
 * data on the stack:
 *
 *	   stack:  --------------------------------------
 *	      32 | callback pointer			|
 *	    | 24 | user (or interrupt) stack pointer	|
 *	    | 16 | lwp pointer				|
 *	    v  8 | userland return address		|
 *	       0 | callback wrapper return addr		|
 *	         --------------------------------------
 *
 * Since we're pushing the userland return address onto the kernel stack
 * we need to get that address without accessing the user's stack (since we
 * can't trust that data).  There are different ways to get the userland
 * return address depending on how the syscall trap was made:
 *
 * a) For sys_syscall and sys_syscall32 the return address is in %rcx.
 * b) For sys_sysenter the return address is in %rdx.
 * c) For sys_int80 and sys_syscall_int (int91), upon entry into the macro,
 *    the stack pointer points at the state saved when we took the interrupt:
 *	 ------------------------
 *    |  | user's %ss		|
 *    |  | user's %esp		|
 *    |  | EFLAGS register	|
 *    v  | user's %cs		|
 *       | user's %eip		|
 *	 ------------------------
 *
 * The 2nd parameter to the BRAND_CALLBACK macro is either the
 * BRAND_URET_FROM_REG or BRAND_URET_FROM_INTR_STACK macro.  These macros are
 * used to generate the proper code to get the userland return address for
 * each syscall entry point.
 *
 * The interface to the brand callbacks on the 64-bit kernel assumes %r15
 * is available as a scratch register within the callback.  If the callback
 * returns within the kernel then this macro will restore %r15.  If the
 * callback is going to return directly to userland then it should restore
 * %r15 before returning to userland.
 */
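/*
 * For illustration only, a hypothetical brand callback could inspect the
 * state laid out above like this (offsets are relative to %rsp on entry
 * to the callback; per the interface above, only %r15 is free as scratch).
 * This is a sketch, not part of any actual brand module:
 *
 *	movq	8(%rsp), %r15		// userland return address
 *	...				// brand-specific checks on it
 *	movq	16(%rsp), %r15		// or: look at the lwp pointer
 *	...
 *	ret				// resume BRAND_CALLBACK's cleanup at 1:
 */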
#define	BRAND_URET_FROM_REG(rip_reg)					\
	pushq	rip_reg			/* push the return address	*/
/*
 * The interrupt stack pointer we saved on entry to the BRAND_CALLBACK macro
 * is currently pointing at the user return address (%eip).
 */
#define	BRAND_URET_FROM_INTR_STACK()					\
	movq	%gs:CPU_RTMP_RSP, %r15	/* grab the intr. stack pointer	*/ ;\
	pushq	(%r15)			/* push the return address	*/
#define	BRAND_CALLBACK(callback_id, push_userland_ret)			    \
	movq	%rsp, %gs:CPU_RTMP_RSP	/* save the stack pointer	*/ ;\
	movq	%r15, %gs:CPU_RTMP_R15	/* save %r15			*/ ;\
	movq	%gs:CPU_THREAD, %r15	/* load the thread pointer	*/ ;\
	movq	T_STACK(%r15), %rsp	/* switch to the kernel stack	*/ ;\
	subq	$16, %rsp		/* save space for 2 pointers	*/ ;\
	pushq	%r14			/* save %r14			*/ ;\
	movq	%gs:CPU_RTMP_RSP, %r14					   ;\
	movq	%r14, 8(%rsp)		/* stash the user stack pointer	*/ ;\
	popq	%r14			/* restore %r14			*/ ;\
	movq	T_LWP(%r15), %r15	/* load the lwp pointer		*/ ;\
	pushq	%r15			/* push the lwp pointer		*/ ;\
	movq	LWP_PROCP(%r15), %r15	/* load the proc pointer	*/ ;\
	movq	P_BRAND(%r15), %r15	/* load the brand pointer	*/ ;\
	movq	B_MACHOPS(%r15), %r15	/* load the machops pointer	*/ ;\
	movq	_CONST(_MUL(callback_id, CPTRSIZE))(%r15), %r15		   ;\
	cmpq	$0, %r15		/* callback defined?		*/ ;\
	je	1f			/* if not, just return		*/ ;\
	movq	%r15, 16(%rsp)		/* save the callback pointer	*/ ;\
	push_userland_ret		/* push the return address	*/ ;\
	call	*24(%rsp)		/* call callback		*/ ;\
1:	movq	%gs:CPU_RTMP_R15, %r15	/* restore %r15			*/ ;\
	movq	%gs:CPU_RTMP_RSP, %rsp	/* restore the stack pointer	*/
#define	MSTATE_TRANSITION(from, to)		\
	movl	$from, %edi;			\
	movl	$to, %esi;			\
	call	syscall_mstate
/*
 * Check to see if a simple (direct) return is possible i.e.
 *
 *	if (t->t_post_sys_ast | syscalltrace |
 *	    lwp->lwp_pcb.pcb_rupdate == 1)
 *		do full version	;
 *
 * Postconditions:
 * -	condition code NE is set if post-sys is too complex
 * -	rtmp is zeroed if it isn't (we rely on this!)
 */
#define	CHECK_POSTSYS_NE(t, ltmp, rtmp)		\
	movq	T_LWP(t), ltmp;			\
	movzbl	PCB_RUPDATE(ltmp), rtmp;	\
	ORL_SYSCALLTRACE(rtmp);			\
	orl	T_POST_SYS_AST(t), rtmp;	\
	cmpl	$0, rtmp
/*
 * Fix up the lwp, thread, and eflags for a successful return
 *
 * Preconditions:
 * -	zwreg contains zero
 */
#define	SIMPLE_SYSCALL_POSTSYS(t, lwp, zwreg)		\
	movb	$LWP_USER, LWP_STATE(lwp);		\
	movw	zwreg, T_SYSNUM(t);			\
	andb	$_CONST(0xffff - PS_C), REGOFF_RFL(%rsp)
#if defined(DEBUG)
/*
 * ASSERT(lwptoregs(lwp) == rp);
 *
 * This may seem obvious, but very odd things happen if this
 * assertion is false.
 *
 * Preconditions:
 *	(%rsp is ready for normal call sequence)
 * Postconditions (if assertion is true):
 *
 * ASSERT(rp->r_cs == descnum)
 *
 * The code selector is written into the regs structure when the
 * lwp stack is created.  We use this ASSERT to validate that
 * the regs structure really matches how we came in.
 *
 * Preconditions:
 *	(%rsp is ready for normal call sequence)
 * Postconditions (if assertion is true):
 *
 * ASSERT(lwp->lwp_pcb.pcb_rupdate == 0);
 *
 * If this is false, it means that we returned to userland without
 * updating the segment registers as we were supposed to.
 *
 * Note that we must ensure no interrupts or other traps intervene
 * between entering privileged mode and performing the assertion,
 * otherwise we may perform a context switch on the thread, which
 * will end up setting pcb_rupdate to 1 again.
 */
__lwptoregs_msg:
	.string	"syscall_asm_amd64.s:%d lwptoregs(%p) [%p] != rp [%p]"

	.string	"syscall_asm_amd64.s:%d rp->r_cs [%ld] != %ld"

__no_rupdate_msg:
	.string	"syscall_asm_amd64.s:%d lwp %p, pcb_rupdate != 0"
#define	ASSERT_LWPTOREGS(lwp, rp)			\
	movq	LWP_REGS(lwp), %r11;			\
	cmpq	rp, %r11;				\
	je	7f;					\
	leaq	__lwptoregs_msg(%rip), %rdi;		\
	movl	$__LINE__, %esi;			\
	movq	lwp, %rdx;				\
	movq	%r11, %rcx;				\
	movq	rp, %r8;				\
	xorl	%eax, %eax;				\
	call	panic;					\
7:

#define	ASSERT_NO_RUPDATE_PENDING(lwp)			\
	testb	$0x1, PCB_RUPDATE(lwp);			\
	je	8f;					\
	movq	lwp, %rdx;				\
	leaq	__no_rupdate_msg(%rip), %rdi;		\
	movl	$__LINE__, %esi;			\
	xorl	%eax, %eax;				\
	call	panic;					\
8:

#else	/* DEBUG */
#define	ASSERT_LWPTOREGS(lwp, rp)
#define	ASSERT_NO_RUPDATE_PENDING(lwp)
#endif	/* DEBUG */
#if defined(TRAPTRACE)
/*
 * Do the traptrace thing and restore any registers we used
 * in situ.  Assumes that %rsp is pointing at the base of
 * the struct regs, obviously ..
 */
#define	SYSCALL_TRAPTRACE(ttype)				\
	TRACE_PTR(%rdi, %rbx, %ebx, %rcx, ttype);		\
	TRACE_REGS(%rdi, %rsp, %rbx, %rcx);			\
	TRACE_STAMP(%rdi);	/* rdtsc clobbers %eax, %edx */	\
	movq	REGOFF_RAX(%rsp), %rax;				\
	movq	REGOFF_RBX(%rsp), %rbx;				\
	movq	REGOFF_RCX(%rsp), %rcx;				\
	movq	REGOFF_RDX(%rsp), %rdx;				\
	movl	%eax, TTR_SYSNUM(%rdi);				\
	movq	REGOFF_RDI(%rsp), %rdi

#define	SYSCALL_TRAPTRACE32(ttype)				\
	SYSCALL_TRAPTRACE(ttype);				\
	/* paranoia: clean the top 32-bits of the registers */	\
	movl	%eax, %eax;					\
	movl	%ebx, %ebx;					\
	movl	%ecx, %ecx;					\
	movl	%edx, %edx;					\
	movl	%edi, %edi

#else	/* TRAPTRACE */
#define	SYSCALL_TRAPTRACE(ttype)
#define	SYSCALL_TRAPTRACE32(ttype)
#endif	/* TRAPTRACE */
/*
 * The 64-bit libc syscall wrapper does this:
 *
 *	movq	%rcx, %r10	-- because syscall smashes %rcx
 *
 * Thus when we come into the kernel:
 *
 *	%rdi, %rsi, %rdx, %r10, %r8, %r9 contain first six args
 *	%rax is the syscall number
 *	%r12-%r15 contain caller state
 *
 * The syscall instruction arranges that:
 *
 *	%rcx contains the return %rip
 *	%r11d contains bottom 32-bits of %rflags
 *	%rflags is masked (as determined by the SFMASK msr)
 *	%cs is set to UCS_SEL (as determined by the STAR msr)
 *	%ss is set to UDS_SEL (as determined by the STAR msr)
 *	%rip is set to sys_syscall (as determined by the LSTAR msr)
 *
 * Or in other words, we have no registers available at all.
 * Only swapgs can save us!
 *
 * Under the hypervisor, the swapgs has happened already.  However, the
 * state of the world is very different from that we're familiar with.
 *
 * In particular, we have a stack structure like that for interrupt
 * gates, except that the %cs and %ss registers are modified for reasons
 * that are not entirely clear.  Critically, the %rcx/%r11 values do
 * *not* reflect the usage of those registers under a 'real' syscall[1];
 * the stack, therefore, looks like this:
 *
 *	0x0(rsp)	potentially junk %rcx
 *	0x8(rsp)	potentially junk %r11
 *	0x10(rsp)	user %rip
 *	0x18(rsp)	modified %cs
 *	0x20(rsp)	user %rflags
 *	0x28(rsp)	user %rsp
 *	0x30(rsp)	modified %ss
 *
 * and before continuing on, we must load the %rip into %rcx, the %rflags
 * into %r11, and restore the user's %rsp (see XPV_SYSCALL_PROD below).
 *
 * [1] They used to, and we relied on it, but this was broken in 3.1.1.
 */
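/*
 * For illustration, a hypothetical 64-bit userland stub of the shape
 * described above might look like this (SYS_read and the __cerror-style
 * error path are assumptions for the sketch, not the actual libc code):
 *
 *	movq	%rcx, %r10		// syscall is about to smash %rcx
 *	movl	$SYS_read, %eax		// syscall number
 *	syscall
 *	jb	__cerror		// carry flag indicates an error
 *	ret
 */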
#if defined(__xpv)
#define	XPV_SYSCALL_PROD						\
	movq	0x10(%rsp), %rcx;					\
	movq	0x20(%rsp), %r11;					\
	movq	0x28(%rsp), %rsp
#else
#define	XPV_SYSCALL_PROD	/* nothing */
#endif
size_t	_allsyscalls_size;

	ENTRY_NP2(brand_sys_syscall, _allsyscalls)
	SWAPGS				/* kernel gsbase */
	XPV_SYSCALL_PROD
	BRAND_CALLBACK(BRAND_CB_SYSCALL, BRAND_URET_FROM_REG(%rcx))
	jmp	noprod_sys_syscall
	ALTENTRY(sys_syscall)
	SWAPGS				/* kernel gsbase */
	XPV_SYSCALL_PROD

noprod_sys_syscall:
	movq	%r15, %gs:CPU_RTMP_R15
	movq	%rsp, %gs:CPU_RTMP_RSP

	movq	%gs:CPU_THREAD, %r15
	movq	T_STACK(%r15), %rsp	/* switch from user to kernel stack */

	ASSERT_UPCALL_MASK_IS_SET

	movl	$UCS_SEL, REGOFF_CS(%rsp)
	movq	%rcx, REGOFF_RIP(%rsp)		/* syscall: %rip -> %rcx */
	movq	%r11, REGOFF_RFL(%rsp)		/* syscall: %rfl -> %r11d */
	movl	$UDS_SEL, REGOFF_SS(%rsp)

	movl	%eax, %eax			/* wrapper: sysc# -> %eax */
	movq	%rdi, REGOFF_RDI(%rsp)
	movq	%rsi, REGOFF_RSI(%rsp)
	movq	%rdx, REGOFF_RDX(%rsp)
	movq	%r10, REGOFF_RCX(%rsp)		/* wrapper: %rcx -> %r10 */
	movq	%r10, %rcx			/* arg[3] for direct calls */

	movq	%r8, REGOFF_R8(%rsp)
	movq	%r9, REGOFF_R9(%rsp)
	movq	%rax, REGOFF_RAX(%rsp)
	movq	%rbx, REGOFF_RBX(%rsp)

	movq	%rbp, REGOFF_RBP(%rsp)
	movq	%r10, REGOFF_R10(%rsp)
	movq	%gs:CPU_RTMP_RSP, %r11
	movq	%r11, REGOFF_RSP(%rsp)
	movq	%r12, REGOFF_R12(%rsp)

	movq	%r13, REGOFF_R13(%rsp)
	movq	%r14, REGOFF_R14(%rsp)
	movq	%gs:CPU_RTMP_R15, %r10
	movq	%r10, REGOFF_R15(%rsp)
	movq	$0, REGOFF_SAVFP(%rsp)
	movq	$0, REGOFF_SAVPC(%rsp)
	/*
	 * Copy these registers here in case we end up stopped with
	 * someone (like, say, /proc) messing with our register state.
	 * We don't -restore- them unless we have to in update_sregs.
	 *
	 * Since userland -can't- change fsbase or gsbase directly,
	 * and capturing them involves two serializing instructions,
	 * we don't bother to capture them here.
	 */
	movw	%ds, %bx
	movq	%rbx, REGOFF_DS(%rsp)
	movw	%es, %bx
	movq	%rbx, REGOFF_ES(%rsp)
	movw	%fs, %bx
	movq	%rbx, REGOFF_FS(%rsp)
	movw	%gs, %bx
	movq	%rbx, REGOFF_GS(%rsp)
	/*
	 * Machine state saved in the regs structure on the stack
	 * First six args in %rdi, %rsi, %rdx, %rcx, %r8, %r9
	 * %eax is the syscall number
	 * %rsp is the thread's stack, %r15 is curthread
	 * REG_RSP(%rsp) is the user's stack
	 */
	SYSCALL_TRAPTRACE($TT_SYSC64)

	movq	%rsp, %rbp

	movq	T_LWP(%r15), %r14
	ASSERT_NO_RUPDATE_PENDING(%r14)

	MSTATE_TRANSITION(LMS_USER, LMS_SYSTEM)
	movl	REGOFF_RAX(%rsp), %eax	/* (%rax damaged by mstate call) */

	ASSERT_LWPTOREGS(%r14, %rsp)

	movb	$LWP_SYS, LWP_STATE(%r14)
	incq	LWP_RU_SYSC(%r14)
	movb	$NORMALRETURN, LWP_EOSYS(%r14)

	incq	%gs:CPU_STATS_SYS_SYSCALL

	movw	%ax, T_SYSNUM(%r15)
	movzbl	T_PRE_SYS(%r15), %ebx
	ORL_SYSCALLTRACE(%ebx)
	movq	REGOFF_RDI(%rbp), %rdi
	movq	REGOFF_RSI(%rbp), %rsi
	movq	REGOFF_RDX(%rbp), %rdx
	movq	REGOFF_RCX(%rbp), %rcx
	movq	REGOFF_R8(%rbp), %r8
	movq	REGOFF_R9(%rbp), %r9

	shll	$SYSENT_SIZE_SHIFT, %eax
	leaq	sysent(%rax), %rbx

	call	*SY_CALLC(%rbx)

	movq	%rax, %r12
	movq	%rdx, %r13

	/*
	 * If the handler returns two ints, then we need to split the
	 * 64-bit return value into two 32-bit values.
	 */
	testw	$SE_32RVAL2, SY_FLAGS(%rbx)
	je	5f
	movq	%r12, %r13
	shrq	$32, %r13		/* upper 32-bits into %edx */
	movl	%r12d, %r12d		/* lower 32-bits into %eax */
5:
	/*
	 * Optimistically assume that there's no post-syscall
	 * work to do.  (This is to avoid having to call syscall_mstate()
	 * with interrupts disabled)
	 */
	MSTATE_TRANSITION(LMS_SYSTEM, LMS_USER)

	/*
	 * We must protect ourselves from being descheduled here;
	 * If we were, and we ended up on another cpu, or another
	 * lwp got in ahead of us, it could change the segment
	 * registers without us noticing before we return to userland.
	 */
	CLI(%r14)
	CHECK_POSTSYS_NE(%r15, %r14, %ebx)
	jne	_syscall_post
	/*
	 * We need to protect ourselves against non-canonical return values
	 * because Intel doesn't check for them on sysret (AMD does).  Canonical
	 * addresses on current amd64 processors only use 48-bits for VAs; an
	 * address is canonical if all upper bits (47-63) are identical. If we
	 * find a non-canonical %rip, we opt to go through the full
	 * _syscall_post path which takes us into an iretq which is not
	 * susceptible to the same problems sysret is.
	 *
	 * We're checking for a canonical address by first doing an arithmetic
	 * shift. This will fill in the remaining bits with the value of bit 63.
	 * If the address were canonical, the register would now have either all
	 * zeroes or all ones in it. Therefore we add one (inducing overflow)
	 * and compare against 1. A canonical address will either be zero or one
	 * at this point, hence the use of ja.
	 *
	 * At this point, r12 and r13 have the return value so we can't use
	 * those registers.
	 */
	movq	REGOFF_RIP(%rsp), %rcx
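	/*
	 * A sketch of the canonicalness test described above, assuming
	 * 48-bit virtual addresses (%rcx holds the user %rip just loaded):
	 *
	 *	sarq	$47, %rcx	// replicate bit 47 through bit 63
	 *	incq	%rcx		// canonical values become 0 or 1
	 *	cmpq	$1, %rcx
	 *	ja	_syscall_post	// non-canonical: take the iretq path
	 */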
	SIMPLE_SYSCALL_POSTSYS(%r15, %r14, %bx)

	movq	%r12, REGOFF_RAX(%rsp)
	movq	%r13, REGOFF_RDX(%rsp)
	/*
	 * To get back to userland, we need the return %rip in %rcx and
	 * the return %rfl in %r11d.  The sysretq instruction also arranges
	 * to fix up %cs and %ss; everything else is our responsibility.
	 */
	movq	REGOFF_RDI(%rsp), %rdi
	movq	REGOFF_RSI(%rsp), %rsi
	movq	REGOFF_RDX(%rsp), %rdx
	/* %rcx used to restore %rip value */

	movq	REGOFF_R8(%rsp), %r8
	movq	REGOFF_R9(%rsp), %r9
	movq	REGOFF_RAX(%rsp), %rax
	movq	REGOFF_RBX(%rsp), %rbx

	movq	REGOFF_RBP(%rsp), %rbp
	movq	REGOFF_R10(%rsp), %r10
	/* %r11 used to restore %rfl value */
	movq	REGOFF_R12(%rsp), %r12

	movq	REGOFF_R13(%rsp), %r13
	movq	REGOFF_R14(%rsp), %r14
	movq	REGOFF_R15(%rsp), %r15

	movq	REGOFF_RIP(%rsp), %rcx
	movl	REGOFF_RFL(%rsp), %r11d

#if defined(__xpv)
	addq	$REGOFF_RIP, %rsp
#else
	movq	REGOFF_RSP(%rsp), %rsp
#endif
	/*
	 * There can be no instructions between the ALTENTRY below and
	 * SYSRET or we could end up breaking brand support. See label usage
	 * in sn1_brand_syscall_callback for an example.
	 */
	ASSERT_UPCALL_MASK_IS_SET
#if defined(__xpv)
	SYSRETQ
	ALTENTRY(nopop_sys_syscall_swapgs_sysretq)

	/*
	 * We can only get here after executing a brand syscall
	 * interposition callback handler and simply need to
	 * "sysretq" back to userland. On the hypervisor this
	 * involves the iret hypercall which requires us to construct
	 * just enough of the stack needed for the hypercall.
	 * (rip, cs, rflags, rsp, ss).
	 */
	movq	%rsp, %gs:CPU_RTMP_RSP		/* save user's rsp */
	movq	%gs:CPU_THREAD, %r11
	movq	T_STACK(%r11), %rsp

	movq	%rcx, REGOFF_RIP(%rsp)
	movl	$UCS_SEL, REGOFF_CS(%rsp)
	movq	%gs:CPU_RTMP_RSP, %r11
	movq	%r11, REGOFF_RSP(%rsp)
	pushfq
	popq	%r11				/* hypercall enables ints */
	movq	%r11, REGOFF_RFL(%rsp)
	movl	$UDS_SEL, REGOFF_SS(%rsp)
	addq	$REGOFF_RIP, %rsp
	/*
	 * XXPV: see comment in SYSRETQ definition for future optimization
	 *	 we could take.
	 */
	ASSERT_UPCALL_MASK_IS_SET
	SYSRETQ
#else
	ALTENTRY(nopop_sys_syscall_swapgs_sysretq)
	SWAPGS				/* user gsbase */
	sysretq
#endif
	SET_SIZE(nopop_sys_syscall_swapgs_sysretq)
	jne	_syscall_post_call
	/*
	 * Didn't abort, so reload the syscall args and invoke the handler.
	 */
	movzwl	T_SYSNUM(%r15), %eax

	jmp	_syscall_post_call

	/*
	 * Sigh, our optimism wasn't justified, put it back to LMS_SYSTEM
	 * so that we can account for the extra work it takes us to finish.
	 */
	MSTATE_TRANSITION(LMS_USER, LMS_SYSTEM)

	MSTATE_TRANSITION(LMS_SYSTEM, LMS_USER)

	SET_SIZE(sys_syscall)
	SET_SIZE(brand_sys_syscall)
	ENTRY_NP(brand_sys_syscall32)
	SWAPGS				/* kernel gsbase */
	BRAND_CALLBACK(BRAND_CB_SYSCALL32, BRAND_URET_FROM_REG(%rcx))
	jmp	nopop_sys_syscall32
	ALTENTRY(sys_syscall32)
	SWAPGS				/* kernel gsbase */

nopop_sys_syscall32:
	movl	%esp, %r10d
	movq	%gs:CPU_THREAD, %r15
	movq	T_STACK(%r15), %rsp

	movl	$U32CS_SEL, REGOFF_CS(%rsp)
	movl	%ecx, REGOFF_RIP(%rsp)		/* syscall: %rip -> %rcx */
	movq	%r11, REGOFF_RFL(%rsp)		/* syscall: %rfl -> %r11d */
	movq	%r10, REGOFF_RSP(%rsp)
	movl	$UDS_SEL, REGOFF_SS(%rsp)

	movl	%edi, REGOFF_RDI(%rsp)
	movl	%esi, REGOFF_RSI(%rsp)
	movl	%ebp, REGOFF_RBP(%rsp)
	movl	%ebx, REGOFF_RBX(%rsp)
	movl	%edx, REGOFF_RDX(%rsp)
	movl	%ecx, REGOFF_RCX(%rsp)
	movl	%eax, REGOFF_RAX(%rsp)		/* wrapper: sysc# -> %eax */
	movq	$0, REGOFF_SAVFP(%rsp)
	movq	$0, REGOFF_SAVPC(%rsp)
	/*
	 * Copy these registers here in case we end up stopped with
	 * someone (like, say, /proc) messing with our register state.
	 * We don't -restore- them unless we have to in update_sregs.
	 *
	 * Since userland -can't- change fsbase or gsbase directly,
	 * we don't bother to capture them here.
	 */
	movw	%ds, %bx
	movq	%rbx, REGOFF_DS(%rsp)
	movw	%es, %bx
	movq	%rbx, REGOFF_ES(%rsp)
	movw	%fs, %bx
	movq	%rbx, REGOFF_FS(%rsp)
	movw	%gs, %bx
	movq	%rbx, REGOFF_GS(%rsp)
	/*
	 * Application state saved in the regs structure on the stack
	 * %eax is the syscall number
	 * %rsp is the thread's stack, %r15 is curthread
	 * REG_RSP(%rsp) is the user's stack
	 */
	SYSCALL_TRAPTRACE32($TT_SYSC)

	movq	%rsp, %rbp

	movq	T_LWP(%r15), %r14
	ASSERT_NO_RUPDATE_PENDING(%r14)

	MSTATE_TRANSITION(LMS_USER, LMS_SYSTEM)
	movl	REGOFF_RAX(%rsp), %eax	/* (%rax damaged by mstate call) */

	ASSERT_LWPTOREGS(%r14, %rsp)

	incq	%gs:CPU_STATS_SYS_SYSCALL
	/*
	 * Make some space for MAXSYSARGS (currently 8) 32-bit args placed
	 * into 64-bit (long) arg slots, maintaining 16 byte alignment.  Or
	 * more succinctly:
	 *
	 *	SA(MAXSYSARGS * sizeof (long)) == 64
	 */
#define	SYS_DROP	64		/* drop for args */
	subq	$SYS_DROP, %rsp
	movb	$LWP_SYS, LWP_STATE(%r14)
	/*
	 * Fetch the arguments copied onto the kernel stack and put
	 * them in the right registers to invoke a C-style syscall handler.
	 * %rax contains the handler address.
	 *
	 * Ideas for making all this go faster of course include simply
	 * forcibly fetching 6 arguments from the user stack under lofault
	 * protection, reverting to copyin_args only when watchpoints
	 * are in effect.
	 *
	 * (If we do this, make sure that exec and libthread leave
	 * enough space at the top of the stack to ensure that we'll
	 * never do a fetch from an invalid page.)
	 *
	 * Lots of ideas here, but they won't really help with bringup B-)
	 * Correctness can't wait, performance can wait a little longer ..
	 */
	movq	%rax, %rbx
	movl	0x0(%rsp), %edi
	movl	0x8(%rsp), %esi
	movl	0x10(%rsp), %edx
	movl	0x18(%rsp), %ecx
	movl	0x20(%rsp), %r8d
	movl	0x28(%rsp), %r9d

	call	*SY_CALLC(%rbx)

	movq	%rbp, %rsp	/* pop the args */
	/*
	 * amd64 syscall handlers -always- return a 64-bit value in %rax.
	 * On the 32-bit kernel, they always return that value in %eax:%edx
	 * as required by the 32-bit ABI.
	 *
	 * Simulate the same behaviour by unconditionally splitting the
	 * return value in the same way.
	 */
	movq	%rax, %r13
	shrq	$32, %r13	/* upper 32-bits into %edx */
	movl	%eax, %r12d	/* lower 32-bits into %eax */

	/*
	 * Optimistically assume that there's no post-syscall
	 * work to do.  (This is to avoid having to call syscall_mstate()
	 * with interrupts disabled)
	 */
	MSTATE_TRANSITION(LMS_SYSTEM, LMS_USER)

	/*
	 * We must protect ourselves from being descheduled here;
	 * If we were, and we ended up on another cpu, or another
	 * lwp got in ahead of us, it could change the segment
	 * registers without us noticing before we return to userland.
	 */
	CLI(%r14)
	CHECK_POSTSYS_NE(%r15, %r14, %ebx)
	jne	_full_syscall_postsys32
	SIMPLE_SYSCALL_POSTSYS(%r15, %r14, %bx)
	/*
	 * To get back to userland, we need to put the return %rip in %rcx and
	 * the return %rfl in %r11d.  The sysret instruction also arranges
	 * to fix up %cs and %ss; everything else is our responsibility.
	 */
	movl	%r12d, %eax			/* %eax: rval1 */
	movl	REGOFF_RBX(%rsp), %ebx
	/* %ecx used for return pointer */
	movl	%r13d, %edx			/* %edx: rval2 */
	movl	REGOFF_RBP(%rsp), %ebp
	movl	REGOFF_RSI(%rsp), %esi
	movl	REGOFF_RDI(%rsp), %edi

	movl	REGOFF_RFL(%rsp), %r11d		/* %r11 -> eflags */
	movl	REGOFF_RIP(%rsp), %ecx		/* %ecx -> %eip */
	movl	REGOFF_RSP(%rsp), %esp

	ASSERT_UPCALL_MASK_IS_SET
	ALTENTRY(nopop_sys_syscall32_swapgs_sysretl)
	SWAPGS				/* user gsbase */
	sysretl
	SET_SIZE(nopop_sys_syscall32_swapgs_sysretl)
_full_syscall_postsys32:
	/*
	 * Sigh, our optimism wasn't justified, put it back to LMS_SYSTEM
	 * so that we can account for the extra work it takes us to finish.
	 */
	MSTATE_TRANSITION(LMS_USER, LMS_SYSTEM)

	movq	%r12, %rsi			/* rval1 - %eax */
	movq	%r13, %rdx			/* rval2 - %edx */

	MSTATE_TRANSITION(LMS_SYSTEM, LMS_USER)

	SET_SIZE(sys_syscall32)
	SET_SIZE(brand_sys_syscall32)
/*
 * System call handler via the sysenter instruction
 * Used only for 32-bit system calls on the 64-bit kernel.
 *
 * The caller in userland has arranged that:
 *
 * -	%eax contains the syscall number
 * -	%ecx contains the user %esp
 * -	%edx contains the return %eip
 * -	the user stack contains the args to the syscall
 *
 * Hardware and (privileged) initialization code have arranged that by
 * the time the sysenter instruction completes:
 *
 * -	%rip is pointing to sys_sysenter (below).
 * -	%cs and %ss are set to kernel text and stack (data) selectors.
 * -	%rsp is pointing at the lwp's stack
 * -	interrupts have been disabled.
 *
 * Note that we are unable to return both "rvals" to userland with
 * this call, as %edx is used by the sysexit instruction.
 *
 * One final complication in this routine is its interaction with
 * single-stepping in a debugger.  For most of the system call mechanisms,
 * the CPU automatically clears the single-step flag before we enter the
 * kernel.  The sysenter mechanism does not clear the flag, so a user
 * single-stepping through a libc routine may suddenly find themselves
 * single-stepping through the kernel.  To detect this, kmdb compares the
 * trap %pc to the [brand_]sys_sysenter addresses on each single-step trap.
 * If it finds that we have single-stepped to a sysenter entry point, it
 * explicitly clears the flag and executes the sys_sysenter routine.
 *
 * One final complication in this final complication is the fact that we
 * have two different entry points for sysenter: brand_sys_sysenter and
 * sys_sysenter.  If we enter at brand_sys_sysenter and start single-stepping
 * through the kernel with kmdb, we will eventually hit the instruction at
 * sys_sysenter.  kmdb cannot distinguish between that valid single-step
 * and the undesirable one mentioned above.  To avoid this situation, we
 * simply add a jump over the instruction at sys_sysenter to make it
 * impossible to single-step to it.
 */
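/*
 * For illustration, a hypothetical (non-PIC) userland stub honouring the
 * register contract above might be (SYS_getpid is an assumed example;
 * the real libc wrapper differs):
 *
 *	movl	$SYS_getpid, %eax	// syscall number
 *	movl	%esp, %ecx		// user stack pointer; args live here
 *	movl	$1f, %edx		// return %eip for sysexit
 *	sysenter
 * 1:
 */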
	ENTRY_NP(brand_sys_sysenter)
	SWAPGS				/* kernel gsbase */
	ALTENTRY(_brand_sys_sysenter_post_swapgs)
	BRAND_CALLBACK(BRAND_CB_SYSENTER, BRAND_URET_FROM_REG(%rdx))
	/*
	 * Jump over sys_sysenter to allow single-stepping as described
	 * above.
	 */
	jmp	_sys_sysenter_post_swapgs

	ALTENTRY(sys_sysenter)
	SWAPGS				/* kernel gsbase */

	ALTENTRY(_sys_sysenter_post_swapgs)
	movq	%gs:CPU_THREAD, %r15

	movl	$U32CS_SEL, REGOFF_CS(%rsp)
	movl	%ecx, REGOFF_RSP(%rsp)		/* wrapper: %esp -> %ecx */
	movl	%edx, REGOFF_RIP(%rsp)		/* wrapper: %eip -> %edx */
	movl	$UDS_SEL, REGOFF_SS(%rsp)

	/*
	 * Set the interrupt flag before storing the flags to the
	 * flags image on the stack so we can return to user with
	 * interrupts enabled if we return via sys_rtt_syscall32
	 */
	pushfq
	popq	%r10
	orq	$PS_IE, %r10
	movq	%r10, REGOFF_RFL(%rsp)

	movl	%edi, REGOFF_RDI(%rsp)
	movl	%esi, REGOFF_RSI(%rsp)
	movl	%ebp, REGOFF_RBP(%rsp)
	movl	%ebx, REGOFF_RBX(%rsp)
	movl	%edx, REGOFF_RDX(%rsp)
	movl	%ecx, REGOFF_RCX(%rsp)
	movl	%eax, REGOFF_RAX(%rsp)		/* wrapper: sysc# -> %eax */
	movq	$0, REGOFF_SAVFP(%rsp)
	movq	$0, REGOFF_SAVPC(%rsp)
	/*
	 * Copy these registers here in case we end up stopped with
	 * someone (like, say, /proc) messing with our register state.
	 * We don't -restore- them unless we have to in update_sregs.
	 *
	 * Since userland -can't- change fsbase or gsbase directly,
	 * we don't bother to capture them here.
	 */
	movw	%ds, %bx
	movq	%rbx, REGOFF_DS(%rsp)
	movw	%es, %bx
	movq	%rbx, REGOFF_ES(%rsp)
	movw	%fs, %bx
	movq	%rbx, REGOFF_FS(%rsp)
	movw	%gs, %bx
	movq	%rbx, REGOFF_GS(%rsp)

	/*
	 * Application state saved in the regs structure on the stack
	 * %eax is the syscall number
	 * %rsp is the thread's stack, %r15 is curthread
	 * REG_RSP(%rsp) is the user's stack
	 */
	SYSCALL_TRAPTRACE($TT_SYSENTER)

	movq	%rsp, %rbp

	movq	T_LWP(%r15), %r14
	ASSERT_NO_RUPDATE_PENDING(%r14)
	/*
	 * Catch 64-bit process trying to issue sysenter instruction
	 * on Nocona based systems.
	 */
	movq	LWP_PROCP(%r14), %rax
	cmpq	$DATAMODEL_ILP32, P_MODEL(%rax)
	je	7f

	/*
	 * For a non-32-bit process, simulate a #ud, since that's what
	 * native hardware does.  The traptrace entry (above) will
	 * let you know what really happened.
	 */
	movq	$T_ILLINST, REGOFF_TRAPNO(%rsp)
	movq	REGOFF_CS(%rsp), %rdi
	movq	%rdi, REGOFF_ERR(%rsp)
	movq	%rsp, %rdi
	movq	REGOFF_RIP(%rsp), %rsi
	movl	%gs:CPU_ID, %edx
	call	trap
	jmp	_sys_rtt
7:
	MSTATE_TRANSITION(LMS_USER, LMS_SYSTEM)
	movl	REGOFF_RAX(%rsp), %eax	/* (%rax damaged by mstate calls) */

	ASSERT_LWPTOREGS(%r14, %rsp)

	incq	%gs:CPU_STATS_SYS_SYSCALL

	/*
	 * Make some space for MAXSYSARGS (currently 8) 32-bit args
	 * placed into 64-bit (long) arg slots, plus one 64-bit
	 * (long) arg count, maintaining 16 byte alignment.
	 */
	subq	$SYS_DROP, %rsp
	movb	$LWP_SYS, LWP_STATE(%r14)
	/*
	 * Fetch the arguments copied onto the kernel stack and put
	 * them in the right registers to invoke a C-style syscall handler.
	 * %rax contains the handler address.
	 */
	movq	%rax, %rbx
	movl	0x0(%rsp), %edi
	movl	0x8(%rsp), %esi
	movl	0x10(%rsp), %edx
	movl	0x18(%rsp), %ecx
	movl	0x20(%rsp), %r8d
	movl	0x28(%rsp), %r9d

	call	*SY_CALLC(%rbx)

	movq	%rbp, %rsp	/* pop the args */
	/*
	 * amd64 syscall handlers -always- return a 64-bit value in %rax.
	 * On the 32-bit kernel, they always return that value in %eax:%edx
	 * as required by the 32-bit ABI.
	 *
	 * Simulate the same behaviour by unconditionally splitting the
	 * return value in the same way.
	 */
	movq	%rax, %r13
	shrq	$32, %r13	/* upper 32-bits into %edx */
	movl	%eax, %r12d	/* lower 32-bits into %eax */

	/*
	 * Optimistically assume that there's no post-syscall
	 * work to do.  (This is to avoid having to call syscall_mstate()
	 * with interrupts disabled)
	 */
	MSTATE_TRANSITION(LMS_SYSTEM, LMS_USER)

	/*
	 * We must protect ourselves from being descheduled here;
	 * If we were, and we ended up on another cpu, or another
	 * lwp got in ahead of us, it could change the segment
	 * registers without us noticing before we return to userland.
	 */
	CLI(%r14)
	CHECK_POSTSYS_NE(%r15, %r14, %ebx)
	jne	_full_syscall_postsys32
	SIMPLE_SYSCALL_POSTSYS(%r15, %r14, %bx)
	/*
	 * To get back to userland, load up the 32-bit registers and
	 * sysexit back where we came from.
	 */

	/*
	 * Interrupts will be turned on by the 'sti' executed just before
	 * sysexit.  The following ensures that restoring the user's rflags
	 * doesn't enable interrupts too soon.
	 */
	andq	$_BITNOT(PS_IE), REGOFF_RFL(%rsp)

	/*
	 * (There's no point in loading up %edx because the sysexit
	 * mechanism smashes it.)
	 */
	movl	%r12d, %eax
	movl	REGOFF_RBX(%rsp), %ebx
	movl	REGOFF_RBP(%rsp), %ebp
	movl	REGOFF_RSI(%rsp), %esi
	movl	REGOFF_RDI(%rsp), %edi

	movl	REGOFF_RIP(%rsp), %edx	/* sysexit: %edx -> %eip */
	pushq	REGOFF_RFL(%rsp)
	popfq
	movl	REGOFF_RSP(%rsp), %ecx	/* sysexit: %ecx -> %esp */
	ALTENTRY(sys_sysenter_swapgs_sysexit)
	swapgs
	sti
	sysexit
	SET_SIZE(sys_sysenter_swapgs_sysexit)
	SET_SIZE(sys_sysenter)
	SET_SIZE(_sys_sysenter_post_swapgs)
	SET_SIZE(brand_sys_sysenter)
/*
 * This is the destination of the "int $T_SYSCALLINT" interrupt gate, used by
 * the generic i386 libc to do system calls. We do a small amount of setup
 * before jumping into the existing sys_syscall32 path.
 */
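/*
 * For illustration, a rough, hypothetical userland sequence using this gate
 * (the syscall arguments are passed on the user stack per the 32-bit
 * convention; SYS_close is an assumed example and the real libc stub
 * differs):
 *
 *	movl	$SYS_close, %eax	// syscall number in %eax
 *	int	$T_SYSCALLINT		// enter sys_syscall_int below
 */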
	ENTRY_NP(brand_sys_syscall_int)
	SWAPGS				/* kernel gsbase */
	BRAND_CALLBACK(BRAND_CB_INT91, BRAND_URET_FROM_INTR_STACK())
	jmp	nopop_syscall_int

	ALTENTRY(sys_syscall_int)
	SWAPGS				/* kernel gsbase */

nopop_syscall_int:
	movq	%gs:CPU_THREAD, %r15
	movq	T_STACK(%r15), %rsp

	/*
	 * Set t_post_sys on this thread to force ourselves out via the slow
	 * path. It might be possible at some later date to optimize this out
	 * and use a faster return mechanism.
	 */
	movb	$1, T_POST_SYS(%r15)

	/*
	 * There should be no instructions between this label and SWAPGS/IRET
	 * or we could end up breaking branded zone support. See the usage of
	 * this label in lx_brand_int80_callback and sn1_brand_int91_callback
	 * for examples.
	 */
	ALTENTRY(sys_sysint_swapgs_iret)
	SWAPGS				/* user gsbase */
	IRET
	/*NOTREACHED*/
	SET_SIZE(sys_sysint_swapgs_iret)
	SET_SIZE(sys_syscall_int)
	SET_SIZE(brand_sys_syscall_int)
/*
 * Legacy 32-bit applications and old libc implementations do lcalls;
 * we should never get here because the LDT entry containing the syscall
 * segment descriptor has the "segment present" bit cleared, which means
 * we end up processing those system calls in trap() via a not-present trap.
 *
 * We do it this way because a call gate unhelpfully does -nothing- to the
 * interrupt flag bit, so an interrupt can run us just after the lcall
 * completes, but just before the swapgs takes effect. Thus the INTR_PUSH and
 * INTR_POP paths would have to be slightly more complex to dance around
 * this problem, and end up depending explicitly on the first
 * instruction of this handler being either swapgs or cli.
 */

	ENTRY_NP(sys_lcall32)
	SWAPGS				/* kernel gsbase */
	leaq	__lcall_panic_str(%rip), %rdi
	xorl	%eax, %eax
	call	panic
	SET_SIZE(sys_lcall32)

__lcall_panic_str:
	.string	"sys_lcall32: shouldn't be here!"
/*
 * Declare a uintptr_t which covers the entire pc range of syscall
 * handlers for the stack walkers that need this.
 */
	.globl	_allsyscalls_size
	.type	_allsyscalls_size, @object
_allsyscalls_size:
	.NWORD	. - _allsyscalls
	SET_SIZE(_allsyscalls_size)
/*
 * These are the thread context handlers for lwps using sysenter/sysexit:
 *
 *	void sep_save(void *ksp);
 *	void sep_restore(void *ksp);
 */

	/*
	 * setting this value to zero as we switch away causes the
	 * stack-pointer-on-sysenter to be NULL, ensuring that we
	 * don't silently corrupt another (preempted) thread stack
	 * when running an lwp that (somehow) didn't get sep_restore'd
	 */
	ENTRY_NP(sep_save)
	xorl	%edx, %edx
	xorl	%eax, %eax
	movl	$MSR_INTC_SEP_ESP, %ecx
	wrmsr
	ret
	SET_SIZE(sep_save)

	/*
	 * Update the kernel stack pointer as we resume onto this cpu.
	 */
	ENTRY_NP(sep_restore)
	movq	%rdi, %rdx
	shrq	$32, %rdx
	movl	%edi, %eax
	movl	$MSR_INTC_SEP_ESP, %ecx
	wrmsr
	ret
	SET_SIZE(sep_restore)