 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/
30 #include <sys/param.h>
31 #include <sys/types.h>
32 #include <sys/vmparam.h>
33 #include <sys/systm.h>
34 #include <sys/signal.h>
35 #include <sys/stack.h>
36 #include <sys/regset.h>
37 #include <sys/privregs.h>
38 #include <sys/frame.h>
40 #include <sys/brand.h>
42 #include <sys/ucontext.h>
43 #include <sys/asm_linkage.h>
44 #include <sys/errno.h>
45 #include <sys/archsystm.h>
46 #include <sys/schedctl.h>
47 #include <sys/debug.h>
48 #include <sys/sysmacros.h>
54 savecontext(ucontext_t
*ucp
, const k_sigset_t
*mask
)
56 proc_t
*p
= ttoproc(curthread
);
57 klwp_t
*lwp
= ttolwp(curthread
);
58 struct regs
*rp
= lwptoregs(lwp
);
61 * We unconditionally assign to every field through the end
62 * of the gregs, but we need to bzero() everything -after- that
63 * to avoid having any kernel stack garbage escape to userland.
65 bzero(&ucp
->uc_mcontext
.fpregs
, sizeof (ucontext_t
) -
66 offsetof(ucontext_t
, uc_mcontext
.fpregs
));
68 ucp
->uc_flags
= UC_ALL
;
69 ucp
->uc_link
= (struct ucontext
*)lwp
->lwp_oldcontext
;
72 * Try to copyin() the ustack if one is registered. If the stack
73 * has zero size, this indicates that stack bounds checking has
74 * been disabled for this LWP. If stack bounds checking is disabled
75 * or the copyin() fails, we fall back to the legacy behavior.
77 if (lwp
->lwp_ustack
== (uintptr_t)NULL
||
78 copyin((void *)lwp
->lwp_ustack
, &ucp
->uc_stack
,
79 sizeof (ucp
->uc_stack
)) != 0 ||
80 ucp
->uc_stack
.ss_size
== 0) {
82 if (lwp
->lwp_sigaltstack
.ss_flags
== SS_ONSTACK
) {
83 ucp
->uc_stack
= lwp
->lwp_sigaltstack
;
85 ucp
->uc_stack
.ss_sp
= p
->p_usrstack
- p
->p_stksize
;
86 ucp
->uc_stack
.ss_size
= p
->p_stksize
;
87 ucp
->uc_stack
.ss_flags
= 0;
92 * If either the trace flag or REQUEST_STEP is set,
93 * arrange for single-stepping and turn off the trace flag.
95 if ((rp
->r_ps
& PS_T
) || (lwp
->lwp_pcb
.pcb_flags
& REQUEST_STEP
)) {
97 * Clear PS_T so that saved user context won't have trace
102 if (!(lwp
->lwp_pcb
.pcb_flags
& REQUEST_NOSTEP
)) {
103 lwp
->lwp_pcb
.pcb_flags
|= DEBUG_PENDING
;
105 * trap() always checks DEBUG_PENDING before
106 * checking for any pending signal. This at times
107 * can potentially lead to DEBUG_PENDING not being
108 * honoured. (for eg: the lwp is stopped by
109 * stop_on_fault() called from trap(), after being
110 * awakened it might see a pending signal and call
111 * savecontext(), however on the way back to userland
112 * there is no place it can be detected). Hence in
113 * anticipation of such occassions, set AST flag for
114 * the thread which will make the thread take an
115 * excursion through trap() where it will be handled
122 getgregs(lwp
, ucp
->uc_mcontext
.gregs
);
123 if (lwp
->lwp_pcb
.pcb_fpu
.fpu_flags
& FPU_EN
)
124 getfpregs(lwp
, &ucp
->uc_mcontext
.fpregs
);
126 ucp
->uc_flags
&= ~UC_FPU
;
128 sigktou(mask
, &ucp
->uc_sigmask
);
132 * Restore user context.
135 restorecontext(ucontext_t
*ucp
)
137 kthread_t
*t
= curthread
;
138 klwp_t
*lwp
= ttolwp(t
);
140 lwp
->lwp_oldcontext
= (uintptr_t)ucp
->uc_link
;
142 if (ucp
->uc_flags
& UC_STACK
) {
143 if (ucp
->uc_stack
.ss_flags
== SS_ONSTACK
)
144 lwp
->lwp_sigaltstack
= ucp
->uc_stack
;
146 lwp
->lwp_sigaltstack
.ss_flags
&= ~SS_ONSTACK
;
149 if (ucp
->uc_flags
& UC_CPU
) {
151 * If the trace flag is set, mark the lwp to take a
152 * single-step trap on return to user level (below).
153 * The x86 lcall interface and sysenter has already done this,
154 * and turned off the flag, but amd64 syscall interface has not.
156 if (lwptoregs(lwp
)->r_ps
& PS_T
)
157 lwp
->lwp_pcb
.pcb_flags
|= DEBUG_PENDING
;
158 setgregs(lwp
, ucp
->uc_mcontext
.gregs
);
159 lwp
->lwp_eosys
= JUSTRETURN
;
164 if (ucp
->uc_flags
& UC_FPU
)
165 setfpregs(lwp
, &ucp
->uc_mcontext
.fpregs
);
167 if (ucp
->uc_flags
& UC_SIGMASK
) {
169 * We don't need to acquire p->p_lock here;
170 * we are manipulating thread-private data.
172 schedctl_finish_sigblock(t
);
173 sigutok(&ucp
->uc_sigmask
, &t
->t_hold
);
174 if (sigcheck(ttoproc(t
), t
))
181 getsetcontext(int flag
, void *arg
)
185 klwp_t
*lwp
= ttolwp(curthread
);
189 * In future releases, when the ucontext structure grows,
190 * getcontext should be modified to only return the fields
191 * specified in the uc_flags. That way, the structure can grow
192 * and still be binary compatible will all .o's which will only
193 * have old fields defined in uc_flags
198 return (set_errno(EINVAL
));
201 schedctl_finish_sigblock(curthread
);
202 savecontext(&uc
, &curthread
->t_hold
);
203 if (uc
.uc_flags
& UC_SIGMASK
)
204 SIGSET_NATIVE_TO_BRAND(&uc
.uc_sigmask
);
205 if (copyout(&uc
, arg
, sizeof (uc
)))
206 return (set_errno(EFAULT
));
214 * Don't copyin filler or floating state unless we need it.
215 * The ucontext_t struct and fields are specified in the ABI.
217 if (copyin(ucp
, &uc
, sizeof (ucontext_t
) -
218 sizeof (uc
.uc_filler
) -
219 sizeof (uc
.uc_mcontext
.fpregs
))) {
220 return (set_errno(EFAULT
));
222 if (uc
.uc_flags
& UC_SIGMASK
)
223 SIGSET_BRAND_TO_NATIVE(&uc
.uc_sigmask
);
225 if ((uc
.uc_flags
& UC_FPU
) &&
226 copyin(&ucp
->uc_mcontext
.fpregs
, &uc
.uc_mcontext
.fpregs
,
227 sizeof (uc
.uc_mcontext
.fpregs
))) {
228 return (set_errno(EFAULT
));
233 if ((uc
.uc_flags
& UC_STACK
) && (lwp
->lwp_ustack
!= 0))
234 (void) copyout(&uc
.uc_stack
, (stack_t
*)lwp
->lwp_ustack
,
235 sizeof (uc
.uc_stack
));
239 if (copyout(&lwp
->lwp_ustack
, arg
, sizeof (caddr_t
)))
240 return (set_errno(EFAULT
));
244 if (copyin(arg
, &dummy_stk
, sizeof (dummy_stk
)))
245 return (set_errno(EFAULT
));
246 lwp
->lwp_ustack
= (uintptr_t)arg
;
251 #ifdef _SYSCALL32_IMPL
254 * Save user context for 32-bit processes.
257 savecontext32(ucontext32_t
*ucp
, const k_sigset_t
*mask
)
259 proc_t
*p
= ttoproc(curthread
);
260 klwp_t
*lwp
= ttolwp(curthread
);
261 struct regs
*rp
= lwptoregs(lwp
);
263 bzero(&ucp
->uc_mcontext
.fpregs
, sizeof (ucontext32_t
) -
264 offsetof(ucontext32_t
, uc_mcontext
.fpregs
));
266 ucp
->uc_flags
= UC_ALL
;
267 ucp
->uc_link
= (caddr32_t
)lwp
->lwp_oldcontext
;
269 if (lwp
->lwp_ustack
== (uintptr_t)NULL
||
270 copyin((void *)lwp
->lwp_ustack
, &ucp
->uc_stack
,
271 sizeof (ucp
->uc_stack
)) != 0 ||
272 ucp
->uc_stack
.ss_size
== 0) {
274 if (lwp
->lwp_sigaltstack
.ss_flags
== SS_ONSTACK
) {
275 ucp
->uc_stack
.ss_sp
=
276 (caddr32_t
)(uintptr_t)lwp
->lwp_sigaltstack
.ss_sp
;
277 ucp
->uc_stack
.ss_size
=
278 (size32_t
)lwp
->lwp_sigaltstack
.ss_size
;
279 ucp
->uc_stack
.ss_flags
= SS_ONSTACK
;
281 ucp
->uc_stack
.ss_sp
= (caddr32_t
)(uintptr_t)
282 (p
->p_usrstack
- p
->p_stksize
);
283 ucp
->uc_stack
.ss_size
= (size32_t
)p
->p_stksize
;
284 ucp
->uc_stack
.ss_flags
= 0;
289 * If either the trace flag or REQUEST_STEP is set, arrange
290 * for single-stepping and turn off the trace flag.
292 if ((rp
->r_ps
& PS_T
) || (lwp
->lwp_pcb
.pcb_flags
& REQUEST_STEP
)) {
294 * Clear PS_T so that saved user context won't have trace
299 if (!(lwp
->lwp_pcb
.pcb_flags
& REQUEST_NOSTEP
)) {
300 lwp
->lwp_pcb
.pcb_flags
|= DEBUG_PENDING
;
302 * See comments in savecontext().
308 getgregs32(lwp
, ucp
->uc_mcontext
.gregs
);
309 if (lwp
->lwp_pcb
.pcb_fpu
.fpu_flags
& FPU_EN
)
310 getfpregs32(lwp
, &ucp
->uc_mcontext
.fpregs
);
312 ucp
->uc_flags
&= ~UC_FPU
;
314 sigktou(mask
, &ucp
->uc_sigmask
);
318 getsetcontext32(int flag
, void *arg
)
323 klwp_t
*lwp
= ttolwp(curthread
);
325 stack32_t dummy_stk32
;
329 return (set_errno(EINVAL
));
332 schedctl_finish_sigblock(curthread
);
333 savecontext32(&uc
, &curthread
->t_hold
);
334 if (uc
.uc_flags
& UC_SIGMASK
)
335 SIGSET_NATIVE_TO_BRAND(&uc
.uc_sigmask
);
336 if (copyout(&uc
, arg
, sizeof (uc
)))
337 return (set_errno(EFAULT
));
344 if (copyin(ucp
, &uc
, sizeof (uc
) -
345 sizeof (uc
.uc_filler
) -
346 sizeof (uc
.uc_mcontext
.fpregs
))) {
347 return (set_errno(EFAULT
));
349 if (uc
.uc_flags
& UC_SIGMASK
)
350 SIGSET_BRAND_TO_NATIVE(&uc
.uc_sigmask
);
351 if ((uc
.uc_flags
& UC_FPU
) &&
352 copyin(&ucp
->uc_mcontext
.fpregs
, &uc
.uc_mcontext
.fpregs
,
353 sizeof (uc
.uc_mcontext
.fpregs
))) {
354 return (set_errno(EFAULT
));
357 ucontext_32ton(&uc
, &ucnat
);
358 restorecontext(&ucnat
);
360 if ((uc
.uc_flags
& UC_STACK
) && (lwp
->lwp_ustack
!= 0))
361 (void) copyout(&uc
.uc_stack
,
362 (stack32_t
*)lwp
->lwp_ustack
, sizeof (uc
.uc_stack
));
366 ustack32
= (caddr32_t
)lwp
->lwp_ustack
;
367 if (copyout(&ustack32
, arg
, sizeof (ustack32
)))
368 return (set_errno(EFAULT
));
372 if (copyin(arg
, &dummy_stk32
, sizeof (dummy_stk32
)))
373 return (set_errno(EFAULT
));
374 lwp
->lwp_ustack
= (uintptr_t)arg
;
379 #endif /* _SYSCALL32_IMPL */